More progress on parsing elf files. Fixes to memfs file read/write

Increased inode block pointers to 40, the current maximum allowed (and checked).
Updating the file size after every file write ensures that subsequent writes
operate on up-to-date size information (i.e. they do not try to add pages that
are already present). We cannot do this inside write() because directory writes
rely on byte-granularity updates to file buffers, whereas regular file updates
are (currently) page-granular.
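
A minimal sketch of the size-update rule above (not the committed code; the
file 'length' field and the wrapper function are assumptions for illustration):

/*
 * Sketch: bump the file size right after a successful cache write so the
 * next write sees the new size and does not re-add existing pages.
 */
int memfs_post_write(struct vm_file *vmfile, unsigned long offset, int written)
{
	if (written < 0)
		return written;

	/*
	 * Done here rather than inside write(): directory writes reuse the
	 * same buffers with byte granularity, while regular file writes are
	 * page-granular, so write() itself cannot apply one rule for both.
	 */
	if (offset + written > vmfile->length)
		vmfile->length = offset + written;

	return written;
}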
Bahadir Balban
2008-11-21 19:26:10 +02:00
parent 27d331895b
commit 2d5a08ff32
15 changed files with 187 additions and 30 deletions

View File

@@ -18,6 +18,7 @@
#include <user.h>
#include <task.h>
#include <exit.h>
#include <lib/elf/elf.h>
/*
* Different from vfs_open(), which validates an already opened
@@ -78,8 +79,7 @@ int task_setup_from_executable(struct vm_file *vmfile, struct tcb *task,
{
memset(efd, 0, sizeof(*efd));
return elf_parse_executable(task, vmfile, efd,
pager_map_page, pager_unmap_page);
return elf_parse_executable(task, vmfile, efd);
}
int do_execve(struct tcb *sender, char *filename)

View File

@@ -262,12 +262,12 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* If there's only one in list */
if (before->list.next == &vmo->page_cache) {
/* Add to end if greater */
/* Add as next if greater */
if (this->offset > before->offset)
list_add_tail(&this->list, &before->list);
/* Add to beginning if smaller */
else if (this->offset < before->offset)
list_add(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->offset < before->offset)
list_add_tail(&this->list, &before->list);
else
BUG();
return 0;
@@ -276,7 +276,7 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* If this page is in-between two other, insert it there */
if (before->offset < this->offset &&
after->offset > this->offset) {
list_add_tail(&this->list, &before->list);
list_add(&this->list, &before->list);
return 0;
}
BUG_ON(this->offset == before->offset);
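
The list_add()/list_add_tail() swap above hinges on their relative-insert
semantics. Assuming Linux-style list helpers (the project's own list.h may
differ), list_add() places the new entry right after the given entry and
list_add_tail() right before it, which is the distinction the insertion cases
above depend on:

/* Illustration only, not the project's headers. */
struct list_head { struct list_head *prev, *next; };

/* new becomes entry's next */
static inline void list_add(struct list_head *new, struct list_head *entry)
{
	new->prev = entry;
	new->next = entry->next;
	entry->next->prev = new;
	entry->next = new;
}

/* new becomes entry's previous */
static inline void list_add_tail(struct list_head *new, struct list_head *entry)
{
	new->next = entry;
	new->prev = entry->prev;
	entry->prev->next = new;
	entry->prev = new;
}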
@@ -565,7 +565,7 @@ int new_file_pages(struct vm_file *f, unsigned long start, unsigned long end)
*/
/* Writes user data in buffer into pages in cache */
int write_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
int write_cache_pages_orig(struct vm_file *vmfile, struct tcb *task, void *buf,
unsigned long pfn_start, unsigned long pfn_end,
unsigned long cursor_offset, int count)
{
@@ -623,6 +623,62 @@ copy:
return count - left;
}
/*
* Writes user data in buffer into pages in cache. If a page is not
* found, it's a bug. The writeable page range must have been readied
* by read_file_pages()/new_file_pages().
*/
int write_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
unsigned long pfn_start, unsigned long pfn_end,
unsigned long cursor_offset, int count)
{
struct page *head;
unsigned long last_pgoff; /* Last copied page's offset */
unsigned long copy_offset; /* Current copy offset on the buffer */
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list) {
/* First page */
if (head->offset == pfn_start) {
left = count;
/* Copy the first page and unmap. */
copysize = (left < PAGE_SIZE) ? left : PAGE_SIZE;
copy_offset = (unsigned long)buf;
page_copy(head, task_virt_to_page(task, copy_offset),
cursor_offset, copy_offset & PAGE_MASK, copysize);
head->flags |= VM_DIRTY;
head->owner->flags |= VM_DIRTY;
left -= copysize;
last_pgoff = head->offset;
/* Rest of the consecutive pages */
} else if (head->offset > pfn_start && head->offset < pfn_end) {
/* Make sure we're advancing on pages consecutively */
BUG_ON(head->offset != last_pgoff + 1);
copysize = (left < PAGE_SIZE) ? left : PAGE_SIZE;
copy_offset = (unsigned long)buf + count - left;
/* Must be page aligned */
BUG_ON(!is_page_aligned(copy_offset));
page_copy(head, task_virt_to_page(task, copy_offset),
0, 0, copysize);
head->flags |= VM_DIRTY;
left -= copysize;
last_pgoff = head->offset;
} else if (head->offset == pfn_end || left == 0)
break;
}
BUG_ON(left != 0);
return count - left;
}
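
The contract stated above (the page range must already be resident) implies a
caller shaped roughly like the sketch below; read_file_pages()'s exact
signature and the file 'length' field are assumptions, the rest follows the
helpers in this diff:

int cache_write(struct vm_file *vmfile, struct tcb *task, void *buf,
		unsigned long offset, int count)
{
	unsigned long pfn_start = offset / PAGE_SIZE;
	unsigned long pfn_end = (offset + count + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long file_pages = (vmfile->length + PAGE_SIZE - 1) / PAGE_SIZE;
	int err;

	/* Pages inside the current file size: read them into the cache */
	if (pfn_start < file_pages &&
	    (err = read_file_pages(vmfile, pfn_start,
				   pfn_end < file_pages ?
				   pfn_end : file_pages)) < 0)
		return err;

	/* Pages beyond the current end of file: allocate fresh ones */
	if (pfn_end > file_pages &&
	    (err = new_file_pages(vmfile, file_pages, pfn_end)) < 0)
		return err;

	/* Every page in [pfn_start, pfn_end) is now in the page cache */
	return write_cache_pages(vmfile, task, buf, pfn_start, pfn_end,
				 offset % PAGE_SIZE, count);
}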
/*
* Reads a page range from an ordered list of pages into buffer.
*

View File

@@ -4,10 +4,10 @@
* Copyright (C) 2008 Bahadir Balban
*/
#include <vm_area.h>
#include <lib/elf.h>
#include <lib/elfprg.h>
#include <lib/elfsym.h>
#include <lib/elfsect.h>
#include <lib/elf/elf.h>
#include <lib/elf/elfprg.h>
#include <lib/elf/elfsym.h>
#include <lib/elf/elfsect.h>
int elf_probe(struct elf_header *header)
@@ -32,8 +32,7 @@ int elf_probe(struct elf_header *header)
* only) segment that has type LOAD. Then it looks at the section header
* table, to find out about every loadable section that is part of this
* aforementioned loadable program segment. Each section is marked in the
* efd and tcb structures for further memory mappings. Loading an elf
* executable is simple as that, but it is described poorly in manuals.
* efd and tcb structures for further memory mappings.
*/
int elf_parse_executable(struct tcb *task, struct vm_file *file,
struct exec_file_desc *efd)
@@ -41,7 +40,7 @@ int elf_parse_executable(struct tcb *task, struct vm_file *file,
int err;
struct elf_header *elf_header = pager_map_page(file, 0);
struct elf_program_header *prg_header_start, *prg_header_load;
struct elf_section_header *sect_header_start;
struct elf_section_header *sect_header;
/* Test that it is a valid elf file */
if ((err = elf_probe(elf_header)) < 0)
@@ -53,14 +52,55 @@ int elf_parse_executable(struct tcb *task, struct vm_file *file,
/* Get the first loadable segment */
for (int i = 0; i < elf_header->e_phnum; i++) {
if (prg_header_start[i].type == PT_LOAD) {
if (prg_header_start[i].p_type == PT_LOAD) {
prg_header_load = &prg_header_start[i];
break;
}
}
/* Get the section header table */
sect_header_start = (struct elf_section_header *)
((void *)elf_header + elf_header->e_shoff);
sect_header = (struct elf_section_header *)
((void *)elf_header + elf_header->e_shoff);
/*
* Sift through sections and copy their marks to tcb and efd
* if they are recognised and loadable sections.
*
* NOTE: There may be multiple sections of same kind, in
* consecutive address regions. Then we need to increase
* that region's marks.
*/
for (int i = 0; i < elf_header->e_shnum; i++) {
struct elf_section_header *section = &sect_header[i];
/* Text section */
if (section->sh_type == SHT_PROGBITS &&
section->sh_flags & SHF_ALLOC &&
section->sh_flags & SHF_EXECINSTR) {
efd->text_offset = section->sh_offset;
task->text_start = section->sh_addr;
task->text_end = section->sh_addr + section->sh_size;
}
/* Data section */
if (section->sh_type == SHT_PROGBITS &&
section->sh_flags & SHF_ALLOC &&
section->sh_flags & SHF_WRITE) {
efd->data_offset = section->sh_offset;
task->data_start = section->sh_addr;
task->data_end = section->sh_addr + section->sh_size;
}
/* BSS section */
if (section->sh_type == SHT_NOBITS &&
section->sh_flags & SHF_ALLOC &&
section->sh_flags & SHF_WRITE) {
efd->bss_offset = section->sh_offset;
task->bss_start = section->sh_addr;
task->bss_end = section->sh_addr + section->sh_size;
}
}
return 0;
}
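
The NOTE in the hunk above (multiple same-kind sections in consecutive
regions) is not yet handled by the loop, which simply overwrites the marks on
each match. A sketch of the extension it describes, reusing the fields from
the hunk (the helper itself is hypothetical):

static void mark_data_section(struct tcb *task, struct exec_file_desc *efd,
			      struct elf_section_header *section)
{
	if (task->data_start && task->data_end == section->sh_addr) {
		/* Contiguous with the previous data section: extend it */
		task->data_end += section->sh_size;
	} else {
		/* First (or non-contiguous) data section: set the marks */
		efd->data_offset = section->sh_offset;
		task->data_start = section->sh_addr;
		task->data_end = section->sh_addr + section->sh_size;
	}
}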

View File

@@ -347,6 +347,8 @@ int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_
void *mapped;
struct vm_file *shm;
/* Set up heap as one page after bss */
/* mmap task's text to task's address space. */
if (IS_ERR(mapped = do_mmap(file, efd->text_offset, task, task->text_start,
VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,