Added detailed mapping of bss.

.data and .text now need to be 4K apart.
Removed an early copy of test0, called test1.
This commit is contained in:
Bahadir Balban
2008-11-23 21:27:11 +02:00
parent 311d6917c4
commit d0e878b32c
12 changed files with 119 additions and 386 deletions

View File

@@ -5,6 +5,7 @@
*/
#include <memory.h>
#include <vm_area.h>
#include <l4/api/errno.h>
#include <lib/elf/elf.h>
#include <lib/elf/elfprg.h>
#include <lib/elf/elfsym.h>
@@ -39,6 +40,7 @@ int elf_test_expand_segment(struct elf_section_header *section,
(section->sh_flags & sec_flmask) == sec_flags) {
/* Set new section */
if (!*start) {
BUG_ON(*offset || *end);
*offset = section->sh_offset;
*start = section->sh_addr;
*end = section->sh_addr + section->sh_size;
@@ -49,13 +51,11 @@ int elf_test_expand_segment(struct elf_section_header *section,
return 0;
}
/*
* Sift through sections and copy their marks to tcb and efd
* if they are recognised and loadable sections.
*
* NOTE: There may be multiple sections of same kind, in
* consecutive address regions. Then we need to increase
* that region's marks.
* if they are recognised and loadable sections. Test the
* assigned segment marks and return an error if they're invalid.
*/
int elf_mark_segments(struct elf_section_header *sect_header, int nsections,
struct tcb *task, struct exec_file_desc *efd)
@@ -64,24 +64,45 @@ int elf_mark_segments(struct elf_section_header *sect_header, int nsections,
struct elf_section_header *section = &sect_header[i];
/* Text + read-only data segments */
elf_test_expand_segment(section, SHT_PROGBITS, SHF_ALLOC,
SHF_ALLOC | SHF_WRITE, &task->text_start,
&task->text_end, &efd->text_offset);
elf_test_expand_segment(section, SHT_PROGBITS,
SHF_ALLOC, SHF_ALLOC | SHF_WRITE,
&task->text_start, &task->text_end,
&efd->text_offset);
/* Data segment */
elf_test_expand_segment(section, SHT_PROGBITS, SHF_ALLOC | SHF_WRITE,
SHF_ALLOC | SHF_WRITE, &task->data_start,
&task->data_end, &efd->data_offset);
elf_test_expand_segment(section, SHT_PROGBITS, SHF_ALLOC |
SHF_WRITE, SHF_ALLOC | SHF_WRITE,
&task->data_start, &task->data_end,
&efd->data_offset);
/* Bss segment */
elf_test_expand_segment(section, SHT_NOBITS, SHF_ALLOC | SHF_WRITE,
SHF_ALLOC | SHF_WRITE, &task->bss_start,
&task->bss_end, &efd->bss_offset);
elf_test_expand_segment(section, SHT_NOBITS, SHF_ALLOC |
SHF_WRITE, SHF_ALLOC | SHF_WRITE,
&task->bss_start, &task->bss_end,
&efd->bss_offset);
}
if (!task->text_start || !task->data_start || !task->bss_start) {
printf("%s: NOTE: Could not find one of text, data or "
"bss segments in elf file.\n", __FUNCTION__);
/* Test anomalies with the mappings */
/* No text */
if (!task->text_start) {
printf("%s: Error: Could not find a text "
"segment in ELF file.\n", __FUNCTION__);
return -ENOEXEC;
}
/* Warn if no data or bss but it's not an error */
if (!task->data_start || !task->bss_start) {
printf("%s: NOTE: Could not find a data and/or "
"bss segment in ELF file.\n", __FUNCTION__);
}
/* Data and text are less than page apart */
if ((task->data_start - task->text_start) < PAGE_SIZE) {
printf("%s: Error: Distance between data and text"
" sections are less than page size (4K)\n",
__FUNCTION__);
return -ENOEXEC;
}
return 0;
@@ -105,7 +126,7 @@ int elf_parse_executable(struct tcb *task, struct vm_file *file,
struct elf_section_header *sect_header;
unsigned long sect_offset, sect_size;
unsigned long prg_offset, prg_size;
int err;
int err = 0;
/* Test that it is a valid elf file */
if ((err = elf_probe(elf_headerp)) < 0)
@@ -138,7 +159,8 @@ int elf_parse_executable(struct tcb *task, struct vm_file *file,
sect_header = (struct elf_section_header *)
pager_map_file_range(file, sect_offset, sect_size);
elf_mark_segments(sect_header, elf_header.e_shnum, task, efd);
/* Copy segment marks from ELF file to task + efd. Return errors */
err = elf_mark_segments(sect_header, elf_header.e_shnum, task, efd);
/* Unmap program header table */
pager_unmap_pages(prg_header_start, __pfn(page_align_up(prg_size)));
@@ -146,6 +168,6 @@ int elf_parse_executable(struct tcb *task, struct vm_file *file,
/* Unmap section header table */
pager_unmap_pages(sect_header, __pfn(page_align_up(sect_size)));
return 0;
return err;
}

View File

@@ -16,6 +16,7 @@
#include <l4lib/arch/utcb.h>
#include <l4lib/ipcdefs.h>
#include <l4lib/exregs.h>
#include <l4/lib/math.h>
#include <lib/addr.h>
#include <lib/malloc.h>
#include <init.h>
@@ -342,12 +343,79 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
return task;
}
/*
 * Map the bss segment of a task.
 *
 * If bss comes consecutively after the data section, prefault the
 * last page of the data section and zero out the bit that contains
 * the beginning of bss. If bss spans into more pages, map those
 * pages as anonymous pages (backed by the devzero file).
 *
 * @f and @efd are currently unused but kept for symmetry with the
 * other segment-mapping helpers.
 *
 * Returns 0 on success, or the negative error from do_mmap().
 */
int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task)
{
	unsigned long bss_mmap_start;
	void *mapped;

	/*
	 * Test if bss starts right from the end of data,
	 * and not on a new page boundary.
	 */
	if ((task->data_end == task->bss_start) &&
	    !is_page_aligned(task->bss_start)) {
		unsigned long bss_size = task->bss_end - task->bss_start;
		struct page *last_data_page;
		void *pagebuf, *bss;

		/* Prefault the last data page, which also holds the head of bss */
		BUG_ON(prefault_page(task, task->data_end,
				     VM_READ | VM_WRITE) < 0);

		/* Get the page */
		last_data_page = task_virt_to_page(task, task->data_end);

		/* Map the page into the pager so it can be written */
		pagebuf = l4_map_helper((void *)page_to_phys(last_data_page), 1);

		/* Find the bss offset within that page */
		bss = (void *)((unsigned long)pagebuf |
			       (PAGE_MASK & task->bss_start));

		/*
		 * Zero out the part that is bss. This is minimum of either
		 * end of bss or until the end of page, whichever is met first.
		 */
		memset((void *)bss, 0, min(TILL_PAGE_ENDS(task->data_end),
					   (int)bss_size));

		/* Unmap the page */
		l4_unmap_helper(pagebuf, 1);

		/* Push bss mmap start to next page */
		bss_mmap_start = page_align_up(task->bss_start);
	} else /* Otherwise bss mmap start is same as bss_start */
		bss_mmap_start = task->bss_start;

	/*
	 * Now if there are more pages covering bss,
	 * map those as anonymous zero pages.
	 *
	 * FIX: map from bss_mmap_start rather than task->bss_start.
	 * Mapping from bss_start would cover the prefaulted last data
	 * page again with anonymous zero pages, clobbering the data
	 * that shares the page with the head of bss.
	 */
	if (task->bss_end > bss_mmap_start) {
		if (IS_ERR(mapped = do_mmap(0, 0, task, bss_mmap_start,
					    VM_READ | VM_WRITE |
					    VMA_PRIVATE | VMA_ANONYMOUS,
					    __pfn(page_align_up(task->bss_end) -
						  page_align(bss_mmap_start))))) {
			/* Was "Mapping environment failed" - copy-paste slip */
			printf("do_mmap: Mapping bss failed with %d.\n",
			       (int)mapped);
			return (int)mapped;
		}
	}

	return 0;
}
int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd)
{
void *mapped;
struct vm_file *shm;
/* Set up heap as one page after bss */
int err;
/* mmap task's text to task's address space. */
if (IS_ERR(mapped = do_mmap(file, efd->text_offset, task, task->text_start,
@@ -368,14 +436,12 @@ int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_
}
/* mmap task's bss as anonymous memory. */
if (IS_ERR(mapped = do_mmap(0, 0, task, task->bss_start,
VM_READ | VM_WRITE |
VMA_PRIVATE | VMA_ANONYMOUS,
__pfn(task->bss_end - task->bss_start)))) {
printf("do_mmap: Mapping environment failed with %d.\n",
(int)mapped);
return (int)mapped;
if ((err = task_map_bss(file, efd, task)) < 0) {
printf("%s: Mapping bss has failed.\n",
__FUNCTION__);
return err;
}
/* mmap task's environment as anonymous memory. */
if (IS_ERR(mapped = do_mmap(0, 0, task, task->env_start,
VM_READ | VM_WRITE |