Brought mm0 initialization up to init_execve()

Changes:
It is now possible to use do_mmap() from within mm0.

- pager_new_virtual()/delete_virtual() return addresses that are
  disjoint from find_unmapped_area() used by mmap() interface for
  anonymous or not-fixed areas.
- find_unmapped_area() now uses task->map_start and task->map_end instead
  of task->start and task->end. task->start/end are still valid task
  space addresses for mmap(), but finding a new address is limited to
  map_start/map_end.

- We have both interfaces because mmap() is only useful for file-backed
  mappings. When the pager needs to access a user memory range that is
  not backed by a file, we need to use pager_new_virtual() instead of
  mmap() for the mapping.
This commit is contained in:
Bahadir Balban
2009-10-06 14:15:33 +03:00
parent 56ceed0786
commit 965f2f9456
11 changed files with 101 additions and 36 deletions

View File

@@ -42,7 +42,7 @@
#define MEMFS_TOTAL_SIZE SZ_4MB
#define MEMFS_TOTAL_INODES 128
#define MEMFS_TOTAL_BLOCKS 2000
#define MEMFS_FMAX_BLOCKS 40
#define MEMFS_FMAX_BLOCKS 60
#define MEMFS_BLOCK_SIZE PAGE_SIZE
#define MEMFS_MAGIC 0xB
#define MEMFS_NAME "memfs"

View File

@@ -9,6 +9,11 @@
#include <vm_area.h>
#include <init.h>
#include <physmem.h>
#include <linker.h>
#define PAGER_MMAP_SEGMENT SZ_4MB
#define PAGER_MMAP_START (page_align_up(__stack))
#define PAGER_MMAP_END (PAGER_MMAP_START + PAGER_MMAP_SEGMENT)
void init_mm_descriptors(struct page_bitmap *page_map,
struct bootdesc *bootdesc, struct membank *membank);

View File

@@ -166,4 +166,7 @@ struct tcb *task_create(struct tcb *orig,
unsigned int ctrl_flags,
unsigned int alloc_flags);
int prefault_range(struct tcb *task, unsigned long start,
unsigned long end, unsigned int vm_flags);
#endif /* __TASK_H__ */

View File

@@ -970,7 +970,7 @@ int prefault_page(struct tcb *task, unsigned long address,
.address = address,
};
dprintf("Pre-faulting address 0x%x, on task %d, with flags: 0x%x\n",
dprintf("Pre-faulting address 0x%lx, on task %d, with flags: 0x%x\n",
address, task->tid, vmflags);
/* Find the vma */

View File

@@ -1,7 +1,7 @@
/*
* Initialise the system.
*
* Copyright (C) 2007, 2008 Bahadir Balban
* Copyright (C) 2007 - 2009 Bahadir Balban
*/
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
@@ -58,6 +58,7 @@ void print_pfn_range(int pfn, int size)
printf("Used: 0x%x - 0x%x\n", addr, end);
}
/*
* This sets up the mm0 task struct and memory environment but omits
* bits that are already done such as creating a new thread, setting
@@ -105,9 +106,9 @@ int pager_setup_task(void)
task->bss_start = (unsigned long)__start_bss;
task->bss_end = (unsigned long)__end_bss;
/* Task's region available for mmap */
task->map_start = page_align_up((unsigned long)__stack);
task->map_end = 0xF0000000; /* FIXME: Fix this */
/* Task's region available for mmap */
task->map_start = PAGER_MMAP_START;
task->map_end = PAGER_MMAP_END;
/* Task's total map boundaries */
task->start = task->text_start;
@@ -124,8 +125,8 @@ int pager_setup_task(void)
VM_WRITE | VM_EXEC | VMA_PRIVATE,
__pfn(page_align_up(task->map_start) -
task->start)))) {
printf("do_mmap: failed with %d.\n", (int)mapped);
return (int)mapped;
printf("FATAL: do_mmap: failed with %d.\n", (int)mapped);
BUG();
}
task_setup_utcb(task);
@@ -453,25 +454,61 @@ void copy_init_process(void)
struct svc_image *init_img;
unsigned long img_size;
void *init_img_start, *init_img_end;
struct tcb *self = find_task(self_tid());
void *mapped;
int err;
if ((fd = sys_open(find_task(self_tid()),
"/test0", O_TRUNC | O_RDWR | O_CREAT,
0)) < 0) {
if ((fd = sys_open(self, "/test0", O_TRUNC |
O_RDWR | O_CREAT, 0)) < 0) {
printf("FATAL: Could not open file "
"to write initial task.\n");
BUG();
}
init_img = bootdesc_get_image_byname("test0");
img_size = init_img->phys_end - init_img->phys_start;
img_size = page_align_up(init_img->phys_end) -
page_align(init_img->phys_start);
init_img_start = l4_map_helper((void *)init_img->phys_start, __pfn(img_size));
init_img_start = l4_map_helper((void *)init_img->phys_start,
__pfn(img_size));
init_img_end = init_img_start + img_size;
sys_write(find_task(self_tid()), fd, init_img_start, img_size);
/*
* Map an anonymous region and prefault it.
*/
if (IS_ERR(mapped =
do_mmap(0, 0, self, 0,
VMA_ANONYMOUS | VM_READ |
VM_WRITE | VM_EXEC | VMA_PRIVATE,
__pfn(img_size)))) {
printf("FATAL: do_mmap: failed with %d.\n",
(int)mapped);
BUG();
}
/* Prefault it */
if ((err = prefault_range(self, (unsigned long)mapped,
img_size,
VM_READ | VM_WRITE)) < 0) {
printf("FATAL: Prefaulting init image failed.\n");
BUG();
}
/* Copy the raw image to anon region */
memcpy(mapped, init_img_start, img_size);
/* Write it to real file from anon region */
sys_write(find_task(self_tid()), fd, mapped, img_size);
/* Close file */
sys_close(find_task(self_tid()), fd);
/* Unmap anon region */
do_munmap(self, (unsigned long)mapped, img_size);
/* Unmap raw virtual range for image memory */
l4_unmap_helper(init_img_start,__pfn(img_size));
}
void start_init_process(void)

View File

@@ -40,21 +40,27 @@ static struct pager_virtual_address_id_pool {
.bitlimit = ADDRESS_POOL_256MB * 32,
};
/* For supplying contiguous virtual addresses to pager */
/* For supplying contiguous virtual addresses to pager
*
* MM0:
* task->start
* Text
* Data
* Bss
* Stack
* mmap area start
* mmap area end
*
* pager address pool
*
* task->end
*/
int pager_address_pool_init(void)
{
/*
* Initialise id pool for pager virtual address
* allocation. This spans from end of pager image
* until the end of the virtual address range
* allocated for the pager
*/
address_pool_init_with_idpool(&pager_vaddr_pool,
address_pool_init_with_idpool(&pager_vaddr_pool,
(struct id_pool *)
&pager_virtual_address_id_pool,
page_align_up((unsigned long)__end + PAGE_SIZE),
/* FIXME: Fix this! Same as mm0's map_start and map_end */
(unsigned long)0xF0000000);
PAGER_MMAP_END, 0xF0000000);
return 0;
}

View File

@@ -110,22 +110,22 @@ int task_insert_vma(struct vm_area *this, struct link *vma_list)
*/
unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
{
unsigned long pfn_start = __pfn(task->start);
unsigned long pfn_start = __pfn(task->map_start);
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma;
if (npages > __pfn(task->end - task->start))
if (npages > __pfn(task->map_end - task->map_start))
return 0;
/* If no vmas, first map slot is available. */
if (list_empty(&task->vm_area_head->list))
return task->start;
return task->map_start;
/* First vma to check our range against */
vma = link_to_struct(task->vm_area_head->list.next, struct vm_area, list);
/* Start searching from task's end of data to start of stack */
while (pfn_end <= __pfn(task->end)) {
while (pfn_end <= __pfn(task->map_end)) {
/* If intersection, skip the vma and fast-forward to next */
if (set_intersection(pfn_start, pfn_end,
@@ -140,7 +140,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
* Are we out of task map area?
*/
if (vma->list.next == &task->vm_area_head->list) {
if (pfn_end > __pfn(task->end))
if (pfn_end > __pfn(task->map_end))
break; /* Yes, fail */
else /* No, success */
return __pfn_to_addr(pfn_start);
@@ -151,7 +151,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
struct vm_area, list);
continue;
}
BUG_ON(pfn_start + npages > __pfn(task->end));
BUG_ON(pfn_start + npages > __pfn(task->map_end));
return __pfn_to_addr(pfn_start);
}
@@ -308,7 +308,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
}
/* Finished initialising the vma, add it to task */
dprintf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
dprintf("%s: Mapping 0x%lx - 0x%lx\n", __FUNCTION__,
map_address, map_address + __pfn_to_addr(npages));
task_insert_vma(new, &task->vm_area_head->list);

View File

@@ -87,8 +87,12 @@ int file_page_out(struct vm_object *vm_obj, unsigned long page_offset)
paddr = (void *)page_to_phys(page);
vaddr = l4_new_virtual(1);
/* FIXME: Are we sure that pages need to be mapped to self one-by-one? */
BUG();
/* FIXME:
* Are we sure that pages need
* to be mapped to self one-by-one?
*
* This needs fixing.
*/
/* Map the page to self */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());

View File

@@ -713,3 +713,13 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
return 0;
}
/*
 * Pre-fault every page in a range of task virtual memory.
 *
 * @task:     task whose address space the pages are faulted into
 * @start:    page-aligned start address of the range
 * @end:      size of the range in bytes, NOT an end address -- the
 *            faulted range is [start, start + end). The caller in
 *            copy_init_process() passes img_size here; see also the
 *            prototype in task.h.
 * @vm_flags: VM_* access flags to fault the pages with
 *
 * Returns 0 on success, or the first negative error code returned
 * by prefault_page(). (Previously errors were silently discarded
 * and 0 was always returned, even though callers test for < 0.)
 */
int prefault_range(struct tcb *task, unsigned long start,
		   unsigned long end, unsigned int vm_flags)
{
	unsigned long addr;
	int err;

	for (addr = start; addr < start + end; addr += PAGE_SIZE)
		/* Propagate the first failure instead of ignoring it */
		if ((err = prefault_page(task, addr, vm_flags)) < 0)
			return err;

	return 0;
}

View File

@@ -63,7 +63,7 @@ void print_cache_pages(struct vm_object *vmo)
printf("Pages:\n======\n");
list_foreach_struct(p, &vmo->page_cache, list) {
dprintf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
dprintf("Page offset: 0x%lx, virtual: 0x%lx, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
}

View File

@@ -19,7 +19,7 @@
#include INC_SUBARCH(mm.h)
/* Abort debugging conditions */
// #define DEBUG_ABORTS
#define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) dprintk(__VA_ARGS__)
#else