From d306d6b451468d928673a27ecaaf0c4b9e9dbcd9 Mon Sep 17 00:00:00 2001 From: Bahadir Balban Date: Thu, 5 Nov 2009 18:25:26 +0200 Subject: [PATCH] posix: Fixing mm0 mapping task pages incorrectly Modifying task_virt_to_page() so that it takes into account page protections. If mm0 writes to a task page that is meant to be read-only (e.g. the zero page), the design is broken. Every access to a task's page will take the page fault route, and the page fault handler will return the page instead of 0. --- conts/libl4/src/arm/exregs.c | 1 + conts/posix/mm0/include/vm_area.h | 8 +++++--- conts/posix/mm0/mm/fault.c | 23 +++++++++++++++++++++-- conts/posix/mm0/mm/file.c | 6 ++++-- conts/posix/mm0/mm/memory.c | 4 ++-- conts/posix/mm0/mm/pagers.c | 2 +- conts/posix/mm0/mm/task.c | 30 +++++++++++++++++++----------- conts/posix/mm0/mm/user.c | 6 ++++-- 8 files changed, 57 insertions(+), 23 deletions(-) diff --git a/conts/libl4/src/arm/exregs.c b/conts/libl4/src/arm/exregs.c index ccbb620..b418c4f 100644 --- a/conts/libl4/src/arm/exregs.c +++ b/conts/libl4/src/arm/exregs.c @@ -6,6 +6,7 @@ */ #include #include +#include #include INC_GLUE(message.h) void exregs_print_registers(void) diff --git a/conts/posix/mm0/include/vm_area.h b/conts/posix/mm0/include/vm_area.h index 2b7a2d4..ee3d1a8 100644 --- a/conts/posix/mm0/include/vm_area.h +++ b/conts/posix/mm0/include/vm_area.h @@ -25,8 +25,8 @@ /* Protection flags */ #define VM_NONE (1 << 0) #define VM_READ (1 << 1) -#define VM_WRITE (1 << 2) -#define VM_EXEC (1 << 3) +#define VM_EXEC (1 << 2) +#define VM_WRITE (1 << 3) #define VM_PROT_MASK (VM_READ | VM_WRITE | VM_EXEC) /* Shared copy of a file */ @@ -235,13 +235,15 @@ void *pager_map_page(struct vm_file *f, unsigned long page_offset); void pager_unmap_page(void *vaddr); /* To get currently mapped page of a virtual address on a task */ -struct page *task_virt_to_page(struct tcb *t, unsigned long virtual); +struct page *task_virt_to_page(struct tcb *t, unsigned long virtual, + unsigned 
int vm_flags); int validate_task_range(struct tcb *t, unsigned long start, unsigned long end, unsigned int vmflags); /* Changes all shadows and their ptes to read-only */ int vm_freeze_shadows(struct tcb *task); +int vm_compare_prot_flags(unsigned int current, unsigned int needed); int task_insert_vma(struct vm_area *vma, struct link *vma_list); /* Main page fault entry point */ diff --git a/conts/posix/mm0/mm/fault.c b/conts/posix/mm0/mm/fault.c index 9991a5c..3fb45a8 100644 --- a/conts/posix/mm0/mm/fault.c +++ b/conts/posix/mm0/mm/fault.c @@ -898,13 +898,29 @@ int page_fault_handler(struct tcb *sender, fault_kdata_t *fkdata) return err; } +int vm_compare_prot_flags(unsigned int current, unsigned int needed) +{ + current &= VM_PROT_MASK; + needed &= VM_PROT_MASK; + + if (needed & VM_READ) + if (current & (VM_READ | VM_WRITE)) + return 1; + + if (needed & VM_WRITE && + (current & VM_WRITE)) + return 1; + + return 0; +} + /* * Makes the virtual to page translation for a given user task. * It traverses the vm_objects and returns the first encountered * instance of the page. If page is not mapped in the task's address * space, (not faulted at all), returns error. 
*/ -struct page *task_virt_to_page(struct tcb *t, unsigned long virtual) +struct page *task_virt_to_page(struct tcb *t, unsigned long virtual, unsigned int vm_flags) { unsigned long vma_offset; unsigned long file_offset; @@ -947,7 +963,10 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual) } } - /* Found it */ + /* Found one, but does it have the right permissions */ + if (!vm_compare_prot_flags(vmo_link->obj->flags, vm_flags)) + return PTR_ERR(-EFAULT); + // printf("%s: %s: Found page with file_offset: 0x%x\n", // __TASKNAME__, __FUNCTION__, page->offset); // vm_object_print(vmo_link->obj); diff --git a/conts/posix/mm0/mm/file.c b/conts/posix/mm0/mm/file.c index e8b2d60..3c887ff 100644 --- a/conts/posix/mm0/mm/file.c +++ b/conts/posix/mm0/mm/file.c @@ -552,14 +552,16 @@ int copy_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf, copysize = min(copysize, PAGE_SIZE - page_offset(task_offset)); if (read) - page_copy(task_virt_to_page(task, task_offset), + page_copy(task_virt_to_page(task, task_offset, + VM_READ | VM_WRITE), file_page, page_offset(task_offset), page_offset(file_offset), copysize); else page_copy(file_page, - task_virt_to_page(task, task_offset), + task_virt_to_page(task, task_offset, + VM_READ), page_offset(file_offset), page_offset(task_offset), copysize); diff --git a/conts/posix/mm0/mm/memory.c b/conts/posix/mm0/mm/memory.c index f018ae1..6ff662c 100644 --- a/conts/posix/mm0/mm/memory.c +++ b/conts/posix/mm0/mm/memory.c @@ -179,7 +179,7 @@ void *pager_validate_map_user_range2(struct tcb *user, void *userptr, /* Map every page contiguously in the allocated virtual address range */ for (unsigned long addr = start; addr < end; addr += PAGE_SIZE) { - struct page *p = task_virt_to_page(user, addr); + struct page *p = task_virt_to_page(user, addr, vm_flags); if (IS_ERR(p)) { /* Unmap pages mapped so far */ @@ -191,7 +191,7 @@ void *pager_validate_map_user_range2(struct tcb *user, void *userptr, return p; } - l4_map((void 
*)page_to_phys(task_virt_to_page(user, addr)), + l4_map((void *)page_to_phys(task_virt_to_page(user, addr, vm_flags)), virt, 1, MAP_USR_RW_FLAGS, self_tid()); virt += PAGE_SIZE; } diff --git a/conts/posix/mm0/mm/pagers.c b/conts/posix/mm0/mm/pagers.c index babd55e..c962969 100644 --- a/conts/posix/mm0/mm/pagers.c +++ b/conts/posix/mm0/mm/pagers.c @@ -24,8 +24,8 @@ struct page *page_init(struct page *page) link_init(&page->list); return page; - } + struct page *find_page(struct vm_object *obj, unsigned long pfn) { struct page *p; diff --git a/conts/posix/mm0/mm/task.c b/conts/posix/mm0/mm/task.c index 3a3a721..885f2cb 100644 --- a/conts/posix/mm0/mm/task.c +++ b/conts/posix/mm0/mm/task.c @@ -504,6 +504,18 @@ int task_copy_args_to_user(char *user_stack, return 0; } +int prefault_range(struct tcb *task, unsigned long start, + unsigned long size, unsigned int vm_flags) +{ + int err; + + for (unsigned long i = start; i < start + size; i += PAGE_SIZE) + if ((err = prefault_page(task, i, vm_flags)) < 0) + return err; + return 0; +} + + int task_map_stack(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task, struct args_struct *args, struct args_struct *env) @@ -541,6 +553,10 @@ int task_map_stack(struct vm_file *f, struct exec_file_desc *efd, return (int)mapped; } + /* Prefault the stack for writing. */ + BUG_ON(prefault_range(task, task->args_start, + stack_used, VM_READ | VM_WRITE) < 0); + /* Map the stack's part that will contain args and environment */ if (IS_ERR(args_on_stack = pager_validate_map_user_range2(task, @@ -585,7 +601,8 @@ int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task BUG_ON(prefault_page(task, task->data_end, VM_READ | VM_WRITE) < 0); /* Get the page */ - last_data_page = task_virt_to_page(task, task->data_end); + last_data_page = task_virt_to_page(task, task->data_end, + VM_READ | VM_WRITE); /* Map the page. FIXME: PAGE COLOR!!! 
*/ pagebuf = l4_map_helper((void *)page_to_phys(last_data_page), 1); @@ -628,6 +645,7 @@ int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task return 0; } + int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd, struct args_struct *args, struct args_struct *env) { @@ -764,13 +782,3 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f) return 0; } -int prefault_range(struct tcb *task, unsigned long start, - unsigned long end, unsigned int vm_flags) -{ - for (unsigned long i = start; i < start + end; i += PAGE_SIZE) - prefault_page(task, i, vm_flags); - - return 0; -} - - diff --git a/conts/posix/mm0/mm/user.c b/conts/posix/mm0/mm/user.c index 4d86853..8b3edbb 100644 --- a/conts/posix/mm0/mm/user.c +++ b/conts/posix/mm0/mm/user.c @@ -58,14 +58,16 @@ void *pager_validate_map_user_range(struct tcb *user, void *userptr, return 0; /* Map first page and calculate the mapped address of pointer */ - mapped = l4_map_helper((void *)page_to_phys(task_virt_to_page(user, start)), 1); + mapped = l4_map_helper((void *)page_to_phys(task_virt_to_page(user, start, + vm_flags)), 1); mapped = (void *)(((unsigned long)mapped) | ((unsigned long)(PAGE_MASK & (unsigned long)userptr))); /* Map the rest of the pages, if any */ for (unsigned long i = start + PAGE_SIZE; i < end; i += PAGE_SIZE) l4_map_helper((void *)page_to_phys(task_virt_to_page(user, - start + i)), 1); + start + i, + vm_flags)), 1); return mapped; }