posix: Fixing mm0 mapping task pages incorrectly

Modify task_virt_to_page() so that it takes page protections into
account. If mm0 writes to a task page that is meant to be read-only
(e.g. the zero page), the design is broken.

Every access to a task's page now takes the page fault route, and the
page fault handler returns the page instead of 0.
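
The new contract, as a minimal sketch (the caller below is hypothetical;
struct page, IS_ERR and the VM_* flags are mm0's own, as seen in the
diffs that follow):

    /* Hypothetical caller: request the page with the access we intend. */
    struct page *page = task_virt_to_page(task, vaddr, VM_READ | VM_WRITE);
    if (IS_ERR(page))
            return -EFAULT; /* unmapped page, or protections don't allow it */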
Author: Bahadir Balban
Date:   2009-11-05 18:25:26 +02:00
parent 35a5dc4c92
commit d306d6b451
8 changed files with 57 additions and 23 deletions

View File

@@ -6,6 +6,7 @@
*/
#include <l4/macros.h>
#include <l4lib/exregs.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(message.h)
void exregs_print_registers(void)

View File

@@ -25,8 +25,8 @@
 /* Protection flags */
 #define VM_NONE		(1 << 0)
 #define VM_READ		(1 << 1)
-#define VM_WRITE	(1 << 2)
-#define VM_EXEC		(1 << 3)
+#define VM_EXEC		(1 << 2)
+#define VM_WRITE	(1 << 3)
 #define VM_PROT_MASK	(VM_READ | VM_WRITE | VM_EXEC)
 
 /* Shared copy of a file */
@@ -235,13 +235,15 @@ void *pager_map_page(struct vm_file *f, unsigned long page_offset);
 void pager_unmap_page(void *vaddr);
 
 /* To get currently mapped page of a virtual address on a task */
-struct page *task_virt_to_page(struct tcb *t, unsigned long virtual);
+struct page *task_virt_to_page(struct tcb *t, unsigned long virtual,
+			       unsigned int vm_flags);
 
 int validate_task_range(struct tcb *t, unsigned long start,
 			unsigned long end, unsigned int vmflags);
 
 /* Changes all shadows and their ptes to read-only */
 int vm_freeze_shadows(struct tcb *task);
+int vm_compare_prot_flags(unsigned int current, unsigned int needed);
 int task_insert_vma(struct vm_area *vma, struct link *vma_list);
 
 /* Main page fault entry point */
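
An aside on the VM_WRITE/VM_EXEC swap above: VM_PROT_MASK is unchanged,
since the same three bits are set either way. A compile-time check
(illustrative, C11 _Static_assert):

    _Static_assert((VM_READ | VM_WRITE | VM_EXEC) == ((1 << 1) | (1 << 2) | (1 << 3)),
                   "VM_PROT_MASK covers bits 1-3 before and after the swap");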

View File

@@ -898,13 +898,29 @@ int page_fault_handler(struct tcb *sender, fault_kdata_t *fkdata)
 		return err;
 }
 
+int vm_compare_prot_flags(unsigned int current, unsigned int needed)
+{
+	current &= VM_PROT_MASK;
+	needed &= VM_PROT_MASK;
+
+	if (needed & VM_READ)
+		if (current & (VM_READ | VM_WRITE))
+			return 1;
+
+	if (needed & VM_WRITE &&
+	    (current & VM_WRITE))
+		return 1;
+
+	return 0;
+}
+
 /*
  * Makes the virtual to page translation for a given user task.
  * It traverses the vm_objects and returns the first encountered
  * instance of the page. If page is not mapped in the task's address
  * space, (not faulted at all), returns error.
  */
-struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
+struct page *task_virt_to_page(struct tcb *t, unsigned long virtual, unsigned int vm_flags)
 {
 	unsigned long vma_offset;
 	unsigned long file_offset;
@@ -947,7 +963,10 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
 		}
 	}
 
-	/* Found it */
+	/* Found one, but does it have the right permissions */
+	if (!vm_compare_prot_flags(vmo_link->obj->flags, vm_flags))
+		return PTR_ERR(-EFAULT);
 
 //	printf("%s: %s: Found page with file_offset: 0x%x\n",
 //	       __TASKNAME__, __FUNCTION__, page->offset);
 //	vm_object_print(vmo_link->obj);
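
The semantics of the new check, restated as a standalone test. The flag
values and the body of vm_compare_prot_flags are copied from this
commit; the harness around them is illustrative:

    #include <assert.h>

    #define VM_READ  (1 << 1)
    #define VM_EXEC  (1 << 2)
    #define VM_WRITE (1 << 3)
    #define VM_PROT_MASK (VM_READ | VM_WRITE | VM_EXEC)

    int vm_compare_prot_flags(unsigned int current, unsigned int needed)
    {
            current &= VM_PROT_MASK;
            needed &= VM_PROT_MASK;

            if (needed & VM_READ)
                    if (current & (VM_READ | VM_WRITE))
                            return 1;

            if ((needed & VM_WRITE) && (current & VM_WRITE))
                    return 1;

            return 0;
    }

    int main(void)
    {
            /* A read is satisfied by a readable or writable mapping... */
            assert(vm_compare_prot_flags(VM_READ, VM_READ));
            assert(vm_compare_prot_flags(VM_WRITE, VM_READ));
            /* ...but a write demands VM_WRITE: the zero-page case. */
            assert(!vm_compare_prot_flags(VM_READ, VM_WRITE));
            assert(vm_compare_prot_flags(VM_WRITE, VM_WRITE));
            return 0;
    }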

View File

@@ -552,14 +552,16 @@ int copy_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
 		copysize = min(copysize, PAGE_SIZE - page_offset(task_offset));
 
 		if (read)
-			page_copy(task_virt_to_page(task, task_offset),
+			page_copy(task_virt_to_page(task, task_offset,
+						    VM_READ | VM_WRITE),
 				  file_page,
 				  page_offset(task_offset),
 				  page_offset(file_offset),
 				  copysize);
 		else
 			page_copy(file_page,
-				  task_virt_to_page(task, task_offset),
+				  task_virt_to_page(task, task_offset,
+						    VM_READ),
 				  page_offset(file_offset),
 				  page_offset(task_offset),
 				  copysize);
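
Why the two branches request different protections (assuming
page_copy(dst, src, dst_offset, src_offset, size), which matches the
argument order above):

    /* read path:  the task page is the copy destination -> VM_READ | VM_WRITE
     * write path: the task page is only the copy source  -> VM_READ suffices */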

View File

@@ -179,7 +179,7 @@ void *pager_validate_map_user_range2(struct tcb *user, void *userptr,
 	/* Map every page contiguously in the allocated virtual address range */
 	for (unsigned long addr = start; addr < end; addr += PAGE_SIZE) {
-		struct page *p = task_virt_to_page(user, addr);
+		struct page *p = task_virt_to_page(user, addr, vm_flags);
 
 		if (IS_ERR(p)) {
 			/* Unmap pages mapped so far */
@@ -191,7 +191,7 @@
 			return p;
 		}
 
-		l4_map((void *)page_to_phys(task_virt_to_page(user, addr)),
+		l4_map((void *)page_to_phys(task_virt_to_page(user, addr, vm_flags)),
 		       virt, 1, MAP_USR_RW_FLAGS, self_tid());
 		virt += PAGE_SIZE;
 	}

View File

@@ -24,8 +24,8 @@ struct page *page_init(struct page *page)
	link_init(&page->list);

	return page;
}

struct page *find_page(struct vm_object *obj, unsigned long pfn)
{
	struct page *p;

View File

@@ -504,6 +504,18 @@ int task_copy_args_to_user(char *user_stack,
 	return 0;
 }
 
+int prefault_range(struct tcb *task, unsigned long start,
+		   unsigned long size, unsigned int vm_flags)
+{
+	int err;
+
+	for (unsigned long i = start; i < start + size; i += PAGE_SIZE)
+		if ((err = prefault_page(task, i, vm_flags)) < 0)
+			return err;
+
+	return 0;
+}
+
 int task_map_stack(struct vm_file *f, struct exec_file_desc *efd,
 		   struct tcb *task, struct args_struct *args,
 		   struct args_struct *env)
@@ -541,6 +553,10 @@ int task_map_stack(struct vm_file *f, struct exec_file_desc *efd,
 		return (int)mapped;
 	}
 
+	/* Prefault the stack for writing. */
+	BUG_ON(prefault_range(task, task->args_start,
+			      stack_used, VM_READ | VM_WRITE) < 0);
+
 	/* Map the stack's part that will contain args and environment */
 	if (IS_ERR(args_on_stack =
 		   pager_validate_map_user_range2(task,
@@ -585,7 +601,8 @@ int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task
 	BUG_ON(prefault_page(task, task->data_end,
 			     VM_READ | VM_WRITE) < 0);
 
 	/* Get the page */
-	last_data_page = task_virt_to_page(task, task->data_end);
+	last_data_page = task_virt_to_page(task, task->data_end,
+					   VM_READ | VM_WRITE);
 
 	/* Map the page. FIXME: PAGE COLOR!!! */
 	pagebuf = l4_map_helper((void *)page_to_phys(last_data_page), 1);
@@ -628,6 +645,7 @@ int task_map_bss(struct vm_file *f, struct exec_file_desc *efd, struct tcb *task
 	return 0;
 }
 
 int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd,
 		       struct args_struct *args, struct args_struct *env)
 {
@@ -764,13 +782,3 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
 	return 0;
 }
 
-int prefault_range(struct tcb *task, unsigned long start,
-		   unsigned long end, unsigned int vm_flags)
-{
-	for (unsigned long i = start; i < start + end; i += PAGE_SIZE)
-		prefault_page(task, i, vm_flags);
-
-	return 0;
-}
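
The replacement above also renames the second parameter from end to
size, matching the start + size loop bound, and returns the first
failure instead of discarding it. A self-contained illustration with a
stubbed prefault_page (everything here is mock except the loop shape):

    #include <assert.h>

    #define PAGE_SIZE 0x1000

    /* Mock for mm0's prefault_page(): fails on one chosen page. */
    static int prefault_page_mock(unsigned long vaddr)
    {
            return (vaddr == 0x3000) ? -14 /* stand-in for -EFAULT */ : 0;
    }

    /* Same loop shape as the new prefault_range(): stop at the first error. */
    static int prefault_range_mock(unsigned long start, unsigned long size)
    {
            int err;

            for (unsigned long i = start; i < start + size; i += PAGE_SIZE)
                    if ((err = prefault_page_mock(i)) < 0)
                            return err;

            return 0;
    }

    int main(void)
    {
            assert(prefault_range_mock(0, 0x3000) == 0);    /* stops before the bad page */
            assert(prefault_range_mock(0, 0x5000) == -14);  /* first failure is reported */
            return 0;
    }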

View File

@@ -58,14 +58,16 @@ void *pager_validate_map_user_range(struct tcb *user, void *userptr,
 		return 0;
 
 	/* Map first page and calculate the mapped address of pointer */
-	mapped = l4_map_helper((void *)page_to_phys(task_virt_to_page(user, start)), 1);
+	mapped = l4_map_helper((void *)page_to_phys(task_virt_to_page(user, start,
+								      vm_flags)), 1);
 	mapped = (void *)(((unsigned long)mapped) |
 			  ((unsigned long)(PAGE_MASK & (unsigned long)userptr)));
 
 	/* Map the rest of the pages, if any */
 	for (unsigned long i = start + PAGE_SIZE; i < end; i += PAGE_SIZE)
-		l4_map_helper((void *)page_to_phys(task_virt_to_page(user,
-								     start + i)), 1);
+		l4_map_helper((void *)page_to_phys(task_virt_to_page(user,
+								     start + i,
+								     vm_flags)), 1);
 
 	return mapped;
 }