Added new routines that map user buffers into the pager and check their validity.

mmap now uses this mechanism to fetch its argument structure from the caller's address space. This path still needs testing.
This commit is contained in:
Bahadir Balban
2008-11-03 11:27:10 +02:00
parent 87d1b91743
commit ca8959eee0
11 changed files with 131 additions and 59 deletions

View File

@@ -855,28 +855,6 @@ int page_fault_handler(struct tcb *sender, fault_kdata_t *fkdata)
return err;
}
/*
 * Checks if an address range is a validly mapped area for a task.
 *
 * Returns 0 if every page in [start, end) is covered by one of the
 * task's vmas carrying all of the requested vmflags, -EINVAL otherwise.
 *
 * FIXME: Scans the vmas page by page; could leap from one vma to the
 * next instead.
 */
int validate_task_range(struct tcb *t, unsigned long start,
			unsigned long end, unsigned int vmflags)
{
	struct vm_area *vma;

	start = page_align(start);
	end = page_align_up(end);

	/* Find the vma that maps each page in the range */
	for (unsigned long vaddr = start; vaddr < end; vaddr += PAGE_SIZE) {
		if (!(vma = find_vma(vaddr, &t->vm_area_head->list))) {
			/* vaddr is unsigned long: %lx, not %x */
			printf("%s: No VMA found for 0x%lx on task: %d\n",
			       __FUNCTION__, vaddr, t->tid);
			return -EINVAL;
		}
		/* The vma must carry every requested protection flag */
		if ((vma->flags & vmflags) != vmflags)
			return -EINVAL;
	}
	return 0;
}
/*
* Makes the virtual to page translation for a given user task.
* It traverses the vm_objects and returns the first encountered

View File

@@ -15,6 +15,7 @@
#include <string.h>
#include <globals.h>
#include <file.h>
#include <user.h>
/* Copy from one page's buffer into another page */
int page_copy(struct page *dst, struct page *src,
@@ -613,10 +614,10 @@ int sys_read(struct tcb *task, int fd, void *buf, int count)
return 0;
/* Check user buffer validity. */
if ((ret = validate_task_range(task, (unsigned long)buf,
if ((ret = pager_validate_user_range(task, buf,
(unsigned long)(buf + count),
VM_READ)) < 0)
return ret;
return -EFAULT;
vmfile = task->files->fd[fd].vmfile;
cursor = task->files->fd[fd].cursor;
@@ -683,10 +684,10 @@ int sys_write(struct tcb *task, int fd, void *buf, int count)
/* Check user buffer validity. */
if ((ret = validate_task_range(task, (unsigned long)buf,
(unsigned long)(buf + count),
VM_WRITE | VM_READ)) < 0)
return ret;
if ((ret = pager_validate_user_range(task, buf,
(unsigned long)(buf + count),
VM_WRITE | VM_READ)) < 0)
return -EINVAL;
vmfile = task->files->fd[fd].vmfile;
cursor = task->files->fd[fd].cursor;

View File

@@ -14,6 +14,8 @@
#include <mmap.h>
#include <file.h>
#include <shm.h>
#include <syscalls.h>
#include <user.h>
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long file_offset)
@@ -259,8 +261,8 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
}
/* mmap system call implementation */
void *sys_mmap(struct tcb *task, void *start, size_t length, int prot,
int flags, int fd, unsigned long pfn)
void *__sys_mmap(struct tcb *task, void *start, size_t length, int prot,
int flags, int fd, unsigned long pfn)
{
unsigned long npages = __pfn(page_align_up(length));
unsigned long base = (unsigned long)start;
@@ -319,6 +321,25 @@ void *sys_mmap(struct tcb *task, void *start, size_t length, int prot,
return do_mmap(file, __pfn_to_addr(pfn), task, base, vmflags, npages);
}
/*
 * mmap system call entry point.
 *
 * The argument structure lives in the caller's address space; it is
 * validated and mapped into the pager before use, and must be read
 * through the pager-visible mapping -- the raw user pointer `args'
 * is not necessarily accessible from this address space.
 */
void *sys_mmap(struct tcb *task, struct sys_mmap_args *args)
{
	struct sys_mmap_args *mapped_args;
	void *ret;

	/* Validate that the task owns the args buffer and map it here */
	if (!(mapped_args = pager_validate_map_user_range(task, args,
							  sizeof(*args),
							  VM_READ | VM_WRITE)))
		return PTR_ERR(-EINVAL);

	/*
	 * Read the arguments via mapped_args, not args: dereferencing
	 * the user pointer directly would bypass the mapping we just
	 * established and may fault or read the wrong memory.
	 */
	ret = __sys_mmap(task, mapped_args->start, mapped_args->length,
			 mapped_args->prot, mapped_args->flags,
			 mapped_args->fd, mapped_args->offset);

	pager_unmap_user_range(mapped_args, sizeof(*args));

	return ret;
}
/* Sets the end of data segment for sender */
int sys_brk(struct tcb *sender, void *ds_end)
{

73
tasks/mm0/src/user.c Normal file
View File

@@ -0,0 +1,73 @@
/*
* Functions to validate, map and unmap user buffers.
*
* Copyright (C) 2008 Bahadir Balban
*/
#include <l4lib/arch/syslib.h>
#include <vm_area.h>
#include <task.h>
#include <user.h>
/*
* Checks if the given user virtual address range is
* validly owned by that user with given flags.
*
* FIXME: This scans the vmas page by page, we can do it faster
* by leaping from one vma to next.
*/
int pager_validate_user_range(struct tcb *user, void *userptr, unsigned long size,
unsigned int vmflags)
{
struct vm_area *vma;
unsigned long start = page_align(userptr);
unsigned long end = page_align_up(userptr + size);
/* Find the vma that maps that virtual address */
for (unsigned long vaddr = start; vaddr < end; vaddr += PAGE_SIZE) {
if (!(vma = find_vma(vaddr, &user->vm_area_head->list))) {
printf("%s: No VMA found for 0x%x on task: %d\n",
__FUNCTION__, vaddr, user->tid);
return -1;
}
if ((vma->flags & vmflags) != vmflags)
return -1;
}
return 0;
}
/*
 * Validates and maps the user virtual address range into the pager.
 * Every virtual page needs to be mapped individually because it is
 * not guaranteed that the pages are physically contiguous.
 *
 * Returns the pager-visible address corresponding to userptr, or 0
 * if the range is not validly owned by the task with vm_flags.
 *
 * NOTE(review): the returned pointer is only usable across page
 * boundaries if l4_map_helper maps successive pages at contiguous
 * pager virtual addresses -- confirm that guarantee.
 */
void *pager_validate_map_user_range(struct tcb *user, void *userptr,
				    unsigned long size, unsigned int vm_flags)
{
	unsigned long start = page_align(userptr);
	unsigned long end = page_align_up(userptr + size);
	void *mapped = 0;

	/* Validate that the user task owns this address range */
	if (pager_validate_user_range(user, userptr, size, vm_flags) < 0)
		return 0;

	/* Map the first page and restore the pointer's in-page offset */
	mapped = l4_map_helper((void *)page_to_phys(task_virt_to_page(user,
								      start)), 1);
	mapped = (void *)(((unsigned long)mapped) |
			  ((unsigned long)(PAGE_MASK & (unsigned long)userptr)));

	/*
	 * Map the rest of the pages, if any. The loop variable is
	 * already an absolute virtual address; the previous code
	 * passed `start + vaddr', mapping pages far past the range.
	 */
	for (unsigned long vaddr = start + PAGE_SIZE; vaddr < end;
	     vaddr += PAGE_SIZE)
		l4_map_helper((void *)page_to_phys(task_virt_to_page(user,
								     vaddr)), 1);

	return mapped;
}
/*
 * Unmaps a range previously mapped by pager_validate_map_user_range().
 *
 * The second argument of l4_unmap_helper is a page count; the previous
 * code passed __pfn(base address + size), i.e. the pfn of an address,
 * which would unmap a wildly wrong number of pages. Compute the actual
 * number of pages spanned by [mapped_ptr, mapped_ptr + size) instead.
 */
void pager_unmap_user_range(void *mapped_ptr, unsigned long size)
{
	unsigned long start = page_align(mapped_ptr);
	unsigned long end = page_align_up((unsigned long)mapped_ptr + size);

	l4_unmap_helper((void *)start, __pfn(end - start));
}