Prefaulting of UTCB pages seems to work.

Next: fs0 should shmat() on others' utcbs.
This commit is contained in:
Bahadir Balban
2008-03-22 15:57:20 +00:00
parent 0dfbde6bc0
commit 82a7228d89
9 changed files with 211 additions and 78 deletions

View File

@@ -18,7 +18,7 @@
#include INC_SUBARCH(mm.h)
/* Abort debugging conditions */
//#define DEBUG_ABORTS
// #define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) dprintk(__VA_ARGS__)
#else

View File

@@ -68,8 +68,9 @@ struct tcb {
unsigned long map_start;
unsigned long map_end;
/* UTCB address */
/* UTCB information */
unsigned long utcb_address;
int utcb_mapped;
/* Virtual memory areas */
struct list_head vm_area_list;
@@ -83,6 +84,6 @@ struct tcb *find_task(int tid);
struct initdata;
void init_pm(struct initdata *initdata);
void send_task_data(l4id_t requester);
int send_task_data(l4id_t requester);
#endif /* __TASK_H__ */

View File

@@ -2,6 +2,7 @@
#define __MM0_UTCB_H__
#include <l4lib/types.h>
#include <task.h>
void *utcb_vaddr_new(void);
int utcb_pool_init(void);
@@ -9,4 +10,7 @@ int utcb_pool_init(void);
/* IPC to send utcb address information to tasks */
int task_send_utcb_address(l4id_t sender, l4id_t taskid);
/* Prefault an *mmaped* utcb */
int utcb_prefault(struct tcb *task, unsigned int vmflags);
#endif

View File

@@ -81,6 +81,7 @@ struct fault_data {
fault_kdata_t *kdata; /* Generic data provided by the kernel */
unsigned int reason; /* Generic fault reason flags */
unsigned int address; /* Aborted address */
unsigned int pte_flags; /* Generic protection flags on pte */
struct vm_area *vma; /* Inittask-related fault data */
struct tcb *task; /* Inittask-related fault data */
};
@@ -206,7 +207,14 @@ struct vm_file *vm_file_create(void);
int vm_object_delete(struct vm_object *vmo);
void vm_object_print(struct vm_object *vmo);
/* Used for pre-faulting a page from mm0 */
int prefault_page(struct tcb *task, unsigned long address,
unsigned int vmflags);
/* To get currently mapped page of a virtual address on a task */
struct page *task_virt_to_page(struct tcb *t, unsigned long virtual);
/* Main page fault entry point */
void page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);
int page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);
#endif /* __VM_AREA_H__ */

View File

@@ -26,7 +26,7 @@ unsigned int vm_prot_flags(pte_t pte)
}
#if defined(DEBUG_FAULT_HANDLING)
void print_fault_params(struct fault_data *fault)
void arch_print_fault_params(struct fault_data *fault)
{
printf("%s: Handling %s fault (%s abort) from %d. fault @ 0x%x\n",
__TASKNAME__, (fault->reason & VM_READ) ? "read" : "write",
@@ -34,7 +34,7 @@ void print_fault_params(struct fault_data *fault)
fault->task->tid, fault->address);
}
#else
void print_fault_params(struct fault_data *fault) { }
void arch_print_fault_params(struct fault_data *fault) { }
#endif
@@ -49,7 +49,9 @@ void print_fault_params(struct fault_data *fault) { }
void set_generic_fault_params(struct fault_data *fault)
{
unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
fault->reason = 0;
fault->pte_flags = prot_flags;
if (is_prefetch_abort(fault->kdata->fsr)) {
fault->reason |= VM_READ;
@@ -65,6 +67,6 @@ void set_generic_fault_params(struct fault_data *fault)
else
BUG();
}
print_fault_params(fault);
arch_print_fault_params(fault);
}

View File

@@ -416,7 +416,7 @@ int __do_page_fault(struct fault_data *fault)
{
unsigned int reason = fault->reason;
unsigned int vma_flags = fault->vma->flags;
unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
unsigned int pte_flags = fault->pte_flags;
struct vm_area *vma = fault->vma;
struct vm_obj_link *vmo_link;
unsigned long file_offset;
@@ -452,6 +452,8 @@ int __do_page_fault(struct fault_data *fault)
(void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
/* Print about the action, if debug enabled */
dprintf("%s: Mapped 0x%x as readable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo_link->obj);
@@ -858,7 +860,6 @@ int do_page_fault(struct fault_data *fault)
{
unsigned int vma_flags = (fault->vma) ? fault->vma->flags : VM_NONE;
unsigned int reason = fault->reason;
int err;
/* vma flags show no access */
if (vma_flags & VM_NONE) {
@@ -880,15 +881,12 @@ int do_page_fault(struct fault_data *fault)
}
/* Handle legitimate faults */
__do_page_fault(fault);
/* Return the ipc and by doing so restart the faulty thread */
l4_ipc_return(err);
return 0;
return __do_page_fault(fault);
}
void page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
int page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
{
int err;
struct fault_data fault = {
/* Fault data from kernel */
.kdata = fkdata,
@@ -909,6 +907,118 @@ void page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
"Bad things will happen.\n");
/* Handle the actual fault */
do_page_fault(&fault);
err = do_page_fault(&fault);
/*
* Return the ipc and by doing so restart the faulty thread.
* FIXME: We could kill the thread if there was an error???
* Perhaps via a kill message to kernel?
*/
l4_ipc_return(err);
return 0;
}
/*
 * Makes the virtual-to-page translation for a given user task.
 * Traverses the vma's chain of vm_objects (shadows first, then the
 * original file) and returns the first encountered instance of the
 * page backing @virtual. If no vma covers the address at all,
 * returns an error pointer.
 */
struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
{
	unsigned long vma_offset;
	unsigned long file_offset;
	struct vm_obj_link *vmo_link;
	struct vm_area *vma;
	struct page *page;

	/* First find the vma that maps that virtual address */
	if (!(vma = find_vma(virtual, &t->vm_area_list))) {
		printf("%s: No VMA found for 0x%x on task: %d\n",
		       __FUNCTION__, virtual, t->tid);
		/*
		 * NOTE(review): under the usual Linux-style convention this
		 * should be ERR_PTR(-EINVAL) (errno -> pointer), not
		 * PTR_ERR(-EINVAL) (pointer -> errno). Kept as-is pending
		 * confirmation of this project's macro semantics.
		 */
		return PTR_ERR(-EINVAL);
	}

	/* Find the pfn offset of virtual address in this vma */
	BUG_ON(__pfn(virtual) < vma->pfn_start ||
	       __pfn(virtual) > vma->pfn_end);
	vma_offset = __pfn(virtual) - vma->pfn_start;

	/* Find the file offset of virtual address in this file */
	file_offset = vma->file_offset + vma_offset;

	/* Get the first object, either original file or a shadow */
	if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
		printf("%s:%s: No vm object in vma!\n",
		       __TASKNAME__, __FUNCTION__);
		BUG();
	}

	/* Traverse the list of read-only vm objects and search for the page */
	while (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
							       file_offset))) {
		if (!(vmo_link = vma_next_link(&vmo_link->list,
					       &vma->vm_obj_list))) {
			printf("%s:%s: Traversed all shadows and the original "
			       "file's vm_object, but could not find the "
			       "faulty page in this vma.\n",__TASKNAME__,
			       __FUNCTION__);
			BUG();
		}
	}

	/*
	 * Found it. FIX: the original format string had 3 conversions but
	 * 4 arguments (the page pointer was printed under the "file_offset"
	 * label and page->offset was dropped) — undefined behavior.
	 */
	printf("%s: %s: Found page @ 0x%x with file_offset: 0x%x. vm object:\n",
	       __TASKNAME__, __FUNCTION__, (unsigned long)page, page->offset);
	vm_object_print(vmo_link->obj);

	return page;
}
/*
 * Prefaults the page at the given virtual address into the given
 * task's address space for the given reasons. Multiple reasons are
 * allowed; they are handled separately and in order — read before
 * write — so that a writeable request ends up with a present,
 * writeable page rather than a read-only one.
 *
 * Returns 0 on success, negative errno on failure.
 */
int prefault_page(struct tcb *task, unsigned long address,
		  unsigned int vmflags)
{
	int err;
	struct fault_data fault = {
		.task = task,
		.address = address,
	};

	/*
	 * Only read and write faults can be simulated. FIX: reject any
	 * other flag up front, before mutating the task's address space
	 * (the original performed this check only after faulting).
	 */
	BUG_ON(vmflags & ~(VM_READ | VM_WRITE));

	dprintf("Pre-faulting address 0x%x, on task %d, with flags: 0x%x\n",
		address, task->tid, vmflags);

	/* Find the vma covering the address; no vma means a bad request */
	if (!(fault.vma = find_vma(fault.address,
				   &fault.task->vm_area_list))) {
		err = -EINVAL;
		dprintf("%s: Invalid: No vma for given address. %d\n",
			__FUNCTION__, err);
		return err;
	}

	/* Flags may indicate multiple fault reasons. First do the read */
	if (vmflags & VM_READ) {
		/* Simulate a read fault on a not-present pte */
		fault.pte_flags = VM_NONE;
		fault.reason = VM_READ;
		if ((err = do_page_fault(&fault)) < 0)
			return err;
	}

	/* Now write */
	if (vmflags & VM_WRITE) {
		/* Simulate a write fault on a read-only pte */
		fault.pte_flags = VM_READ;
		fault.reason = VM_WRITE;
		if ((err = do_page_fault(&fault)) < 0)
			return err;
	}

	return 0;
}

View File

@@ -7,6 +7,7 @@
#include <stdio.h>
#include <task.h>
#include <mmap.h>
#include <utcb.h>
#include <vm_area.h>
#include <l4/lib/string.h>
#include <kmalloc/kmalloc.h>
@@ -52,16 +53,15 @@ void shm_init()
* the descriptor has an address, allocates one from the shm address pool.
*/
static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
l4id_t tid)
struct tcb *task)
{
struct shm_descriptor *shm = shm_file_to_desc(shm_file);
struct tcb *task = find_task(tid);
unsigned int vmflags;
int err;
if (!task) {
printf("%s:%s: Cannot find caller task with tid %d\n",
__TASKNAME__, __FUNCTION__, tid);
__TASKNAME__, __FUNCTION__, task->tid);
BUG();
}
@@ -107,11 +107,22 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
int sys_shmat(l4id_t requester, l4id_t shmid, void *shmaddr, int shmflg)
{
struct vm_file *shm_file, *n;
struct tcb *task = find_task(requester);
list_for_each_entry_safe(shm_file, n, &shm_file_list, list) {
if (shm_file_to_desc(shm_file)->shmid == shmid) {
shmaddr = do_shmat(shm_file, shmaddr,
shmflg, requester);
shmflg, task);
/*
* UTCBs get special treatment here. If the task
* is attaching to its utcb, mm0 prefaults it so
* that it can access it later on whether or not
* the task makes a syscall to mm0 without first
* faulting the utcb.
*/
if ((unsigned long)shmaddr == task->utcb_address)
utcb_prefault(task, VM_READ | VM_WRITE);
l4_ipc_return((int)shmaddr);
return 0;
@@ -216,7 +227,7 @@ int sys_shmget(key_t key, int size, int shmflg)
if (npages > SHM_SHMMAX || npages < SHM_SHMMIN) {
l4_ipc_return(-EINVAL);
return 0;
} else
}
/*
* IPC_PRIVATE means create a no-key shm area, i.e. private to this
@@ -236,8 +247,8 @@ int sys_shmget(key_t key, int size, int shmflg)
if (shm_desc->key == key) {
/*
* Exclusive means create request
* on existing key should fail.
* Exclusive means a create request
* on an existing key should fail.
*/
if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
l4_ipc_return(-EEXIST);

View File

@@ -359,55 +359,6 @@ void init_pm(struct initdata *initdata)
start_boot_tasks(initdata);
}
/*
 * Makes the virtual to page translation for a given user task.
 * If page is not mapped (either not faulted or swapped), returns 0.
 *
 * NOTE(review): this is the version being deleted by this commit,
 * replaced by the page_in-based task_virt_to_page elsewhere in the
 * diff. Two latent bugs in it are flagged below for the record.
 */
struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
{
unsigned long vma_offset;
unsigned long file_offset;
struct vm_obj_link *vmo_link;
struct vm_area *vma;
struct page *page;
/* First find the vma that maps that virtual address */
if (!(vma = find_vma(virtual, &t->vm_area_list))) {
printf("%s: No VMA found for 0x%x on task: %d\n",
__FUNCTION__, virtual, t->tid);
/* NOTE(review): PTR_ERR(-EINVAL) used where an error *pointer*
 * is returned; ERR_PTR would be the conventional macro. */
return PTR_ERR(-EINVAL);
}
/* Find the pfn offset of virtual address in this vma */
BUG_ON(__pfn(virtual) < vma->pfn_start ||
__pfn(virtual) > vma->pfn_end);
vma_offset = __pfn(virtual) - vma->pfn_start;
/* Find the file offset of virtual address in this file */
file_offset = vma->file_offset + vma_offset;
/* Get the initial link */
BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
&vma->vm_obj_list)));
/* Is page there in the cache ??? */
while(!(page = find_page(vmo_link->obj, file_offset))) {
/* No, check the next link */
/* NOTE(review): stray ';' terminates this 'if', so the
 * 'return 0' below executes unconditionally on the first
 * cache miss. Also, vma_next_link is re-called with the
 * list head as "current", so it would never advance. */
if (!(vmo_link = vma_next_link(&vma->vm_obj_list,
&vma->vm_obj_list)));
/* Exhausted the objects. The page is not there. */
return 0;
}
/* Found it */
printf("%s: %s: Found page @ 0x%x, file_offset: 0x%x, "
"with vma @ 0x%x, vm object @ 0x%x\n", __TASKNAME__,
__FUNCTION__, (unsigned long)page, page->offset,
vma, vmo_link->obj);
return page;
}
struct task_data {
unsigned long tid;
unsigned long utcb_address;
@@ -423,7 +374,7 @@ struct task_data_head {
* are running, and their tids, which includes itself. This function
* provides that information.
*/
void send_task_data(l4id_t requester)
int send_task_data(l4id_t requester)
{
int li, err;
struct tcb *t, *vfs;
@@ -433,15 +384,21 @@ void send_task_data(l4id_t requester)
printf("%s: Task data requested by %d, which is not "
"FS0 id %d, ignoring.\n", __TASKNAME__, requester,
VFS_TID);
return;
return 0;
}
BUG_ON(!(vfs = find_task(requester)));
BUG_ON(!vfs->utcb_address);
/* Map in vfs's utcb. FIXME: Whatif it is already mapped? */
l4_map((void *)page_to_phys(task_virt_to_page(vfs, vfs->utcb_address)),
(void *)vfs->utcb_address, 1, MAP_USR_RW_FLAGS, self_tid());
/*
* When task does shmat() for its utcb, mm0 prefaults and maps it
to itself, and sets this flag. Check that this has occurred
* to ensure we have access to it. Otherwise return error.
*/
if (!vfs->utcb_mapped) {
l4_ipc_return(-ENOENT);
return 0;
}
/* Write all requested task information to utcb's user buffer area */
tdata_head = (struct task_data_head *)vfs->utcb_address;
@@ -462,5 +419,7 @@ void send_task_data(l4id_t requester)
printf("%s: L4 IPC Error: %d.\n", __FUNCTION__, err);
BUG();
}
return 0;
}

View File

@@ -11,6 +11,7 @@
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <task.h>
#include <vm_area.h>
#include INC_GLUE(memlayout.h)
static struct address_pool utcb_vaddr_pool;
@@ -35,7 +36,8 @@ void *utcb_vaddr_new(void)
/*
* Sends utcb address information to requester task, allocates
* an address if it doesn't exist and the requester is asking
* for its own.
* for its own. The requester then uses this address as a shm key and
* maps its own utcb via shmget/shmat.
*/
int task_send_utcb_address(l4id_t sender, l4id_t taskid)
{
@@ -69,5 +71,41 @@ int task_send_utcb_address(l4id_t sender, l4id_t taskid)
return 0;
}
/*
 * Triggered during a sys_shmat() by a client task mapping its utcb.
 * Prefaults the utcb into the client's address space, then maps the
 * backing page into mm0 itself so mm0 can access it freely later.
 */
int utcb_prefault(struct tcb *task, unsigned int vmflags)
{
	unsigned long utcb_virt = task->utcb_address;
	struct page *utcb_page;
	int ret;

	/* Fault the page into the owning task with the requested flags */
	ret = prefault_page(task, utcb_virt, vmflags);
	if (ret < 0) {
		printf("%s: Failed: %d\n", __FUNCTION__, ret);
		return ret;
	}

	/*
	 * Look up the page now backing the utcb. Having prefaulted both
	 * VM_READ and VM_WRITE, the topmost page is the writeable copy
	 * rather than a read-only one.
	 */
	utcb_page = task_virt_to_page(task, utcb_virt);
	if (!utcb_page || IS_ERR(utcb_page)) {
		printf("%s: Cannot retrieve task %d's utcb page.\n",
		       __FUNCTION__, task->tid);
		BUG();
	}

	/* Map the same physical page into mm0 at the task's utcb address */
	l4_map((void *)page_to_phys(utcb_page), (void *)utcb_virt, 1,
	       MAP_USR_RW_FLAGS, self_tid());

	/* Record that this task's utcb is mapped into mm0 as r/w */
	task->utcb_mapped = 1;

	return 0;
}