Preparing to add utcbs at disjoint virtual addresses.

This will help when syscalls have long arguments: individual
utcbs can be mapped to server tasks and kept mapped until the
tasks die, as opposed to issuing a map request every time a server task
maps a different utcb at the same virtual address.

The changes include preparation code to also pass the utcb info
through the stack as part of the environment.

To sum up, env and arg regions have also been added above the stack, and
the env region is to be used to pass on the utcb address information at
task startup.
This commit is contained in:
Bahadir Balban
2008-02-28 00:25:04 +00:00
parent 6e6a92be8b
commit e7b0e46065
10 changed files with 91 additions and 41 deletions

View File

@@ -26,7 +26,7 @@ extern __l4_thread_switch_t __l4_thread_switch;
unsigned int l4_thread_switch (u32 dest);
typedef int (*__l4_getid_t)(struct task_ids *ids);
extern __l4_getid_t __l4_getpid;
extern __l4_getid_t __l4_getid;
int l4_getid(struct task_ids *ids);
typedef int (*__l4_ipc_t)(l4id_t to, l4id_t from);

View File

@@ -21,15 +21,17 @@ struct utcb {
u32 tid; /* Thread id */
/*
* This field is used by servers as the ptr to current tcb,
* i.e. the task that this server is serving to.
* For passing ipc data larger than mrs,
* that is, if the callee is allowed to map it
*/
unsigned long usr_handle;
char userbuf[];
};
extern struct utcb *utcb;
/*
 * Return this task's utcb pointer.
 *
 * The pointer is the task-local `utcb` global (declared extern above and
 * defined in the library init code), set up during task startup. This
 * replaces the old scheme of dereferencing the fixed USER_UTCB_REF
 * address, since utcbs now live at disjoint virtual addresses per task.
 *
 * Fix: the block contained two consecutive return statements (the stale
 * pre-refactor `return *(struct utcb **)USER_UTCB_REF;` ahead of the new
 * `return utcb;`, leaving the intended one unreachable) plus a leftover
 * commented-out line. Only the intended return is kept.
 */
static inline struct utcb *l4_get_utcb()
{
	return utcb;
}
/* Functions to read/write utcb registers */

View File

@@ -14,7 +14,6 @@
/* SHMID used between FS0 and BLKDEV0 servers */
#define FS_BLKDEV_SHMID 0
/*** IPC Tags used between server tasks ***/
/* For ping ponging */

View File

@@ -20,6 +20,9 @@ __l4_kmem_reclaim_t __l4_kmem_reclaim = 0;
struct kip *kip;
/* UTCB address of this task. */
struct utcb *utcb;
void __l4_init(void)
{
kip = l4_kernel_interface(0, 0, 0);

View File

@@ -54,6 +54,13 @@ struct tcb {
unsigned long stack_end; /* Exclusive of last currently mapped page */
unsigned long heap_start;
unsigned long heap_end; /* Exclusive of last currently mapped page */
unsigned long env_start;
unsigned long env_end;
unsigned long args_start;
unsigned long args_end;
/* UTCB address */
unsigned long utcb_addr;
/* Virtual memory areas */
struct list_head vm_area_list;

View File

@@ -14,11 +14,11 @@
#include <l4/api/thread.h>
#include <l4/api/space.h>
#include <l4/api/ipc.h>
#include <shm.h>
#include <task.h>
#include <vm_area.h>
#include <syscalls.h>
#include <file.h>
#include <shm.h>
#include <task.h>
void handle_requests(void)
{

View File

@@ -252,6 +252,12 @@ int do_file_page(struct fault_data *fault)
return 0;
}
/*
 * Tell whether the faulting page belongs to the environment/argument
 * region, i.e. it lies at or above the page-aligned top of the task's
 * stack. Such pages carry startup data (e.g. the utcb address) rather
 * than ordinary anonymous memory.
 */
int is_env_arg_page(struct fault_data *fault)
{
	unsigned long stack_top = page_align(fault->task->stack_end);

	return fault->address >= stack_top;
}
/*
* Handles any page allocation or file ownership change for anonymous pages.
* For read accesses initialises a wired-in zero page and for write accesses
@@ -281,24 +287,21 @@ int do_anon_page(struct fault_data *fault)
return 0;
}
/* For non-existent pages just map the zero page. */
if (fault->reason & VM_READ) {
/* For non-existent pages just map the zero page, unless it is the
 * beginning of the stack, which requires environment and argument data. */
if (fault->reason & VM_READ && is_env_arg_page(fault)) {
/*
* Zero page is a special wired-in page that is mapped
* many times in many tasks. Just update its count field.
*/
paddr = get_zero_page();
#if defined(SHM_DISJOINT_VADDR_POOL)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RO_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED
#endif
}
/* Write faults require a real zero initialised page */
if (fault->reason & VM_WRITE) {
if (fault->reason & VM_WRITE || is_env_arg_page(fault)) {
paddr = alloc_page(1);
vaddr = phys_to_virt(paddr);
page = phys_to_page(paddr);
@@ -316,16 +319,15 @@ int do_anon_page(struct fault_data *fault)
/* Clear the page */
memset((void *)vaddr, 0, PAGE_SIZE);
if (is_env_arg_page(fault))
/* TODO: Fill in environment information here. */
/* Remove temporary mapping */
l4_unmap((void *)vaddr, 1, self_tid());
#if defined(SHM_DISJOINT_VADDR_POOL)
/* Map the page to task */
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED.
#endif
spin_lock(&page->lock);
/* vma's swap file owns this page */

View File

@@ -198,7 +198,12 @@ int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
void *vaddr = phys_to_virt(paddr);
page = phys_to_page(paddr);
/* Map new page at a self virtual address temporarily */
/*
* Map new page at a self virtual address.
* NOTE: this is not unmapped here but in
* read_cache_pages where mm0's work with the
* page is done.
*/
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/* Read-in the page using the file's pager */
@@ -208,11 +213,6 @@ int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
page->count++;
page->owner = vmfile;
page->f_offset = f_offset;
/* TODO:
* Page is not mapped into any address space except mm0.
* Shall we add mm0 vaddr here ???
*/
page->virtual = 0;
/* Add the page to owner's list of in-memory pages */
@@ -232,6 +232,7 @@ int read_cache_pages(struct vm_file *vmfile, void *buf, unsigned long pfn_start,
{
struct page *head, *next;
int copysize, left;
void *page_virtual;
list_for_each_entry(head, &vmfile->page_cache_list, list)
if (head->f_offset == pfn_start)
@@ -241,20 +242,27 @@ int read_cache_pages(struct vm_file *vmfile, void *buf, unsigned long pfn_start,
copy:
left = count;
/* Copy the first page */
/*
* This function assumes the pages are already in-memory and
* they are mapped into the current address space.
*/
page_virtual = phys_to_virt((void *)page_to_phys(head));
/* Copy the first page and unmap it from current task. */
copysize = (left <= PAGE_SIZE) ? left : PAGE_SIZE;
memcpy(buf, (void *)phys_to_virt((void *)page_to_phys(head)) + offset,
memcpy(buf, page_virtual + offset,
copysize);
left -= copysize;
l4_unmap(page_virtual, 1, self_tid());
/* Copy the rest. Urgh, lots of arithmetic here. */
/* Copy the rest and unmap. */
list_for_each_entry(next, &head->list, list) {
if (left == 0 || next->f_offset == pfn_end)
break;
copysize = (left <= PAGE_SIZE) ? left : PAGE_SIZE;
memcpy(buf + count - left,
(void *)phys_to_virt((void *)page_to_phys(next)),
copysize);
page_virtual = phys_to_virt((void *)page_to_phys(next));
memcpy(buf + count - left, page_virtual, copysize);
l4_unmap(page_virtual, 1, self_tid());
left -= copysize;
}
BUG_ON(left != 0);

View File

@@ -10,18 +10,23 @@
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <task.h>
#include <shm.h>
#include <file.h>
#include <init.h>
#include <utcb.h>
/* Initialise the utcb virtual address pool and its own utcb */
/*
 * Initialise this task's (the pager's) own utcb: grab a fresh virtual
 * address from the disjoint utcb address pool, back it with a newly
 * allocated physical page, and map it into our own address space.
 *
 * Fix: as rendered, the block declared `utcb_page` twice (a compile
 * error) and retained the stale pre-refactor path (`l4_getid(&ids)` and
 * the old `l4_map(..., l4_get_utcb(), ..., ids.tid)` call) alongside the
 * new `utcb_vaddr_new()`/`self_tid()` path. Only the new, disjoint-vaddr
 * version is kept; the now-unused `ids` is dropped.
 */
void init_utcb(void)
{
	void *utcb_virt, *utcb_page;

	/* Allocate and map one for self */
	utcb_virt = utcb_vaddr_new();
	printf("%s: Mapping 0x%x as utcb to self.\n", __TASKNAME__, utcb_virt);
	utcb_page = alloc_page(1);
	l4_map(utcb_page, utcb_virt, 1, MAP_USR_RW_FLAGS, self_tid());
}
void init_mm(struct initdata *initdata)

View File

@@ -21,6 +21,7 @@
#include <vm_area.h>
#include <memory.h>
#include <file.h>
#include <utcb.h>
struct tcb_head {
struct list_head list;
@@ -70,6 +71,9 @@ struct tcb *create_init_tcb(struct tcb_head *tcbs)
list_add_tail(&task->list, &tcbs->list);
tcbs->total++;
/* Allocate a utcb virtual address */
task->utcb_addr = (unsigned long)utcb_vaddr_new();
return task;
}
@@ -102,10 +106,6 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
ids.spid = -1;
}
/* Set up task's registers */
sp = align(USER_AREA_END - 1, 8);
pc = USER_AREA_START;
/* Create vm file and tcb */
file = vmfile_alloc_init();
task = create_init_tcb(tcbs);
@@ -119,6 +119,28 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
file->pager = &boot_file_pager;
list_add(&file->list, &initdata->boot_file_list);
/*
* Setup task's regions so that they are taken into account
* during page faults.
*/
task->stack_start = USER_AREA_END - PAGE_SIZE * 4;
/* Next address after 8 spaces, and 8-byte alignment */
task->stack_end = align(USER_AREA_END - 8, 8) + sizeof(int);
/* No argument space, but 8 bytes for utcb address environment */
task->env_start = task->stack_end;
task->env_end = USER_AREA_END;
task->args_start = task->env_start;
task->args_end = task->env_start;
/* Only text start is valid */
task->text_start = USER_AREA_START;
/* Set up task's registers */
sp = align(task->stack_end - 1, sizeof(int));
pc = task->text_start;
/* mmap each task's physical image to task's address space. */
if ((err = do_mmap(file, 0, task, USER_AREA_START,
VM_READ | VM_WRITE | VM_EXEC,
@@ -128,14 +150,16 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
}
/* mmap each task's stack as 4-page anonymous memory. */
if ((err = do_mmap(0, 0, task, USER_AREA_END - PAGE_SIZE * 4,
if ((err = do_mmap(0, 0, task, task->stack_start,
VM_READ | VM_WRITE | VMA_ANON, 4) < 0)) {
printf("do_mmap: Mapping stack failed with %d.\n", err);
goto error;
}
/* mmap each task's utcb as single page anonymous memory. */
if ((err = do_mmap(0, 0, task, (unsigned long)l4_get_utcb(),
printf("%s: Mapping utcb for new task at: 0x%x\n", __TASKNAME__,
task->utcb_addr);
if ((err = do_mmap(0, 0, task, task->utcb_addr,
VM_READ | VM_WRITE | VMA_ANON, 1) < 0)) {
printf("do_mmap: Mapping utcb failed with %d.\n", err);
goto error;