Towards implementing fork.

The issue is that the shadow-object references held by original objects
point into the links (struct vm_obj_link) rather than into the shadow objects themselves.
This commit is contained in:
Bahadir Balban
2008-08-19 18:03:23 +03:00
parent f436b44e81
commit 69db3a04c0
11 changed files with 258 additions and 93 deletions

View File

@@ -87,6 +87,7 @@ struct tcb *find_task(int tid);
struct initdata;
void init_pm(struct initdata *initdata);
struct tcb *task_create(struct task_ids *ids, unsigned int flags);
int send_task_data(l4id_t requester);
#endif /* __TASK_H__ */

View File

@@ -122,7 +122,7 @@ struct vm_pager {
struct vm_object {
int npages; /* Number of pages in memory */
int refcnt; /* Number of shadows (or vmas) that refer */
struct list_head shadowers; /* List of vm objects that shadow this one */
struct list_head shadowers; /* List of links to the vm object that shadows this one */
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int flags; /* Defines the type and flags of the object */
struct list_head list; /* List of all vm objects in memory */
@@ -142,7 +142,13 @@ struct vm_file {
/* To create per-vma vm_object lists */
struct vm_obj_link {
	struct list_head list;	/* Links this entry into its vma's object chain */
	/*
	 * Ref to shadowers by original objects. This could be in the shadow
	 * object itself, but then we would not be able to reach its link
	 * when trying to free it.
	 *
	 * (The diff residue had duplicated this field: the old one-line
	 * declaration and the new documented one both survived; a struct
	 * with two members named 'shref' does not compile, so only the
	 * new declaration is kept.)
	 */
	struct list_head shref;
	struct vm_object *obj;	/* The shared, not per-process, vm object */
};
@@ -218,6 +224,9 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual);
int validate_task_range(struct tcb *t, unsigned long start,
unsigned long end, unsigned int vmflags);
/* Changes all shadows and their ptes to read-only */
int vm_freeze_shadows(struct tcb *task);
/* Main page fault entry point */
int page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);

View File

@@ -4,41 +4,79 @@
* Copyright (C) 2008 Bahadir Balban
*/
#include <syscalls.h>
#include <vm_area.h>
#include <task.h>
int copy_tcb(struct tcb *p, struct tcb *c)
/*
 * Duplicates 'from's vm_area list into 'to' by creating fresh vmas and
 * fresh vm_obj_links; the vm_objects themselves are shared rather than
 * copied, since they are not per-process data.
 *
 * NOTE(review): work-in-progress — visible defects to resolve:
 *  - 'new' is declared as a struct vm_area VALUE but is assigned the
 *    result of vma_new(), which elsewhere (do_mmap) is used as a
 *    pointer; should presumably be 'struct vm_area *new' — confirm.
 *  - elsewhere the vma list head is passed as '&task->vm_area_list'
 *    (see vm_freeze_shadows), so 'from->vm_area_list' here likely
 *    needs an '&' — confirm against the tcb definition.
 *  - the loop body ends mid-sentence: the remaining links are never
 *    walked or copied, the new vma is never attached to 'to', and the
 *    function (declared int) returns no value.
 */
int copy_vmas(struct tcb *to, struct tcb *from)
{
/* Copy program segments, file descriptors, vm areas */
struct vm_area *vma, new;
struct vm_obj_link *vmo_link, *new_link;
list_for_each_entry(vma, from->vm_area_list, list) {
/* Create a new vma */
new = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
vma->flags, vma->file_offset);
/*
 * Populate it with links to every object that the original
 * vma is linked to. Note, that we don't copy vm objects but
 * just the links to them, because vm objects are not
 * per-process data.
 */
/* Get the first object, either original file or a shadow */
if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
printf("%s:%s: No vm object in vma!\n",
__TASKNAME__, __FUNCTION__);
BUG();
}
/* Create a new link */
new_link = vm_objlink_create();
/* Copy all fields from original link.
 * E.g. if ori ...
 * NOTE(review): comment truncated in the original; it is closed here
 * so that the braces below are not swallowed into it. */
}
}
/*
* Sets all r/w shadow objects as read-only for the process
* so that copy-on-write incidents cause read faults.
*/
int vm_freeze_shadows(struct tcb *t)
/*
 * Copies 'from's process image layout (segment boundaries), vm areas
 * and file descriptors into 'to', in preparation for fork().
 *
 * NOTE(review): declared int but returns no value; and the BUG() in
 * the middle makes everything after it unreachable — the surrounding
 * comments ("Make all writeable shadow entries...", "UTCB ???") look
 * like leftovers of the old vm_freeze_shadows body interleaved by the
 * diff. Confirm which lines are actually live before building on this.
 */
int copy_tcb(struct tcb *to, struct tcb *from)
{
/* Make all shadows read-only */
/* Copy program segments, file descriptors, vm areas */
/* Overall address space extent */
to->start = from->start;
to->end = from->end;
/* Program segment boundaries */
to->text_start = from->text_start;
to->text_end = from->text_end;
to->data_start = from->data_start;
to->data_end = from->data_end;
to->bss_start = from->bss_start;
to->bss_end = from->bss_end;
to->stack_start = from->stack_start;
to->stack_end = from->stack_end;
to->heap_start = from->heap_start;
to->heap_end = from->heap_end;
/* Environment and argument regions */
to->env_start = from->env_start;
to->env_end = from->env_end;
to->args_start = from->args_start;
to->args_end = from->args_end;
/* mmap region */
to->map_start = from->map_start;
to->map_end = from->map_end;
/*
 * Make all writeable shadow entries
 * in the page table as read-only
 */
/* UTCB ??? */
/* NOTE(review): halts here — code below is currently dead */
BUG();
/* Copy all vm areas */
copy_vmas(to, from);
/* Copy all file descriptors */
memcpy(to->fd, from->fd,
TASK_FILES_MAX * sizeof(struct file_descriptor));
}
int do_fork(struct tcb *parent)
{
struct tcb *child;
/* Make all parent shadows read only */
vm_freeze_shadows(parent);
/* Create a new L4 thread with new space */
l4_thread_create(parent);
/* Create a new local tcb */
child = tcb_alloc_init();
/* Copy parent tcb to child */
copy_tcb(struct tcb *parent, struct tcb *child);
struct task_ids ids = { .tid = TASK_ID_INVALID, .spid = TASK_ID_INVALID };
/*
* Allocate and copy parent pgd + all pmds to child.
@@ -54,6 +92,16 @@ int do_fork(struct tcb *parent)
* every one of them will have to fault on frozen shadows individually.
*/
/* Make all shadows in this task read-only */
vm_freeze_shadows(parent);
/* Create a new L4 thread with parent's page tables copied */
ids.spid = parent->spid;
child = task_create(&ids, THREAD_CREATE_COPYSPACE);
/* Copy parent tcb to child */
copy_tcb(child, parent);
/* FIXME: Need to copy parent register values to child ??? */
/* Notify fs0 about forked process */

View File

@@ -19,6 +19,12 @@
#include <shm.h>
#include <file.h>
/* Given a page and the vma it is in, returns that page's virtual address */
unsigned long vma_page_to_virtual(struct vm_area *vma, struct page *p)
{
	/* Page offsets are vma-relative pfn offsets; add the vma base pfn */
	unsigned long pfn = vma->pfn_start + p->offset;

	return __pfn_to_addr(pfn);
}
unsigned long fault_to_file_offset(struct fault_data *fault)
{
/* Fault's offset in its vma */
@@ -53,7 +59,7 @@ struct vm_obj_link *vma_next_link(struct list_head *link,
return list_entry(link->next, struct vm_obj_link, list);
}
/* Unlinks orig_link from its vma and deletes it but keeps the object. */
int vma_drop_link(struct vm_obj_link *shadower_link,
struct vm_obj_link *orig_link)
{
@@ -215,6 +221,7 @@ struct vm_obj_link *vma_create_shadow(void)
struct vm_object *vmo;
struct vm_obj_link *vmo_link;
/* FIXME: Why not use vm_objlink_create() ??? */
if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
return 0;
@@ -507,6 +514,60 @@ out_success:
return 0;
}
/*
 * Sets all r/w shadow objects as read-only for the process
 * so that as expected after a fork() operation, writes to those
 * objects cause copy-on-write incidents.
 *
 * Returns 0 on success (no failure paths currently exist).
 */
int vm_freeze_shadows(struct tcb *task)
{
	unsigned long virtual;
	struct vm_area *vma;
	struct vm_obj_link *vmo_link;
	struct vm_object *vmo;
	struct page *p;

	list_for_each_entry(vma, &task->vm_area_list, list) {
		/* Shared vmas don't have shadows */
		if (vma->flags & VMA_SHARED)
			continue;

		/*
		 * Walk the vma's object chain front-to-back.
		 *
		 * FIX: the previous loop re-fetched the head's first link
		 * on every iteration (both arguments were the list head),
		 * so whenever the front object was not a writable shadow
		 * the loop never advanced — an infinite loop. Advance the
		 * cursor from the current link instead.
		 */
		vmo_link = vma_next_link(&vma->vm_obj_list,
					 &vma->vm_obj_list);
		while (vmo_link) {
			vmo = vmo_link->obj;

			/* Only a writable shadow needs freezing */
			if ((vmo->flags & VM_OBJ_SHADOW) &&
			    (vmo->flags & VM_WRITE)) {
				/* Make the object read-only */
				vmo->flags &= ~VM_WRITE;
				vmo->flags |= VM_READ;

				/*
				 * Remap every page currently in the
				 * object's cache as read-only so the
				 * next write faults and triggers
				 * copy-on-write.
				 */
				list_for_each_entry(p, &vmo->page_cache,
						    list) {
					/* Virtual address of this page */
					virtual = vma_page_to_virtual(vma, p);
					/* Map the page as read-only */
					l4_map((void *)page_to_phys(p),
					       (void *)virtual,
					       MAP_USR_RO_FLAGS, task->tid);
				}
				/*
				 * Presumably only the topmost object
				 * can be a writable shadow; deeper ones
				 * are already read-only, hence the
				 * early stop — TODO confirm.
				 */
				break;
			}

			/* Advance to the next link in the chain */
			vmo_link = vma_next_link(&vmo_link->list,
						 &vma->vm_obj_list);
		}
	}
	return 0;
}
#if 0
/*
* Old function, likely to be ditched.

View File

@@ -346,8 +346,7 @@ int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)
}
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long file_offset,
struct vm_file *mapfile)
unsigned int flags, unsigned long file_offset)
{
struct vm_area *vma;
@@ -526,8 +525,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
}
/* For valid regions that aren't allocated by us, create the vma. */
if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset,
mapfile)))
if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset)))
return -ENOMEM;
/* Attach the file as the first vm object of this vma */

View File

@@ -76,13 +76,13 @@ struct tcb *tcb_alloc_init(void)
}
struct tcb *task_create(struct task_ids *ids)
struct tcb *task_create(struct task_ids *ids, unsigned int flags)
{
struct tcb *task;
int err;
/* Create the thread structures and address space */
if ((err = l4_thread_control(THREAD_CREATE, ids)) < 0) {
if ((err = l4_thread_control(THREAD_CREATE | flags, ids)) < 0) {
printf("l4_thread_control failed with %d.\n", err);
return PTR_ERR(err);
}
@@ -262,7 +262,7 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
struct tcb *task;
int err;
if (IS_ERR(task = task_create(ids)))
if (IS_ERR(task = task_create(ids, THREAD_CREATE_NEWSPC)))
return (int)task;
if ((err = task_setup_regions(f, task, task_region_start,