Minor changes in README. Added fault debugging printfs that can be turned on/off.

Tasks boot fine up to the point of doing ipc using their utcbs.

UTCB PLAN:

- Push ipc registers into a private environment instead of a shared utcb,
  but map in a shared utcb to pass long data to server tasks.
- The shared utcb has a unique virtual address for every thread.
- A forked child inherits the parent's utcb, but cannot use it to communicate
  with any server; it must explicitly obtain its own utcb for that.
- Clone could have a flag to explicitly not inherit the parent's utcb, which is
  the right thing to do.
- MM0 serves a syscall for a task to obtain its own utcb (see the sketch after
  this list).
- With this method, forked tasks don't need to map in a utcb unless they want
  to pass long data.
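
A minimal sketch of the planned "obtain own utcb" request, assuming hypothetical
names throughout: the request tag, MM0's thread id and the ipc primitives
(write_mr, read_mr, l4_ipc_call) are placeholders, not this tree's actual API.

/* Assumed ipc primitives -- illustrative only, not the project's real calls. */
extern void write_mr(int idx, unsigned long val);
extern unsigned long read_mr(int idx);
extern int l4_ipc_call(int to_tid);

#define MM0_TID		1	/* pager thread id, assumed */
#define L4_UTCB_GET	0x10	/* hypothetical "give me my utcb" tag */

void *task_get_utcb(void)
{
	/* Ask MM0 for a private utcb; MM0 picks a virtual address
	 * unique to this thread and maps the utcb page there. */
	write_mr(0, L4_UTCB_GET);
	if (l4_ipc_call(MM0_TID) < 0)
		return 0;

	/* MM0 replies with the virtual address it mapped the utcb at. */
	return (void *)read_mr(0);
}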
Bahadir Balban
2008-03-17 17:09:19 +00:00
parent 509e949983
commit d2aa9a552b
6 changed files with 86 additions and 50 deletions


@@ -14,6 +14,13 @@
#include <arch/mm.h>
#include <lib/spinlock.h>
// #define DEBUG_FAULT_HANDLING
#ifdef DEBUG_FAULT_HANDLING
#define dprintf(...) printf(__VA_ARGS__)
#else
#define dprintf(...)
#endif
/* Protection flags */
#define VM_NONE (1 << 0)
#define VM_READ (1 << 1)
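
The DEBUG_FAULT_HANDLING toggle above is the usual compile-time debug-print
pattern: leave the define commented out and every dprintf() call expands to
nothing at preprocessing time; uncomment it and the calls become plain
printf()s. A common variation, sketched below rather than taken from this
tree, expands the disabled form to do { } while (0) so a bare
"if (cond) dprintf(...);" stays well-formed:

/* Sketch only -- same toggle, with a do/while body for the disabled case
 * so the macro behaves like a single statement inside unbraced if/else.
 * printf() is assumed to be available, as in the code above. */
#ifdef DEBUG_FAULT_HANDLING
#define dprintf(...)	printf(__VA_ARGS__)
#else
#define dprintf(...)	do { } while (0)
#endif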


@@ -3,6 +3,7 @@
*/
#include <arch/mm.h>
#include <task.h>
#include <vm_area.h>
/* Extracts generic protection flags from architecture-specific pte */
unsigned int vm_prot_flags(pte_t pte)
@@ -24,6 +25,18 @@ unsigned int vm_prot_flags(pte_t pte)
return vm_prot_flags;
}
#if defined(DEBUG_FAULT_HANDLING)
void print_fault_params(struct fault_data *fault)
{
printf("%s: Handling %s fault (%s abort) from %d. fault @ 0x%x\n",
__TASKNAME__, (fault->reason & VM_READ) ? "read" : "write",
is_prefetch_abort(fault->kdata->fsr) ? "prefetch" : "data",
fault->task->tid, fault->address);
}
#else
void print_fault_params(struct fault_data *fault) { }
#endif
/*
* PTE STATES:
@@ -52,9 +65,6 @@ void set_generic_fault_params(struct fault_data *fault)
else
BUG();
}
printf("%s: Handling %s fault (%s abort) from %d. fault @ 0x%x\n",
__TASKNAME__, (fault->reason & VM_READ) ? "read" : "write",
is_prefetch_abort(fault->kdata->fsr) ? "prefetch" : "data",
fault->task->tid, fault->address);
print_fault_params(fault);
}
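
For reference, the fields touched by print_fault_params() and the fault
handlers suggest roughly the following shape for struct fault_data. This is
reconstructed from the usage above only; field names and types are guesses,
not this tree's definition.

/* Rough shape inferred from usage; not the project's actual definition. */
struct fault_kdata {
	unsigned long fsr;		/* fault status reg, fed to is_prefetch_abort() */
	unsigned long far;		/* fault address reg, assumed */
};

struct fault_data {
	struct fault_kdata *kdata;	/* kernel-supplied abort data */
	unsigned int reason;		/* generic VM_READ / VM_WRITE flags */
	unsigned long address;		/* faulting virtual address */
	struct tcb *task;		/* faulting task, provides ->tid */
	struct vm_area *vma;		/* vma covering the faulting address */
};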


@@ -19,13 +19,6 @@
#include <shm.h>
#include <file.h>
#define DEBUG_FAULT_HANDLING
#ifdef DEBUG_FAULT_HANDLING
#define dprint(...) printf(__VA_ARGS__)
#else
#define dprint(...)
#endif
unsigned long fault_to_file_offset(struct fault_data *fault)
{
/* Fault's offset in its vma */
@@ -284,8 +277,6 @@ int copy_on_write(struct fault_data *fault)
__TASKNAME__, __FUNCTION__);
BUG();
}
printf("Top object:\n");
vm_object_print(vmo_link->obj);
/* Is the object read-only? Create a shadow object if so.
*
@@ -298,7 +289,7 @@ int copy_on_write(struct fault_data *fault)
if (!(vmo_link->obj->flags & VM_WRITE)) {
if (!(shadow_link = vma_create_shadow()))
return -ENOMEM;
printf("%s: Created a shadow.\n", __TASKNAME__);
dprintf("%s: Created a shadow.\n", __TASKNAME__);
/* Initialise the shadow */
shadow = shadow_link->obj;
shadow->refcnt = 1;
@@ -322,7 +313,7 @@ int copy_on_write(struct fault_data *fault)
/* Shadow is the copier object */
copier_link = shadow_link;
} else {
printf("No shadows. Going to add to topmost r/w shadow object\n");
dprintf("No shadows. Going to add to topmost r/w shadow object\n");
/* No new shadows, the topmost r/w vmo is the copier object */
copier_link = vmo_link;
@@ -336,7 +327,7 @@ int copy_on_write(struct fault_data *fault)
}
/* Traverse the list of read-only vm objects and search for the page */
while (!(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
while (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
file_offset))) {
if (!(vmo_link = vma_next_link(&vmo_link->list,
&vma->vm_obj_list))) {
@@ -372,8 +363,8 @@ int copy_on_write(struct fault_data *fault)
(void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
printf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
dprintf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(new_page->owner);
/*
@@ -419,22 +410,31 @@ int __do_page_fault(struct fault_data *fault)
struct vm_area *vma = fault->vma;
unsigned long file_offset;
struct vm_obj_link *vmo_link;
struct vm_object *vmo;
struct page *page;
/* Handle read */
if ((reason & VM_READ) && (pte_flags & VM_NONE)) {
file_offset = fault_to_file_offset(fault);
BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
&vma->vm_obj_list)));
vmo = vmo_link->obj;
/* Get the page from its pager */
if (IS_ERR(page = vmo->pager->ops.page_in(vmo, file_offset))) {
printf("%s: Could not obtain faulty page.\n",
__TASKNAME__);
/* Get the first object, either original file or a shadow */
if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
printf("%s:%s: No vm object in vma!\n",
__TASKNAME__, __FUNCTION__);
BUG();
}
/* Traverse the list of read-only vm objects and search for the page */
while (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
file_offset))) {
if (!(vmo_link = vma_next_link(&vmo_link->list,
&vma->vm_obj_list))) {
printf("%s:%s: Traversed all shadows and the original "
"file's vm_object, but could not find the "
"faulty page in this vma.\n",__TASKNAME__,
__FUNCTION__);
BUG();
}
}
BUG_ON(!page);
/* Map it to faulty task */
@@ -442,17 +442,17 @@ int __do_page_fault(struct fault_data *fault)
(void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
printf("%s: Mapped 0x%x as readable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo);
dprintf("%s: Mapped 0x%x as readable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo_link->obj);
}
/* Handle write */
if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
/* Copy-on-write */
if (vma_flags & VMA_PRIVATE) {
if (vma_flags & VMA_PRIVATE)
copy_on_write(fault);
}
/* Regular files */
if ((vma_flags & VMA_SHARED) && !(vma_flags & VMA_ANONYMOUS)) {
/* No regular files are mapped yet */
@@ -460,10 +460,10 @@ int __do_page_fault(struct fault_data *fault)
file_offset = fault_to_file_offset(fault);
BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
&vma->vm_obj_list)));
vmo = vmo_link->obj;
/* Get the page from its pager */
if (IS_ERR(page = vmo->pager->ops.page_in(vmo, file_offset))) {
if (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
file_offset))) {
printf("%s: Could not obtain faulty page.\n",
__TASKNAME__);
BUG();
@@ -475,11 +475,16 @@ int __do_page_fault(struct fault_data *fault)
(void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
printf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo);
dprintf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo_link->obj);
}
/* FIXME: Just do fs files for now, anon shm objects later. */
/* Things to think about:
* - Is utcb a shm memory really? Then each task must map it in via
* shmget(). FS0 must map all user tasks' utcb via shmget() as well.
* For example to pass on pathnames etc.
*/
BUG_ON((vma_flags & VMA_SHARED) && (vma_flags & VMA_ANONYMOUS));
}
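
The switch from while (!(page = ...)) to while (IS_ERR(page = ...)) above moves
page_in() from returning NULL on failure to returning a pointer-encoded error
code. These helpers usually follow the Linux-style convention sketched here;
the exact definitions in this tree may differ.

/* Typical pointer-encoded error helpers (Linux-style convention); sketch only. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* e.g. ERR_PTR(-ENOMEM) */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* Error values occupy the top 4095 addresses: [-MAX_ERRNO, -1]. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}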


@@ -9,13 +9,23 @@
#include <kmalloc/kmalloc.h>
// #define DEBUG_FAULT_HANDLING
#ifdef DEBUG_FAULT_HANDLING
#define dprintf(...) printf(__VA_ARGS__)
#else
#define dprintf(...)
#endif
#if defined(DEBUG_FAULT_HANDLING)
void print_cache_pages(struct vm_object *vmo)
{
struct page *p;
printf("Pages:\n======\n");
if (!list_empty(&vmo->page_cache))
printf("Pages:\n======\n");
list_for_each_entry(p, &vmo->page_cache, list) {
printf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
dprintf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
}
@@ -38,6 +48,10 @@ void vm_object_print(struct vm_object *vmo)
print_cache_pages(vmo);
printf("\n");
}
#else
void print_cache_pages(struct vm_object *vmo) { }
void vm_object_print(struct vm_object *vmo) { }
#endif
/* Global list of in-memory vm objects. */
LIST_HEAD(vm_object_list);