Added a few vm_object debug functions. Fixed a few bugs.

Next issues: For every read fault, the fault handler must traverse the
vma's object stack until the page is found. The problem was that
we were only searching the first object; that object was a writable
shadow, and the shadow didn't have the read-only page. The 0
return value was then interpreted via IS_ERR() and treated as failure,
so address 0 was mapped into the faulting location, and QEMU blew up.
This commit is contained in:
Bahadir Balban
2008-03-16 18:57:26 +00:00
parent 0f4a4ae5b4
commit 509e949983
7 changed files with 94 additions and 21 deletions

View File

@@ -25,7 +25,7 @@
#define SECTION_MASK ARM_SECTION_MASK
#define SECTION_BITS ARM_SECTION_BITS
/* Aligns to the upper page (ceiling) */
/* Aligns to the upper page (ceiling) FIXME: Must add a wraparound checker. */
#define page_align_up(addr) ((((unsigned int)(addr)) + \
(PAGE_SIZE - 1)) & \
(~PAGE_MASK))

View File

@@ -17,7 +17,7 @@
#include INC_SUBARCH(mm.h)
/* Abort debugging conditions */
#define DEBUG_ABORTS
//#define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) dprintk(__VA_ARGS__)
#else

View File

@@ -196,6 +196,7 @@ struct vm_object *vm_object_alloc_init(void);
struct vm_object *vm_object_create(void);
struct vm_file *vm_file_create(void);
int vm_object_delete(struct vm_object *vmo);
void vm_object_print(struct vm_object *vmo);
/* Main page fault entry point */
void page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);

View File

@@ -19,6 +19,13 @@
#include <shm.h>
#include <file.h>
#define DEBUG_FAULT_HANDLING
#ifdef DEBUG_FAULT_HANDLING
#define dprint(...) printf(__VA_ARGS__)
#else
#define dprint(...)
#endif
unsigned long fault_to_file_offset(struct fault_data *fault)
{
/* Fault's offset in its vma */
@@ -61,7 +68,22 @@ int vma_drop_link(struct vm_obj_link *shadower_link,
list_del(&orig_link->list);
/* Reduce object's ref count */
BUG_ON(--orig_link->obj->refcnt <= 0);
orig_link->obj->refcnt--;
/*
* Refcount could go as low as 1 but not zero because shortly
* after it goes down to one, it is removed from the link
* chain so it can never exist with a refcount less than 1
* in the chain.
*/
if (orig_link->obj->refcnt < 1) {
printf("%s: Shadower:\n", __FUNCTION__);
vm_object_print(shadower_link->obj);
printf("%s: Original:\n", __FUNCTION__);
vm_object_print(orig_link->obj);
BUG();
}
/*
* Remove the shadower from original's shadower list.
@@ -81,14 +103,14 @@ int vma_drop_link(struct vm_obj_link *shadower_link,
* Note this just checks the page cache, so if any objects have pages
* swapped to disk, this function does not rule.
*/
int vm_object_check_subset(struct vm_object *copier,
struct vm_object *lesser)
int vm_object_is_subset(struct vm_object *copier,
struct vm_object *lesser)
{
struct page *pc, *pl;
/* Copier must have equal or more pages to overlap lesser */
if (copier->npages < lesser->npages)
return 1;
return 0;
/*
* Do a page by page comparison. Every lesser page
@@ -96,12 +118,12 @@ int vm_object_check_subset(struct vm_object *copier,
*/
list_for_each_entry(pl, &lesser->page_cache, list)
if (!(pc = find_page(copier, pl->offset)))
return 1;
return 0;
/*
* For all pages of lesser vmo, there seems to be a page
* in the copier vmo. So lesser is a subset of copier
*/
return 0;
return 1;
}
/* Merges link 1 to link 2 */
@@ -262,6 +284,8 @@ int copy_on_write(struct fault_data *fault)
__TASKNAME__, __FUNCTION__);
BUG();
}
printf("Top object:\n");
vm_object_print(vmo_link->obj);
/* Is the object read-only? Create a shadow object if so.
*
@@ -292,9 +316,13 @@ int copy_on_write(struct fault_data *fault)
*/
list_add(&shadow_link->list, &vma->vm_obj_list);
/* Add to global object list */
list_add(&shadow->list, &vm_object_list);
/* Shadow is the copier object */
copier_link = shadow_link;
} else {
printf("No shadows. Going to add to topmost r/w shadow object\n");
/* No new shadows, the topmost r/w vmo is the copier object */
copier_link = vmo_link;
@@ -332,11 +360,12 @@ int copy_on_write(struct fault_data *fault)
new_page->owner = copier_link->obj;
new_page->offset = file_offset;
new_page->virtual = 0;
BUG_ON(!list_empty(&new_page->list));
spin_unlock(&page->lock);
/* Add the page to owner's list of in-memory pages */
BUG_ON(!list_empty(&new_page->list));
insert_page_olist(new_page, new_page->owner);
spin_unlock(&page->lock);
new_page->owner->npages++;
/* Map the new page to faulty task */
l4_map((void *)page_to_phys(new_page),
@@ -345,6 +374,7 @@ int copy_on_write(struct fault_data *fault)
fault->task->tid);
printf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(new_page->owner);
/*
* Finished handling the actual fault, now check for possible
@@ -358,7 +388,7 @@ int copy_on_write(struct fault_data *fault)
}
/* Compare whether page caches overlap */
if (vm_object_check_subset(copier_link->obj, vmo_link->obj)) {
if (vm_object_is_subset(copier_link->obj, vmo_link->obj)) {
/*
* They do overlap, so keep reference to object but
* drop and delete the vma link.
@@ -405,6 +435,7 @@ int __do_page_fault(struct fault_data *fault)
__TASKNAME__);
BUG();
}
BUG_ON(!page);
/* Map it to faulty task */
l4_map((void *)page_to_phys(page),
@@ -413,6 +444,7 @@ int __do_page_fault(struct fault_data *fault)
fault->task->tid);
printf("%s: Mapped 0x%x as readable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo);
}
/* Handle write */
@@ -423,6 +455,8 @@ int __do_page_fault(struct fault_data *fault)
}
/* Regular files */
if ((vma_flags & VMA_SHARED) && !(vma_flags & VMA_ANONYMOUS)) {
/* No regular files are mapped yet */
BUG();
file_offset = fault_to_file_offset(fault);
BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
&vma->vm_obj_list)));
@@ -434,6 +468,7 @@ int __do_page_fault(struct fault_data *fault)
__TASKNAME__);
BUG();
}
BUG_ON(!page);
/* Map it to faulty task */
l4_map((void *)page_to_phys(page),
@@ -442,6 +477,7 @@ int __do_page_fault(struct fault_data *fault)
fault->task->tid);
printf("%s: Mapped 0x%x as writable to tid %d.\n", __TASKNAME__,
page_align(fault->address), fault->task->tid);
vm_object_print(vmo);
}
/* FIXME: Just do fs files for now, anon shm objects later. */
BUG_ON((vma_flags & VMA_SHARED) && (vma_flags & VMA_ANONYMOUS));

View File

@@ -12,7 +12,7 @@
#include <file.h>
#include <init.h>
#include INC_ARCH(bootdesc.h)
#include <l4/api/errno.h>
struct page *page_init(struct page *page)
{
@@ -143,8 +143,13 @@ struct vm_swap_node {
*/
/*
 * Look up the page at @file_offset in @vm_obj's page cache.
 *
 * There is no swapping yet, so the page is either resident in the
 * cache or it does not exist at all.
 *
 * Returns the page on success, or an error pointer encoding -EINVAL
 * so that callers can detect the failure with IS_ERR(), instead of a
 * plain 0/NULL that a caller could accidentally map.
 *
 * Fixes over the previous version:
 *  - Removed the leftover unconditional "return find_page(...)" that
 *    made the error-returning path below it unreachable dead code.
 *  - Use ERR_PTR() (errno -> pointer) rather than PTR_ERR() (pointer
 *    -> errno): the function returns a struct page *, and IS_ERR()
 *    pairs with ERR_PTR() in the Linux-style err.h convention.
 *    NOTE(review): confirm this codebase's errno.h defines ERR_PTR
 *    with the usual semantics.
 */
struct page *swap_page_in(struct vm_object *vm_obj, unsigned long file_offset)
{
	struct page *p;

	/* No swapping yet, so the page is either here or not here. */
	if (!(p = find_page(vm_obj, file_offset)))
		return ERR_PTR(-EINVAL);

	return p;
}
struct vm_pager swap_pager = {
@@ -278,7 +283,7 @@ struct vm_file *get_devzero(void)
struct vm_file *f;
list_for_each_entry(f, &vm_file_list, list)
if ((f->type & VM_FILE_DEVZERO) == VM_FILE_DEVZERO)
if (f->type == VM_FILE_DEVZERO)
return f;
return 0;
}
@@ -299,11 +304,12 @@ int init_devzero(void)
/* Allocate and initialise devzero file */
devzero = vm_file_create();
devzero->vm_obj.npages = ~0;
devzero->vm_obj.pager = &devzero_pager;
devzero->vm_obj.flags = VM_OBJ_FILE;
devzero->type = VM_FILE_DEVZERO;
devzero->priv_data = zpage;
devzero->length = page_align(~0UL); /* So we dont wraparound to 0! */
devzero->vm_obj.npages = __pfn(devzero->length);
devzero->vm_obj.pager = &devzero_pager;
devzero->vm_obj.flags = VM_OBJ_FILE;
list_add(&devzero->vm_obj.list, &vm_object_list);
list_add(&devzero->list, &vm_file_list);

View File

@@ -266,7 +266,7 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
/* mmap each task's environment as anonymous memory. */
if ((err = do_mmap(0, 0, task, task->env_start,
VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
__pfn(task->env_end - task->env_start)) < 0)) {
__pfn(task->env_end - task->env_start))) < 0) {
printf("do_mmap: Mapping environment failed with %d.\n",
err);
goto error;
@@ -275,7 +275,7 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
/* mmap each task's stack as anonymous memory. */
if ((err = do_mmap(0, 0, task, task->stack_start,
VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
__pfn(task->stack_end - task->stack_start)) < 0)) {
__pfn(task->stack_end - task->stack_start))) < 0) {
printf("do_mmap: Mapping stack failed with %d.\n", err);
goto error;
}
@@ -285,7 +285,7 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
task->utcb_address);
if ((err = do_mmap(0, 0, task, task->utcb_address,
VM_READ | VM_WRITE | VMA_SHARED | VMA_ANONYMOUS,
__pfn(DEFAULT_UTCB_SIZE)) < 0)) {
__pfn(DEFAULT_UTCB_SIZE))) < 0) {
printf("do_mmap: Mapping utcb failed with %d.\n", err);
goto error;
}
@@ -296,7 +296,7 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
/* Start the thread */
printf("Starting task with id %d\n", task->tid);
if ((err = l4_thread_control(THREAD_RUN, ids) < 0)) {
if ((err = l4_thread_control(THREAD_RUN, ids)) < 0) {
printf("l4_thread_control failed with %d\n", err);
goto error;
}

View File

@@ -9,6 +9,36 @@
#include <kmalloc/kmalloc.h>
void print_cache_pages(struct vm_object *vmo)
{
struct page *p;
printf("Pages:\n======\n");
list_for_each_entry(p, &vmo->page_cache, list) {
printf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
}
/*
 * Print a one-object debug summary: writability, object kind
 * (file vs shadow), reference count and cached-page count; for file
 * objects also the backing file's type; then the full page-cache
 * listing via print_cache_pages().
 */
void vm_object_print(struct vm_object *vmo)
{
	const char *rw = (vmo->flags & VM_WRITE) ? "writeable" : "read-only";
	const char *kind = (vmo->flags & VM_OBJ_FILE) ? "file" : "shadow";

	printf("Object type: %s %s. Refs: %d. Pages in cache: %d.\n",
	       rw, kind, vmo->refcnt, vmo->npages);

	if (vmo->flags & VM_OBJ_FILE) {
		struct vm_file *f = vm_object_to_file(vmo);
		const char *ftype;

		if (f->type == VM_FILE_DEVZERO)
			ftype = "devzero";
		else if (f->type == VM_FILE_BOOTFILE)
			ftype = "bootfile";
		else
			ftype = "regular";

		printf("File type: %s\n", ftype);
	}

	print_cache_pages(vmo);
	printf("\n");
}
/* Global list of in-memory vm objects. */
LIST_HEAD(vm_object_list);