Mixed changes

- Adding prefaulting of fs0 to avoid page fault deadlocks.
- Fixed a bug where a vmo page_cache equivalence check would simply drop a link to
  an original vmo, even if that vmo could have more pages outside the page cache,
  or was not a shadow vmo.
- Fixed a bug with page allocator where recursion would corrupt global variables.
- Next: fix the page allocator, or rewrite it as something simpler that works.
This commit is contained in:
Bahadir Balban
2008-09-06 11:15:41 +03:00
parent f6deedff87
commit 6c1da12fec
7 changed files with 164 additions and 25 deletions

View File

@@ -96,24 +96,27 @@ struct vm_object *vma_drop_link(struct vm_obj_link *shadower_link,
/*
* Checks if page cache pages of lesser is a subset of those of copier.
*
* FIXME:
* Note this just checks the page cache, so if any objects have pages
* swapped to disk, this function does not rule.
* swapped to disk, this function won't work, which is a logic error.
* This should really count the swapped ones as well.
*/
int vm_object_is_subset(struct vm_object *copier,
struct vm_object *lesser)
int vm_object_is_subset(struct vm_object *shadow,
struct vm_object *original)
{
struct page *pc, *pl;
/* Copier must have equal or more pages to overlap lesser */
if (copier->npages < lesser->npages)
if (shadow->npages < original->npages)
return 0;
/*
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &lesser->page_cache, list)
if (!(pc = find_page(copier, pl->offset)))
list_for_each_entry(pl, &original->page_cache, list)
if (!(pc = find_page(shadow, pl->offset)))
return 0;
/*
* For all pages of lesser vmo, there seems to be a page
@@ -122,6 +125,16 @@ int vm_object_is_subset(struct vm_object *copier,
return 1;
}
/*
 * A link to @original may be dropped only when @original is itself
 * a shadow object and every page in its cache is already covered
 * by @shadow (i.e. @original is a subset of @shadow).
 */
static inline int vm_object_is_droppable(struct vm_object *shadow,
					 struct vm_object *original)
{
	/* Non-shadow objects are never droppable. */
	if (!(original->flags & VM_OBJ_SHADOW))
		return 0;

	return vm_object_is_subset(shadow, original);
}
/*
* When one shadow object is redundant, merges it into the shadow in front of it.
* Note it must be determined that it is redundant before calling this function.
@@ -231,8 +244,7 @@ struct page *copy_to_new_page(struct page *orig)
void *new_vaddr, *vaddr, *paddr;
struct page *new;
if (!(paddr = alloc_page(1)))
return 0;
BUG_ON(!(paddr = alloc_page(1)));
new = phys_to_page(paddr);
@@ -258,8 +270,8 @@ struct page *copy_to_new_page(struct page *orig)
int vma_drop_merge_delete(struct vm_obj_link *shadow_link,
struct vm_obj_link *orig_link)
{
/* Can we can drop one link? */
if (vm_object_is_subset(shadow_link->obj, orig_link->obj)) {
/* Can we drop one link? */
if (vm_object_is_droppable(shadow_link->obj, orig_link->obj)) {
struct vm_object *dropped;
dprintf("VM OBJECT is a subset of its shadow.\nShadow:\n");

View File

@@ -258,6 +258,28 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
return 0;
}
/*
* Prefaults all mapped regions of a task. The reason we have this is
* some servers are in the page fault handling path (e.g. fs0), and we
* don't want them to fault and cause deadlocks and circular deps.
*
* Normally fs0 faults don't cause dependencies because its faults
* are handled by the boot pager, which is part of mm0. BUT: fs0 may
* fault while serving a request from mm0 (which is also expected to
* handle that fault), causing a deadlock.
*/
/*
 * Touches every page of every mapped region of @task so that it never
 * page-faults on them afterwards.
 *
 * @task: task whose vm areas are to be prefaulted.
 * @f:    executable file the task was loaded from; currently unused,
 *        kept so the signature need not change if per-file prefault
 *        policy is ever added.
 *
 * Returns 0 on success. A prefault failure is treated as fatal
 * (BUG_ON), since the only caller prefaults fs0, which must never
 * fault while mm0 is serving one of its requests.
 */
int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
	struct vm_area *vma;

	list_for_each_entry(vma, &task->vm_area_list, list) {
		/*
		 * Use unsigned long for the page frame number to match
		 * pfn_start/pfn_end and avoid signed/unsigned truncation
		 * — assumes those fields are unsigned long; TODO confirm.
		 */
		for (unsigned long pfn = vma->pfn_start;
		     pfn < vma->pfn_end; pfn++)
			BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
					     VM_READ | VM_WRITE) < 0);
	}
	return 0;
}
/*
* Main entry point for the creation, initialisation and
* execution of a new task.
@@ -278,6 +300,10 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
if ((err = task_mmap_regions(task, f)) < 0)
return err;
if (ids->tid == VFS_TID)
if ((err = task_prefault_regions(task, f)) < 0)
return err;
if ((err = task_setup_registers(task, 0, 0, 0)) < 0)
return err;