More progress on VMA merge/shadow collapse.

The merging is done, but deleting pages that belong to different
pagers is the current issue.
Bahadir Balban
2008-03-10 18:38:08 +00:00
parent 584815a4db
commit 47a6a31249
5 changed files with 133 additions and 108 deletions
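
For orientation, the object chains this commit collapses look like this: a vma holds a list of vm_obj_links, each link's vm_object may shadow the object beneath it through orig_obj, and the chain bottoms out at a file-backed object. A minimal sketch of walking such a chain, using the field names from the hunks below (the loop itself is illustrative, not part of this commit):

/* Sketch: descend from a vma's topmost object to its file object */
struct vm_object *vmo = link->obj;

while (vmo->flags & VM_OBJ_SHADOW)
	vmo = vmo->orig_obj;
/* vmo now points at the file-backed object at the bottom */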


@@ -111,7 +111,6 @@ struct vm_object {
int npages; /* Number of pages in memory */
int refcnt; /* Number of shadows (or vmas) that refer */
struct list_head shadowers; /* List of vm objects that shadow this one */
struct list_head shref; /* Shadow referees use this to track us */
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int flags; /* Defines the type and flags of the object */
struct list_head list; /* List of all vm objects in memory */
@@ -132,6 +131,7 @@ struct vm_file {
/* To create per-vma vm_object lists */
struct vm_obj_link {
struct list_head list;
struct list_head shref; /* Ref to shadowers by original objects */
struct vm_object *obj;
};
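
Moving shref out of vm_object and into the per-vma link is the key structural change: a shadower now registers on the original's shadowers list through its link, so the merge code below can recover the vm_obj_link, not just the bare object, from that list. A hedged sketch of how registration would look with these fields (vma_add_shadow is a hypothetical helper, not in this commit):

/* Hypothetical: register a new shadow link on its original */
static void vma_add_shadow(struct vm_obj_link *shadower_link,
			   struct vm_object *orig)
{
	shadower_link->obj->orig_obj = orig;
	orig->refcnt++;
	/* The shref of the *link* goes on the list, so list_entry()
	 * on orig->shadowers yields the link back later. */
	list_add(&shadower_link->shref, &orig->shadowers);
}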


@@ -18,12 +18,6 @@
#include <shm.h>
#include <file.h>
/* FIXME: FIXME: FIXME: FIXME: FIXME: FIXME: FIXME: FIXME: TODO:
* For every page that is allocated, (read-only file pages) and anon pages
* etc. Page cache for that page's file must be visited first, before
* allocation.
*/
unsigned long fault_to_file_offset(struct fault_data *fault)
{
/* Fault's offset in its vma */
@@ -122,6 +116,93 @@ int vma_drop_link(struct vm_obj_link *shadower,
list_del(&shadower->shref);
}
/*
* Checks whether the page cache pages of lesser are a subset of those
* of copier. Note this only inspects the page cache, so if either
* object has pages swapped out to disk, this function cannot give a
* definitive answer.
*/
int vm_object_check_subset(struct vm_object *copier,
struct vm_object *lesser)
{
struct page *pc, *pl;
/* Copier must have at least as many pages as lesser to cover it */
if (copier->npages < lesser->npages)
return 1;
/*
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &lesser->page_cache, list)
if (!(pc = find_page(copier, pl->offset)))
return 1;
/*
* Every page of lesser has a matching page in copier,
* so lesser is a subset of copier.
*/
return 0;
}
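
Note the return convention: 0 means lesser is a subset of copier, nonzero means the subset relation failed or could not be established. A hedged sketch of how the result would feed a merge decision (the call site is illustrative):

/* Illustrative merge decision based on the subset test */
if (!vm_object_check_subset(shadower->obj, lower->obj)) {
	/* Every page of lower already has a copy above it, so
	 * merging reduces to deleting lower and its pages. */
} else {
	/* Some pages exist only in lower; do_vma_merge_link()
	 * must migrate them up before lower is deleted. */
}
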
/* Merges the object of link1 into that of link2 */
int do_vma_merge_link(struct vm_obj_link *link1, struct vm_obj_link *link2)
{
struct vm_object *obj1 = link1->obj;
struct vm_object *obj2 = link2->obj;
struct page *p1, *n, *p2;
/*
* Move all non-intersecting pages to link2. Free all
* intersecting pages.
*/
list_for_each_entry_safe(p1, n, &obj1->page_cache, list) {
/* Page doesn't exist in obj2, move it to the shadower */
if (!(p2 = find_page(obj2, p1->offset))) {
list_del(&p1->list);
obj1->npages--;
spin_lock(&p1->lock);
p1->owner = obj2;
spin_unlock(&p1->lock);
insert_page_olist(p1, obj2);
obj2->npages++;
}
}
/* Delete the source object along with its remaining pages. */
vm_object_delete(obj1);
return 0;
}
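
The ownership flip above (p1->owner = obj2) is where the commit message's open issue surfaces: after a merge, obj2's cache can mix pages that were originally paged in by different pagers, yet a later delete has only obj2's pager to release them with. A hedged illustration of the lost information (assumes the page fields shown in this commit):

/* After the merge, every page claims obj2 as its owner: */
struct page *p;

list_for_each_entry(p, &obj2->page_cache, list)
	BUG_ON(p->owner != obj2);
/* But which pager originally brought each page in is no longer
 * recorded anywhere, hence "deleting pages from different pagers"
 * being the open problem named in the commit message. */
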
/*
* Merges a vma's shadow object with its shadower. Note this
* must be called when merge is decided.
*/
int vma_merge_link(struct vm_obj_link *vmo_link)
{
struct vm_obj_link *sh_link;
/* Check refcount */
BUG_ON(vmo_link->obj->refcnt != 1);
/* Get the last shadower entry */
sh_link = list_entry(vmo_link->obj->shadowers.next,
struct vm_obj_link, shref);
/* Remove it */
list_del(&sh_link->shref);
/* Check that there really was one shadower left */
BUG_ON(!list_empty(&vmo_link->obj->shadowers));
/* Do the actual merge */
return do_vma_merge_link(vmo_link, sh_link);
}
/*
* To merge, the object should use the last shadow
* reference left, but this reference must be changed
* to point at the vm_obj_link rather than the object
* itself, because it's not possible to find the link
* from the object.
*/
struct vm_obj_link *vma_create_shadow(void)
{
@@ -166,34 +247,6 @@ struct page *copy_page(struct page *orig)
return new;
}
/*
* Checks whether the page cache pages of lesser are a subset of those
* of copier. Note this only inspects the page cache, so if either
* object has pages swapped out to disk, this function cannot give a
* definitive answer.
*/
int vm_object_check_subset(struct vm_object *copier,
struct vm_object *lesser)
{
struct page *pc, *pl;
/* Copier must have at least as many pages as lesser to cover it */
if (copier->npages < lesser->npages)
return 1;
/*
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &lesser->page_cache, list)
if (!(pc = find_page(copier, pl->offset)))
return 1;
/*
* Every page of lesser has a matching page in copier,
* so lesser is a subset of copier.
*/
return 0;
}
/* TODO:
* - Why not allocate a swap descriptor in vma_create_shadow() rather than
* a bare vm_object? It will be needed.
@@ -313,16 +366,9 @@ int copy_on_write(struct fault_data *fault)
/* vm object reference down to one? */
if (vmo_link->obj->refcnt == 1) {
BUG(); /* Check that the mergers are both shadows!!! */
/* TODO: Fill this in! Merge with the only reference */
vma_merge_link(vmo_link);
/*
* To merge, the object should use the last shadow
* reference left, but this reference must be changed
* to point at the vm_obj_link rather than the object
* itself, because it's not possible to find the link
* from the object, and only the link matters.
*/
}
}
}
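
Read together with vma_merge_link() above, the trigger is: a write fault finds the object's refcnt down to one, so this vma holds the last shadow reference and the object can collapse into its sole shadower. A hedged sketch of the branch once the TODO is resolved and the BUG() placeholder becomes a real check:

/* Sketch: the merge branch of copy_on_write(), completed */
if (vmo_link->obj->refcnt == 1) {
	/* Only sensible when both objects are shadows */
	BUG_ON(!(vmo_link->obj->flags & VM_OBJ_SHADOW));
	vma_merge_link(vmo_link);
}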


@@ -116,26 +116,26 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
* Inserts the page into the object's page cache in order of page offset.
* We use an ordered list instead of a radix or btree for now.
*/
int insert_page_olist(struct page *this, struct vm_file *f)
int insert_page_olist(struct page *this, struct vm_object *vmo)
{
struct page *before, *after;
/* Add if list is empty */
if (list_empty(&f->page_cache_list)) {
list_add_tail(&this->list, &f->page_cache_list);
if (list_empty(&vmo->page_cache)) {
list_add_tail(&this->list, &vmo->page_cache);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, &f->page_cache_list, list) {
list_for_each_entry(before, &vmo->page_cache, list) {
after = list_entry(before->list.next, struct page, list);
/* If before is the last page in the list */
if (before->list.next == &f->page_cache_list) {
if (before->list.next == &vmo->page_cache) {
/* Add to end if greater */
if (this->f_offset > before->f_offset)
if (this->offset > before->offset)
list_add_tail(&this->list, &before->list);
/* Add to beginning if smaller */
else if (this->f_offset < before->f_offset)
else if (this->offset < before->offset)
list_add(&this->list, &before->list);
else
BUG();
@@ -143,13 +143,13 @@ int insert_page_olist(struct page *this, struct vm_file *f)
}
/* If this page is in-between two other, insert it there */
if (before->f_offset < this->f_offset &&
after->f_offset > this->f_offset) {
if (before->offset < this->offset &&
after->offset > this->offset) {
list_add_tail(&this->list, &before->list);
return 0;
}
BUG_ON(this->f_offset == before->f_offset);
BUG_ON(this->f_offset == after->f_offset);
BUG_ON(this->offset == before->offset);
BUG_ON(this->offset == after->offset);
}
BUG();
}
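
Because the cache stays sorted by offset, lookups can stop as soon as they pass the target instead of scanning the whole list. A hedged sketch of find_page() against this ordering (the in-tree version in the next file may simply scan to the end; the early exit here is an assumption):

/* Sketch: lookup over the ordered page cache with early exit */
struct page *find_page(struct vm_object *obj, unsigned long offset)
{
	struct page *p;

	list_for_each_entry(p, &obj->page_cache, list) {
		if (p->offset == offset)
			return p;
		if (p->offset > offset)
			break;	/* passed it; page not resident */
	}
	return 0;
}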


@@ -23,56 +23,6 @@ struct page *find_page(struct vm_object *obj, unsigned long pfn)
return 0;
}
struct page *copy_on_write_page_in(struct vm_object *vm_obj, unsigned long page_offset)
{
struct vm_object *orig = vma_get_next_object(vm_obj);
struct page *page;
void *vaddr, *paddr;
int err;
struct vm_file *f = vm_object_to_file(vm_obj);
/* The page is not resident in page cache. */
if (!(page = find_page(vm_obj, page_offset))) {
/* Allocate a new page */
paddr = alloc_page(1);
vaddr = phys_to_virt(paddr);
page = phys_to_page(paddr);
/* Map the page to vfs task */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, VFS_TID);
/* Syscall to vfs to read into the page. */
if ((err = vfs_read(f->vnum, page_offset, 1, vaddr)) < 0)
goto out_err;
/* Unmap it from vfs */
l4_unmap(vaddr, 1, VFS_TID);
/* Update vm object details */
vm_obj->npages++;
/* Update page details */
spin_lock(&page->lock);
page->count++;
page->owner = vm_obj;
page->offset = page_offset;
page->virtual = 0;
/* Add the page to owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
insert_page_olist(page, vm_obj);
spin_unlock(&page->lock);
}
return page;
out_err:
l4_unmap(vaddr, 1, VFS_TID);
free_page(paddr);
return ERR_PTR(err);
}
struct page *file_page_in(struct vm_object *vm_obj, unsigned long page_offset)
{
struct vm_file *f = vm_object_to_file(vm_obj);
@@ -181,20 +131,22 @@ struct vm_pager swap_pager = {
},
};
/* Returns the page with given offset in this vm_object */
struct page *bootfile_page_in(struct vm_object *vm_obj,
unsigned long pfn_offset)
unsigned long offset)
{
struct vm_file *boot_file = vm_object_to_file(vm_obj);
struct svc_image *img = boot_file->priv_data;
struct page *page = phys_to_page(img->phys_start +
__pfn_to_addr(pfn_offset));
__pfn_to_addr(offset));
spin_lock(&page->lock);
page->count++;
spin_unlock(&page->lock);
/* FIXME: Why not add pages to linked list and update npages? */
vm_obj->npages++;
return page;
}
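
All three page_in variants in this file are dispatched through a per-object pager. A hedged sketch of the operation table they plug into (the exact field layout is an assumption, inferred from the fragments visible here and from the release call in vm_object_delete() below):

/* Assumed operation table, inferred from the call
 * vmo->pager.ops->release_pages(vmo) in vm_object_delete() */
struct vm_pager_ops {
	struct page *(*page_in)(struct vm_object *vm_obj,
				unsigned long offset);
	int (*release_pages)(struct vm_object *vm_obj);
};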


@@ -1,5 +1,5 @@
/*
* VM Objects.
* vm objects.
*
* Copyright (C) 2008 Bahadir Balban
*/
@@ -48,3 +48,30 @@ struct vm_file *vm_file_alloc_init(void)
return f;
}
/* Deletes the object via its base, along with all its pages */
int vm_object_delete(struct vm_object *vmo)
{
struct vm_file *f;
/* Release all pages */
vmo->pager.ops->release_pages(vmo);
/* Remove from global list */
list_del(&vmo->list);
/* Check that no references or pages remain */
BUG_ON(vmo->refcnt);
BUG_ON(!list_empty(&vmo->shadowers));
BUG_ON(!list_empty(&vmo->page_cache));
/* Obtain and free via the base object */
if (vmo->flags & VM_OBJ_FILE) {
f = vm_object_to_file(vmo);
kfree(f);
} else if (vmo->flags & VM_OBJ_SHADOW)
kfree(vmo);
else
BUG();
return 0;
}
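
release_pages is also where the open problem from the commit message lands: after a merge, an object's cache may hold pages brought in by different pagers, but deletion only consults this object's own pager. A hedged sketch of what a shadow object's release might look like today, with the failure mode marked (helper names such as page_to_phys are assumptions):

/* Hypothetical release of a shadow object's anonymous pages */
int shadow_release_pages(struct vm_object *vmo)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, &vmo->page_cache, list) {
		list_del(&p->list);
		vmo->npages--;
		/* Wrong for pages merged in from a file-backed
		 * object: those belong to the file's page cache
		 * and must not be freed as anonymous memory. */
		free_page(page_to_phys(p));
	}
	return 0;
}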