Fixed most compiler errors. Need to do more.

@@ -13,6 +13,7 @@
#include INC_SUBARCH(mm.h)
#include <arch/mm.h>
#include <l4/generic/space.h>
#include <l4/api/errno.h>
#include <string.h>
#include <memory.h>
#include <shm.h>
@@ -29,6 +30,404 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
        return f_off_pfn;
}

/*
 * Given a reference to a vm_object link, returns the next link.
 * If wrapped back around to the given head, returns 0.
 *
 * vma->link1->link2->link3
 *       |      |      |
 *       V      V      V
 *      vmo1   vmo2   vmo3|vm_file
 *
 * Example:
 * Given a reference to link = vma, head = vma, returns link1.
 * Given a reference to link = link3, head = vma, returns 0.
 */
struct vm_obj_link *vma_next_link(struct list_head *link,
                                  struct list_head *head)
{
        BUG_ON(list_empty(link));

        /* Stop when the next entry wraps back around to the head */
        if (link->next == head)
                return 0;
        else
                return list_entry(link->next, struct vm_obj_link, list);
}
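
A traversal sketch for orientation (hypothetical usage, not part of this
commit; only vma_next_link(), struct vm_obj_link and the vm_obj_list
field come from this file):

        /* Walk every object linked into a vma, starting at the head */
        struct vm_obj_link *cur = vma_next_link(&vma->vm_obj_list,
                                                &vma->vm_obj_list);
        while (cur) {
                /* cur->obj is the vm_object this link points at */
                cur = vma_next_link(&cur->list, &vma->vm_obj_list);
        }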

/* Unlinks orig_link from its vma and deletes it, but keeps the object. */
int vma_drop_link(struct vm_obj_link *shadower_link,
                  struct vm_obj_link *orig_link)
{
        /* Remove object link from vma's list */
        list_del(&orig_link->list);

        /*
         * Reduce the object's ref count. It must stay positive,
         * since the object itself is kept. (Decrement kept outside
         * the BUG_ON so the assertion carries no side effect.)
         */
        orig_link->obj->refcnt--;
        BUG_ON(orig_link->obj->refcnt <= 0);

        /*
         * Remove the shadower from the original's shadower list.
         * We know the shadower is on the original's list, because
         * each shadow shadows exactly one object.
         */
        list_del(&shadower_link->shref);

        /* Delete the original link */
        kfree(orig_link);

        return 0;
}

/*
 * Checks whether the page cache pages of lesser are a subset of those
 * of copier. Returns 0 if so, 1 if not. Note this only inspects the
 * page cache, so if either object has pages swapped out to disk, the
 * result is not conclusive.
 */
int vm_object_check_subset(struct vm_object *copier,
                           struct vm_object *lesser)
{
        struct page *pc, *pl;

        /* Copier must have at least as many pages as lesser to cover it */
        if (copier->npages < lesser->npages)
                return 1;

        /*
         * Do a page-by-page comparison. Every lesser page
         * must be present in copier.
         */
        list_for_each_entry(pl, &lesser->page_cache, list)
                if (!(pc = find_page(copier, pl->offset)))
                        return 1;
        /*
         * Every page of the lesser vmo has a counterpart in the
         * copier vmo, so lesser is a subset of copier.
         */
        return 0;
}
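
A usage sketch of the return convention above (hypothetical caller;
compare the collapse check in copy_on_write() further below):

        /* A collapse is only safe when lesser is fully covered */
        if (vm_object_check_subset(copier, lesser) == 0) {
                /* every page of lesser also exists in copier */
        }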

/* Merges the object of link1 into that of link2, freeing link1 and its object. */
int vma_do_merge_link(struct vm_obj_link *link1, struct vm_obj_link *link2)
{
        struct vm_object *obj1 = link1->obj;
        struct vm_object *obj2 = link2->obj;
        struct page *p1, *n, *p2;

        /*
         * Move all non-intersecting pages to link2's object. The safe
         * iterator is needed (assuming a Linux-style list.h) because
         * pages are removed from the list as we traverse it.
         */
        list_for_each_entry_safe(p1, n, &obj1->page_cache, list) {
                /* Page doesn't exist in obj2, move it to the shadower */
                if (!(p2 = find_page(obj2, p1->offset))) {
                        list_del(&p1->list);
                        spin_lock(&p1->lock);
                        p1->owner = obj2;
                        spin_unlock(&p1->lock);
                        insert_page_olist(p1, obj2);
                        obj2->npages++;
                }
        }
        /* Delete the object along with all its remaining pages. */
        vm_object_delete(obj1);

        /* Delete the last link for the object */
        kfree(link1);

        return 0;
}

/*
 * Finds the only shadower of a vm object, finds the link to the object
 * that is in the same vma as the shadower, and merges the two into the
 * shadower object, freeing the link and the original object. Note this
 * must only be called once the merge decision has been made.
 */
int vma_merge_link(struct vm_object *vmo)
{
        struct vm_obj_link *sh_link, *vmo_link;

        /* Check refcount */
        BUG_ON(vmo->refcnt != 1);

        /* Get the single remaining shadower entry */
        sh_link = list_entry(vmo->shadowers.next,
                             struct vm_obj_link, shref);

        /* Remove it from the original's shadower list */
        list_del(&sh_link->shref);

        /* Check that there really was only one shadower left */
        BUG_ON(!list_empty(&vmo->shadowers));

        /*
         * Get the link to vmo that is in the same list as
         * the only shadower.
         */
        vmo_link = list_entry(sh_link->list.next,
                              struct vm_obj_link, list);

        /*
         * Check that we got the right link. Since it is
         * an ordered list, the link must be the entry
         * following its shadower.
         */
        BUG_ON(vmo_link->obj != vmo);

        /*
         * Now that we have the consecutive links in the
         * same vma, do the actual merge.
         */
        vma_do_merge_link(vmo_link, sh_link);

        return 0;
}

/*
 * Creates a bare vm_object along with its vma link, since
 * the shadow will immediately be used in a vma object list.
 */
struct vm_obj_link *vma_create_shadow(void)
{
        struct vm_object *vmo;
        struct vm_obj_link *vmo_link;

        if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
                return 0;

        if (!(vmo = vm_object_create())) {
                kfree(vmo_link);
                return 0;
        }
        INIT_LIST_HEAD(&vmo_link->list);
        vmo->flags = VM_OBJ_SHADOW;
        vmo_link->obj = vmo;

        return vmo_link;
}

/* Allocates a new page, copies the original onto it and returns it. */
struct page *copy_page(struct page *orig)
{
        void *new_vaddr, *vaddr, *paddr;
        struct page *new;

        if (!(paddr = alloc_page(1)))
                return 0;

        new = phys_to_page(paddr);

        /* Map the new and orig pages to self */
        new_vaddr = l4_map_helper(paddr, 1);
        vaddr = l4_map_helper((void *)page_to_phys(orig), 1);

        /* Copy the original page into the new page */
        memcpy(new_vaddr, vaddr, PAGE_SIZE);

        /* Unmap both pages from the current task. */
        l4_unmap_helper(vaddr, 1);
        l4_unmap_helper(new_vaddr, 1);

        return new;
}
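
A minimal caller sketch (hypothetical; as shown above, copy_page()
returns 0 when alloc_page() fails):

        struct page *new_page;

        if (!(new_page = copy_page(page)))
                return -ENOMEM; /* out of physical pages */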

/* TODO:
 * - Why not allocate a swap descriptor in vma_create_shadow() rather than
 *   a bare vm_object? It will be needed.
 * - Does VM_WRITE clash with any other object flags???
 * - Check refcounting of shadows, their references, page refs,
 *   decrements, increments etc.
 */
int copy_on_write(struct fault_data *fault)
{
        struct vm_obj_link *vmo_link, *shadow_link, *copier_link;
        struct vm_object *vmo, *shadow;
        struct page *page, *new_page;
        struct vm_area *vma = fault->vma;
        unsigned int reason = fault->reason;
        unsigned long file_offset = fault_to_file_offset(fault);

        /* Get the first object, either the original file or a shadow */
        if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
                printf("%s:%s: No vm object in vma!\n",
                       __TASKNAME__, __FUNCTION__);
                BUG();
        }
        vmo = vmo_link->obj;

        /* Is the object read-only? Create a shadow object if so.
         *
         * NOTE: Whenever the topmost object is read-only, a new shadow
         * object must be created. When there are no shadows, one is
         * created because it is the original vm_object that is not
         * writeable; when there are shadows, one is created because a
         * fork has just happened, which renders all shadows read-only.
         */
        if (!(vmo->flags & VM_WRITE)) {
                if (!(shadow_link = vma_create_shadow()))
                        return -ENOMEM;

                /* Initialise the shadow */
                shadow = shadow_link->obj;
                shadow->refcnt = 1;
                shadow->orig_obj = vmo_link->obj;
                shadow->flags = VM_OBJ_SHADOW | VM_WRITE;
                shadow->pager = &swap_pager;

                /*
                 * Add the shadow in front of the original:
                 *
                 * vma->link0->link1
                 *       |      |
                 *       V      V
                 *     shadow original
                 */
                list_add(&shadow_link->list, &vma->vm_obj_list);

                /* The shadow is the copier object */
                copier_link = shadow_link;
        } else {
                /* No new shadow; the topmost r/w vmo is the copier object */
                copier_link = vmo_link;

                /*
                 * We start the page search on read-only objects. If the
                 * first one was writable, go to the next one, which must
                 * be read-only.
                 */
                BUG_ON(!(vmo_link = vma_next_link(&vmo_link->list,
                                                  &vma->vm_obj_list)));
                BUG_ON(vmo_link->obj->flags & VM_WRITE);
        }

        /* Traverse the list of read-only vm objects and search for the page */
        while (!(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
                                                          file_offset))) {
                if (!(vmo_link = vma_next_link(&vmo_link->list,
                                               &vma->vm_obj_list))) {
                        printf("%s:%s: Traversed all shadows and the original "
                               "file's vm_object, but could not find the "
                               "faulty page in this vma.\n", __TASKNAME__,
                               __FUNCTION__);
                        BUG();
                }
        }

        /*
         * Copy the page. This traverse-and-copy is like the page-in
         * operation of a pager, except that the page moves along
         * vm_objects.
         */
        if (!(new_page = copy_page(page)))
                return -ENOMEM;

        /* Update page details */
        spin_lock(&new_page->lock);
        new_page->refcnt = 1;
        new_page->owner = copier_link->obj;
        new_page->offset = file_offset;
        new_page->virtual = 0;

        /* Add the page to its owner's list of in-memory pages */
        BUG_ON(!list_empty(&new_page->list));
        insert_page_olist(new_page, new_page->owner);
        spin_unlock(&new_page->lock);

        /* Map the new page to the faulty task */
        l4_map((void *)page_to_phys(new_page),
               (void *)page_align(fault->address), 1,
               (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
               fault->task->tid);

        /*
         * Finished handling the actual fault; now check for possible
         * shadow collapses. Does the copier completely shadow the one
         * underlying it?
         */
        if (!(vmo_link = vma_next_link(&copier_link->list, &vma->vm_obj_list))) {
                /* The copier must have an object under it */
                printf("Copier must have had an object under it!\n");
                BUG();
        }

        /* Compare whether the page caches overlap (0 means subset) */
        if (vm_object_check_subset(copier_link->obj, vmo_link->obj) == 0) {
                /*
                 * They do overlap, so keep a reference to the object
                 * but drop and delete the vma link.
                 */
                vmo = vmo_link->obj;
                vma_drop_link(copier_link, vmo_link);
                vmo_link = 0;

                /* vm object reference down to one and object mergeable,
                 * i.e. not a file? */
                if ((vmo->refcnt == 1) &&
                    !(vmo->flags & VM_OBJ_FILE))
                        vma_merge_link(vmo);
        }

        return 0;
}


/*
 * Handles the page fault. All entries here are assumed to be *legal*
 * faults, i.e. do_page_fault() should have already checked for illegal
 * accesses.
 */
int __do_page_fault(struct fault_data *fault)
{
        unsigned int reason = fault->reason;
        unsigned int vma_flags = fault->vma->flags;
        unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
        struct vm_area *vma = fault->vma;
        unsigned long file_offset;
        struct vm_obj_link *vmo_link;
        struct vm_object *vmo;
        struct page *page;

        /* Handle read */
        if ((reason & VM_READ) && (pte_flags & VM_NONE)) {
                file_offset = fault_to_file_offset(fault);
                BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
                                                  &vma->vm_obj_list)));
                vmo = vmo_link->obj;

                /* Get the page from its pager */
                if (IS_ERR(page = vmo->pager->ops.page_in(vmo, file_offset))) {
                        printf("%s: Could not obtain faulty page.\n",
                               __TASKNAME__);
                        BUG();
                }
                /* Map it to the faulty task */
                l4_map((void *)page_to_phys(page),
                       (void *)page_align(fault->address), 1,
                       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
                       fault->task->tid);
        }

        /* Handle write */
        if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
                /* Copy-on-write */
                if (vma_flags & VMA_PRIVATE) {
                        copy_on_write(fault);
                }
                /* FIXME: Just do fs files for now, anon shm objects later. */
                if (vma_flags & VMA_SHARED) {
                        file_offset = fault_to_file_offset(fault);
                        BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
                                                          &vma->vm_obj_list)));
                        vmo = vmo_link->obj;

                        /* Get the page from its pager */
                        if (IS_ERR(page = vmo->pager->ops.page_in(vmo,
                                                                  file_offset))) {
                                printf("%s: Could not obtain faulty page.\n",
                                       __TASKNAME__);
                                BUG();
                        }

                        /* Map it to the faulty task */
                        l4_map((void *)page_to_phys(page),
                               (void *)page_align(fault->address), 1,
                               (reason & VM_READ) ? MAP_USR_RO_FLAGS :
                               MAP_USR_RW_FLAGS,
                               fault->task->tid);
                }
        }
        return 0;
}

#if 0
/*
 * Old function, likely to be ditched.
 *
@@ -79,337 +478,6 @@ struct vm_area *copy_on_write_vma(struct fault_data *fault)
        return shadow;
}

/*
 * Given a reference to a vm_object link, returns the next link.
 * If back to given head, returns 0.
 *
 * vma->link1->link2->link3
 *       |      |      |
 *       V      V      V
 *      vmo1   vmo2   vmo3|vm_file
 *
 * Example:
 * Given a reference to link = vma, head = vma, returns link1.
 * Given a reference to link = link3, head = vma, returns 0.
 */
struct vm_object *vma_next_link(struct list_head *link,
                                struct list_head *head)
{
        BUG_ON(list_empty(link));
        if (link == head)
                return 0;
        else
                return list_entry(link.next, struct vm_obj_link, list);
}

/* Unlinks obj_link from its vma. */
int vma_drop_link(struct vm_obj_link *shadower,
                  struct vm_obj_link *obj_link)
{
        /* Remove object link from vma's list */
        list_del(&obj_link->list);

        /* Reduce object's ref count */
        obj_link->obj->refcnt--;

        /* Remove the copier from obj_link's shadower list */
        list_del(&shadower->shref);
}

/*
 * Checks if page cache pages of lesser is a subset of those of copier.
 * Note this just checks the page cache, so if any objects have pages
 * swapped to disk, this function does not rule.
 */
int vm_object_check_subset(struct vm_object *copier,
                           struct vm_object *lesser)
{
        struct page *pc, *pl;

        /* Copier must have equal or more pages to overlap lesser */
        if (copier->npages < lesser->npages)
                return 1;

        /*
         * Do a page by page comparison. Every lesser page
         * must be in copier for overlap.
         */
        list_for_each_entry(pl, &lesser->page_cache, list)
                if (!(pc = find_page(copier, pl->offset)))
                        return 1;
        /*
         * For all pages of lesser vmo, there seems to be a page
         * in the copier vmo. So lesser is a subset of copier
         */
        return 0;
}

/* Merges link 1 to link 2 */
int vma_do_merge_link(struct vm_obj_link *link1, struct vm_obj_link *link2)
{
        struct vm_object *obj1 = link1->obj;
        struct vm_object *obj2 = link2->obj;
        struct page *p1, *p2;

        /*
         * Move all non-intersecting pages to link2. Free all
         * intersecting pages.
         */
        list_for_each_entry(p1, &obj1->page_cache, list) {
                /* Page doesn't exist, move it to shadower */
                if (!(p2 = find_page(obj2, p1->offset))) {
                        list_del(&p1->list);
                        spin_lock(&page->lock);
                        p1->owner = obj2;
                        spin_unlock(&page->lock);
                        insert_page_olist(p1, obj2);
                        obj2->npages++;
                }
        }
        /* Delete the object along with all its pages. */
        vm_object_delete(obj1);
}

/*
 * Merges a vma's shadow object with its shadower. Note this
 * must be called when merge is decided.
 */
int vma_merge_link(struct vm_obj_link *vmo_link)
{
        struct vm_obj_link *sh_link;

        /* Check refcount */
        BUG_ON(vmo_link->obj->refcnt != 1);

        /* Get the last shadower entry */
        sh_link = list_entry(&vmo_link->obj->shadowers.next,
                             struct vm_obj_link, shref);

        /* Remove it */
        list_del(&sh_link->shref);

        /* Check that there really was one shadower left */
        BUG_ON(!list_empty(&vmo_link->obj->shadowers));

        /* Do the actual merge */
        vma_do_merge_link(vmo_link, sh_link);

}

struct vm_obj_link *vma_create_shadow(void)
{
        struct vm_object *vmo;
        struct vm_obj_link *vmo_link;

        if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
                return 0;

        if (!(vmo = vm_object_create())) {
                kfree(vmo_link);
                return 0;
        }
        INIT_LIST_HEAD(&vmo_link->list);
        vmo->type = VM_OBJ_SHADOW;
        vmo_link->obj = vmo;

        return vmo_link;
}

/* Allocates a new page, copies the original onto it and returns. */
struct page *copy_page(struct page *orig)
{
        void *new_vaddr, *vaddr, *paddr;
        struct page *new;

        if (!(paddr = alloc_page(1)))
                return 0;

        new = phys_to_page(paddr);

        /* Map the new and orig page to self */
        new_vaddr = l4_map_helper(paddr, 1);
        vaddr = l4_map_helper(page_to_phys(orig), 1);

        /* Copy the page into new page */
        memcpy(new_vaddr, vaddr, PAGE_SIZE);

        /* Unmap both pages from current task. */
        l4_unmap_helper(vaddr, 1);
        l4_unmap_helper(new_vaddr, 1);

        return new;
}

/* TODO:
 * - Why not allocate a swap descriptor in vma_create_shadow() rather than
 *   a bare vm_object? It will be needed.
 * - Does vm_write clash with any other object flags???
 * - Check refcounting of shadows, their references, page refs,
 *   reduces increases etc.
 */
int copy_on_write(struct fault_data *fault)
{
        struct vm_obj_link *vmo_link, *shadow_link, *copier_link;
        struct vm_object *vmo, *shadow, *copier;
        struct page *page, *new_page;
        unsigned int reason = fault->reason;
        unsigned int vma_flags = fault->vma->flags;
        unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
        unsigned long file_offset = fault_to_file_offset(fault);

        /* Get the first object, either original file or a shadow */
        if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
                printf("%s:%s: No vm object in vma!\n",
                       __TASKNAME__, __FUNCTION__);
                BUG();
        }

        /* Is the object read-only? Create a shadow object if so.
         *
         * NOTE: Whenever the topmost object is read-only, a new shadow
         * object must be created. When there are no shadows one is created
         * because, its the original vm_object that is not writeable, and
         * when there are shadows one is created because a fork had just
         * happened, in which case all shadows are rendered read-only.
         */
        if (!(vmo->flags & VM_WRITE)) {
                if (!(shadow_link = vma_create_shadow()))
                        return -ENOMEM;

                /* Initialise the shadow */
                shadow = shadow_link->obj;
                shadow->vma_refcnt = 1;
                shadow->orig_obj = vmo_link->obj;
                shadow->type = VM_OBJ_SHADOW | VM_WRITE;
                shadow->pager = swap_pager;

                /*
                 * Add the shadow in front of the original:
                 *
                 * vma->link0->link1
                 *       |      |
                 *       V      V
                 *     shadow original
                 */
                list_add(&shadow_link->list, &vma->vm_obj_list);

                /* Shadow is the copier object */
                copier_link = shadow_link;
        } else {
                /* No new shadows, the topmost r/w vmo is the copier object */
                copier_link = vmo_link;

                /*
                 * We start page search on read-only objects. If the first
                 * one was writable, go to next which must be read-only.
                 */
                BUG_ON(!(vmo_link = vma_next_object(vmo_link,
                                                    &vma->vm_obj_list)));
                BUG_ON(vmo_link->obj->flags & VM_WRITE);
        }

        /* Traverse the list of read-only vm objects and search for the page */
        while (!(page = vmo_link->obj->pager.ops->page_in(vmo_link->obj,
                                                          file_offset))) {
                if (!(vmo_link = vma_next_object(vmo_link,
                                                 &vma->vm_obj_list))) {
                        printf("%s:%s: Traversed all shadows and the original "
                               "file's vm_object, but could not find the "
                               "faulty page in this vma.\n",__TASKNAME__,
                               __FUNCTION__);
                        BUG();
                }
        }

        /*
         * Copy the page. This traverse and copy is like a page-in operation
         * of a pager, except that the page is moving along vm_objects.
         */
        new_page = copy_page(page);

        /* Update page details */
        spin_lock(&new_page->lock);
        new_page->count = 1;
        new_page->owner = copier_link->obj;
        new_page->offset = page_offset;
        new_page->virtual = 0;

        /* Add the page to owner's list of in-memory pages */
        BUG_ON(!list_empty(&new_page->list));
        insert_page_olist(new_page, new_page->owner->obj);
        spin_unlock(&page->lock);

        /* Map the new page to faulty task */
        l4_map(page_to_phys(new_page), (void *)page_align(fault->address), 1,
               (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
               fault->task->tid);

        /*
         * Finished handling the actual fault, now check for possible
         * shadow collapses. Does the copier shadow completely shadow
         * the one underlying it?
         */
        if (!(vmo_link = vma_next_object(copier_link, &vma->vm_obj_list))) {
                /* Copier must have an object under it */
                printf("Copier must have had an object under it!\n");
                BUG();
        }

        /* Compare whether page caches overlap */
        if (vm_object_check_subset(copier_link->obj, vmo_link->obj)) {
                /* They do overlap, so drop reference to lesser shadow */
                vma_drop_link(copier_link, vmo_link);

                /* vm object reference down to one? */
                if (vmo_link->obj->refcnt == 1)
                        /* The object is mergeable? i.e. not a file? */
                        if (vmo_link->type != VM_OBJ_FILE)
                                /* Merge it with its only shadower */
                                vm_object_merge(vmo_link);
        }

        return 0;
}


/*
 * Handles the page fault, all entries here are assumed *legal* faults,
 * i.e. do_page_fault() should have already checked for illegal accesses.
 */
int __do_page_fault(struct fault_data *fault)
{
        unsigned int reason = fault->reason;
        unsigned int vma_flags = fault->vma->flags;
        unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
        struct vm_object *vmo;
        struct page *page;

        /* Handle read */
        if ((reason & VM_READ) && (pte_flags & VM_NONE)) {
                unsigned long file_offset = fault_to_file_offset(fault);
                vmo = vma_get_next_object(&vma->vm_obj_list);

                /* Get the page from its pager */
                if (IS_ERR(page = vmo->pager.ops->page_in(vmo, file_offset))) {
                        printf("%s: Could not obtain faulty page.\n",
                               __TASKNAME__);
                        BUG();
                }
                /* Map it to faulty task */
                l4_map(page_to_phys(page), (void *)page_align(fault->address),1,
                       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
                       fault->task->tid);
        }

        /* Handle write */
        if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
                /* Copy-on-write */
                if (vma_flags & VMA_PRIVATE) {
                        copy_on_write(fault);
                }
        }
}

/*
 * Handles any page ownership change or allocation for file-backed pages.
 */
@@ -679,6 +747,7 @@ int do_anon_page(struct fault_data *fault)
        }
        return 0;
}
#endif

/*
 * Page fault model:
@@ -743,10 +812,8 @@ int do_page_fault(struct fault_data *fault)
                BUG(); /* Can't handle this yet. */
        }

        if (vma_flags & VMA_ANON)
                err = do_anon_page(fault);
        else
                err = do_file_page(fault);
        /* Handle legitimate faults */
        __do_page_fault(fault);

        /* Return the ipc and by doing so restart the faulty thread */
        l4_ipc_return(err);