From 1cc6a875475576a9faeb590814fc9930642c0733 Mon Sep 17 00:00:00 2001
From: Bahadir Balban
Date: Fri, 14 Mar 2008 18:53:53 +0000
Subject: [PATCH] Add vm_objlink_create() and link the mapped file into the vma's object list in do_mmap()

---
 tasks/mm0/include/vm_area.h |  1 +
 tasks/mm0/src/fault.c       | 19 ++++++++++++---
 tasks/mm0/src/mmap.c        | 48 ++++++++++++++++++++-----------------
 3 files changed, 43 insertions(+), 25 deletions(-)

diff --git a/tasks/mm0/include/vm_area.h b/tasks/mm0/include/vm_area.h
index 4e7b8cc..2a5b754 100644
--- a/tasks/mm0/include/vm_area.h
+++ b/tasks/mm0/include/vm_area.h
@@ -186,6 +186,7 @@ extern struct vm_pager swap_pager;
 extern struct list_head vm_object_list;
 
 /* vm object link related functions */
+struct vm_obj_link *vm_objlink_create(void);
 struct vm_obj_link *vma_next_link(struct list_head *link,
                                   struct list_head *head);
 
diff --git a/tasks/mm0/src/fault.c b/tasks/mm0/src/fault.c
index 1cc5940..b17c827 100644
--- a/tasks/mm0/src/fault.c
+++ b/tasks/mm0/src/fault.c
@@ -31,8 +31,8 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
 }
 
 /*
- * Given a reference to a vm_object link, returns the next link.
- * If back to given head, returns 0.
+ * Given a reference to a vm_object link, returns the next link but
+ * avoids wrapping around back to head. If next is head, returns 0.
  *
  * vma->link1->link2->link3
  *        |      |      |
@@ -47,7 +47,7 @@ struct vm_obj_link *vma_next_link(struct list_head *link,
                                   struct list_head *head)
 {
         BUG_ON(list_empty(link));
-        if (link == head)
+        if (link->next == head)
                 return 0;
         else
                 return list_entry(link->next, struct vm_obj_link, list);
@@ -179,6 +179,18 @@ int vma_merge_link(struct vm_object *vmo)
         return 0;
 }
 
+struct vm_obj_link *vm_objlink_create(void)
+{
+        struct vm_obj_link *vmo_link;
+
+        if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
+                return 0;
+        INIT_LIST_HEAD(&vmo_link->list);
+        INIT_LIST_HEAD(&vmo_link->shref);
+
+        return vmo_link;
+}
+
 /*
  * Creates a bare vm_object along with its vma link, since
  * the shadow will be immediately used in a vma object list.
@@ -196,6 +208,7 @@ struct vm_obj_link *vma_create_shadow(void)
                 return 0;
         }
         INIT_LIST_HEAD(&vmo_link->list);
+        INIT_LIST_HEAD(&vmo_link->shref);
         vmo->flags = VM_OBJ_SHADOW;
         vmo_link->obj = vmo;
 
diff --git a/tasks/mm0/src/mmap.c b/tasks/mm0/src/mmap.c
index b6e9217..07e2362 100644
--- a/tasks/mm0/src/mmap.c
+++ b/tasks/mm0/src/mmap.c
@@ -481,12 +481,14 @@ int mmap_address_validate(unsigned long map_address, unsigned int vm_flags)
  * The actual paging in/out of the file from/into memory pages is handled by
  * the file's pager upon page faults.
  */
-int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task,
-            unsigned long map_address, unsigned int flags, unsigned int npages)
+int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
+            struct tcb *task, unsigned long map_address, unsigned int flags,
+            unsigned int npages)
 {
-        unsigned long file_npages;
         unsigned long map_pfn = __pfn(map_address);
+        unsigned long file_npages;
         struct vm_area *new, *mapped;
+        struct vm_obj_link *vmo_link;
 
         /* Set up devzero if none given */
         if (!mapfile) {
@@ -497,7 +499,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
                 BUG();
         }
 
-        /* Get total file pages, check if mappin is within file size */
+        /* Get total file pages, check if mapping is within file size */
         file_npages = __pfn(page_align_up(mapfile->length));
         if (npages > file_npages - file_offset) {
                 printf("%s: Trying to map %d pages from page %d, "
@@ -518,33 +520,35 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 
         /* Check invalid map address */
         if (!mmap_address_validate(map_address, flags)) {
-                /* Get new map address for region of this size */
-                if ((int)(map_address =
-                          find_unmapped_area(npages, task)) < 0)
+                map_address = find_unmapped_area(npages, task);
+                if ((int)map_address < 0)
                         return (int)map_address;
-
-                /* Create a new vma for newly allocated address */
-                else if (!(new = vma_new(__pfn(map_address), npages,
-                                         flags, file_offset, mapfile)))
-                        return -ENOMEM;
-                /* Successful? Add it to list and return */
-                goto out_success;
+        } else {
+                /*
+                 * FIXME: Currently we don't allow overlapping vmas.
+                 * To be fixed soon. We need to handle intersection,
+                 * splitting, shrink/grow etc.
+                 */
+                list_for_each_entry(mapped, &task->vm_area_list, list)
+                        BUG_ON(vma_intersect(map_pfn, map_pfn + npages,
+                                             mapped));
         }
 
-        /*
-         * FIXME: Currently we don't allow overlapping vmas. To be fixed soon
-         * We need to handle intersection, splitting, shrink/grow etc.
-         */
-        list_for_each_entry(mapped, &task->vm_area_list, list)
-                BUG_ON(vma_intersect(map_pfn, map_pfn + npages, mapped));
-
         /* For valid regions that aren't allocated by us, create the vma. */
         if (!(new = vma_new(__pfn(map_address), npages, flags,
                             file_offset, mapfile)))
                 return -ENOMEM;
 
-out_success:
+        /* Attach the file as the first vm object of this vma */
+        if (!(vmo_link = vm_objlink_create())) {
+                kfree(new);
+                return -ENOMEM;
+        }
+        vmo_link->obj = &mapfile->vm_obj;
+        list_add_tail(&vmo_link->list, &new->vm_obj_list);
+
+        /* Finished initialising the vma, add it to task */
         printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
                map_address, map_address + npages * PAGE_SIZE);
         list_add(&new->list, &task->vm_area_list);
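
A minimal usage sketch (not part of the patch) of how the new vm_objlink_create()
helper and the adjusted vma_next_link() semantics are meant to work together,
assuming the declarations from tasks/mm0/include/vm_area.h. attach_file_to_vma()
is a hypothetical caller used only for illustration; do_mmap() above performs the
same attach step on the vma it creates.

static int attach_file_to_vma(struct vm_area *vma, struct vm_file *mapfile)
{
        struct vm_obj_link *link, *cur;

        /* Allocate an empty link and point it at the file's vm_object */
        if (!(link = vm_objlink_create()))
                return -ENOMEM;
        link->obj = &mapfile->vm_obj;

        /* The file object becomes the front-most object of this vma */
        list_add_tail(&link->list, &vma->vm_obj_list);

        /*
         * Walk the object chain; vma_next_link() now returns 0 when the
         * next element is the list head, instead of wrapping around.
         */
        cur = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list);
        while (cur) {
                /* cur->obj is the vm_object at this position in the chain */
                cur = vma_next_link(&cur->list, &vma->vm_obj_list);
        }

        return 0;
}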