mm0: replace the vma overlap BUG_ON with do_munmap() in do_mmap()

do_mmap() used to walk the task's vma list and BUG_ON() any
intersection between the requested pfn range and an existing vma
(marked FIXME: overlapping vmas not yet handled).  Replace that
check with a call to do_munmap(), which already knows how to
shrink, split and destroy the vmas in a range, so a new mapping
now simply unmaps whatever it overlaps before being created.

To support this, do_munmap() takes its start address as an
unsigned long instead of void *, and sys_munmap() is updated to
pass the cast address and to test page alignment via
is_page_aligned() (still returning -EINVAL on unaligned input).

NOTE(review): the trailing context of the last hunk was truncated
in the copy this was reconstructed from; its header has been
recounted to match the surviving body.
---
diff --git a/tasks/mm0/src/mmap.c b/tasks/mm0/src/mmap.c
index 6cdbea9..7bd2ac1 100644
--- a/tasks/mm0/src/mmap.c
+++ b/tasks/mm0/src/mmap.c
@@ -147,9 +147,10 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 	      unsigned int flags, unsigned int npages)
 {
 	unsigned long map_pfn = __pfn(map_address);
+	unsigned long file_npages;
 	struct vm_area *new, *mapped;
 	struct vm_obj_link *vmo_link, *vmo_link2;
-	unsigned long file_npages;
+	int err;
 
 	/* Set up devzero if none given */
 	if (!mapfile) {
@@ -185,18 +186,12 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 		return PTR_ERR(-EINVAL);
 	else if (!(map_address = mmap_new_address(task, flags, npages)))
 		return PTR_ERR(-ENOMEM);
-	} else {
-		/*
-		 * FIXME: Currently we don't allow overlapping vmas.
-		 * To be fixed soon. We need to handle intersection,
-		 * splitting, shrink/grow etc.
-		 */
-		list_for_each_entry(mapped, &task->vm_area_head->list, list)
-			BUG_ON(set_intersection(map_pfn, map_pfn + npages,
-						mapped->pfn_start,
-						mapped->pfn_end));
 	}
 
+	/* Unmap any existing vmas that overlap with the new mapping */
+	if ((err = do_munmap(task, map_address, npages)) < 0)
+		return PTR_ERR(err);
+
 	/* For valid regions that aren't allocated by us, create the vma. */
 	if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset)))
 		return PTR_ERR(-ENOMEM);
diff --git a/tasks/mm0/src/munmap.c b/tasks/mm0/src/munmap.c
index 7ce5914..5f22a3c 100644
--- a/tasks/mm0/src/munmap.c
+++ b/tasks/mm0/src/munmap.c
@@ -133,7 +133,7 @@ int vma_flush_pages(struct vm_area *vma)
  * may span into zero or more vmas, and may involve shrinking, splitting
  * and destruction of multiple vmas.
  */
-int do_munmap(struct tcb *task, void *vaddr, unsigned long npages)
+int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
 {
 	const unsigned long munmap_start = __pfn(vaddr);
 	const unsigned long munmap_end = munmap_start + npages;
@@ -167,7 +167,8 @@ int sys_munmap(struct tcb *task, void *start, unsigned long length)
 {
 	/* Must be aligned on a page boundary */
-	if ((unsigned long)start & PAGE_MASK)
+	if (!is_page_aligned(start))
 		return -EINVAL;
 
-	return do_munmap(task, start, __pfn(page_align_up(length)));
+	return do_munmap(task, (unsigned long)start,
+			 __pfn(page_align_up(length)));
 }