Mirror of https://github.com/drasko/codezero.git

For sys_mmap(), added unmapping of existing vmas covering the area that is about to be mmap'ed.

@@ -147,9 +147,10 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
               unsigned int flags, unsigned int npages)
 {
         unsigned long map_pfn = __pfn(map_address);
-        unsigned long file_npages;
         struct vm_area *new, *mapped;
         struct vm_obj_link *vmo_link, *vmo_link2;
+        unsigned long file_npages;
+        int err;
 
         /* Set up devzero if none given */
         if (!mapfile) {

@@ -185,18 +186,12 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
                         return PTR_ERR(-EINVAL);
                 else if (!(map_address = mmap_new_address(task, flags, npages)))
                         return PTR_ERR(-ENOMEM);
-        } else {
-                /*
-                 * FIXME: Currently we don't allow overlapping vmas.
-                 * To be fixed soon. We need to handle intersection,
-                 * splitting, shrink/grow etc.
-                 */
-                list_for_each_entry(mapped, &task->vm_area_head->list, list)
-                        BUG_ON(set_intersection(map_pfn, map_pfn + npages,
-                                                mapped->pfn_start,
-                                                mapped->pfn_end));
         }
 
+        /* Unmap any existing vmas that overlap with the new mapping */
+        if ((err = do_munmap(task, map_address, npages)) < 0)
+                return PTR_ERR(err);
+
         /* For valid regions that aren't allocated by us, create the vma. */
         if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset)))
                 return PTR_ERR(-ENOMEM);
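
The removed else-branch only asserted, via BUG_ON(set_intersection(...)), that the requested pfn range did not intersect any existing vma. With this commit the overlap is instead torn down up front by do_munmap(), so a new mapping replaces whatever was mapped there. A minimal sketch of the interval test involved, assuming half-open [pfn_start, pfn_end) ranges; set_intersection() itself is not part of this diff, and the helper below is only an illustration, not the repository's implementation:

/* Illustrative only: non-zero if the half-open pfn ranges
 * [start1, end1) and [start2, end2) overlap. */
static inline int pfn_ranges_overlap(unsigned long start1, unsigned long end1,
                                     unsigned long start2, unsigned long end2)
{
        return start1 < end2 && start2 < end1;
}

The old loop bugged out on any vma for which such a test held; the new code hands the same [map_pfn, map_pfn + npages) range to do_munmap() instead.
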
@@ -133,7 +133,7 @@ int vma_flush_pages(struct vm_area *vma)
  * may span into zero or more vmas, and may involve shrinking, splitting
  * and destruction of multiple vmas.
  */
-int do_munmap(struct tcb *task, void *vaddr, unsigned long npages)
+int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
 {
         const unsigned long munmap_start = __pfn(vaddr);
         const unsigned long munmap_end = munmap_start + npages;
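
As the comment above do_munmap() notes, the pfn range [munmap_start, munmap_end) may leave a vma untouched, destroy it, shrink it from either end, or split it in two. A sketch of that per-vma case analysis, with hypothetical names; this is not code from this commit:

/* Hypothetical classification of how the unmap range relates to one vma
 * whose pages are [pfn_start, pfn_end); the enum names are placeholders. */
enum munmap_action { VMA_KEEP, VMA_DESTROY, VMA_SPLIT, VMA_SHRINK_FRONT, VMA_SHRINK_TAIL };

static enum munmap_action classify_vma(unsigned long pfn_start, unsigned long pfn_end,
                                        unsigned long munmap_start, unsigned long munmap_end)
{
        if (munmap_end <= pfn_start || munmap_start >= pfn_end)
                return VMA_KEEP;          /* no overlap at all */
        if (munmap_start <= pfn_start && munmap_end >= pfn_end)
                return VMA_DESTROY;       /* vma completely covered */
        if (munmap_start > pfn_start && munmap_end < pfn_end)
                return VMA_SPLIT;         /* hole punched in the middle */
        if (munmap_start <= pfn_start)
                return VMA_SHRINK_FRONT;  /* leading pages unmapped */
        return VMA_SHRINK_TAIL;           /* trailing pages unmapped */
}
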
@@ -167,10 +167,11 @@ int do_munmap(struct tcb *task, void *vaddr, unsigned long npages)
 int sys_munmap(struct tcb *task, void *start, unsigned long length)
 {
         /* Must be aligned on a page boundary */
-        if ((unsigned long)start & PAGE_MASK)
+        if (!is_page_aligned(start))
                 return -EINVAL;
 
-        return do_munmap(task, start, __pfn(page_align_up(length)));
+        return do_munmap(task, (unsigned long)start,
+                         __pfn(page_align_up(length)));
 }
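
sys_munmap() now delegates the alignment test to is_page_aligned() and converts the byte length into a page count before calling the unsigned long flavour of do_munmap(). A short sketch of that arithmetic, assuming 4 KB pages and plausible shapes for the helper macros (their actual definitions live elsewhere in the tree and may differ):

#define PAGE_BITS               12                      /* assumed: 4 KB pages */
#define PAGE_SIZE               (1UL << PAGE_BITS)
#define PAGE_MASK               (PAGE_SIZE - 1)         /* low-bit mask, as the old check used it */

#define is_page_aligned(addr)   (!((unsigned long)(addr) & PAGE_MASK))
#define page_align_up(len)      (((unsigned long)(len) + PAGE_MASK) & ~PAGE_MASK)
#define __pfn(x)                ((unsigned long)(x) >> PAGE_BITS)

/* Example: munmap(start = (void *)0x40002000, length = 0x1800)
 *   is_page_aligned(0x40002000)  -> true, so no -EINVAL
 *   page_align_up(0x1800)        -> 0x2000
 *   __pfn(0x2000)                -> 2 pages handed to do_munmap()
 */
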