diff --git a/tasks/mm0/include/vm_area.h b/tasks/mm0/include/vm_area.h
index f424755..dd7004c 100644
--- a/tasks/mm0/include/vm_area.h
+++ b/tasks/mm0/include/vm_area.h
@@ -183,6 +183,7 @@ struct vm_area {
         unsigned long file_offset;      /* File offset in pfns */
 };
 
+/* Finds 'a' vma that is in this range. Only useful for munmap() */
 static inline struct vm_area *
 find_vma_byrange(unsigned long pfn_start,
                  unsigned long pfn_end,
                  struct list_head *vm_area_list)
@@ -199,6 +200,11 @@ find_vma_byrange(unsigned long pfn_start,
 }
 
+/*
+ * Finds the vma that has the given address.
+ * TODO: In the future a lot of use cases may need to traverse each vma
+ * rather than searching the address. E.g. munmap/msync
+ */
 static inline struct vm_area *find_vma(unsigned long addr,
                                        struct list_head *vm_area_list)
 {
diff --git a/tasks/mm0/src/munmap.c b/tasks/mm0/src/munmap.c
index 1394667..ba28c4f 100644
--- a/tasks/mm0/src/munmap.c
+++ b/tasks/mm0/src/munmap.c
@@ -169,23 +169,31 @@ int do_msync(struct tcb *task, void *vaddr, unsigned long npages, int flags)
         const unsigned long msync_start = __pfn(vaddr);
         const unsigned long msync_end = msync_start + npages;
         struct vm_area *vma;
+        unsigned long addr = (unsigned long)vaddr;
         int err;
 
         /* Find a vma that overlaps with this address range */
-        while ((vma = find_vma_byrange(msync_start, msync_end,
-                                       &task->vm_area_head->list))) {
+        while ((vma = find_vma(addr, &task->vm_area_head->list))) {
                 /* Flush pages if vma is writable, dirty and file-backed. */
                 if ((err = vma_flush_pages(vma)) < 0)
                         return err;
+
+                /* Update address to next vma */
+                addr = __pfn_to_addr(vma->pfn_end);
+
+                /* Are we still good to go? */
+                if (addr >= msync_end)
+                        break;
         }
+
         return 0;
 }
 
 int sys_msync(struct tcb *task, void *start, unsigned long length, int flags)
 {
         /* Must be aligned on a page boundary */
-        if ((unsigned long)start & PAGE_MASK)
+        if (!is_page_aligned(start))
                 return -EINVAL;
 
         /*
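
For reference, the standalone sketch below (not part of the patch) mirrors the shape of the new do_msync() loop: look up the vma containing the current address, flush it, then jump to the first address past that vma, stopping once the end of the msync range is reached. All names here (vma_stub, find_vma_stub, do_msync_stub) are hypothetical stand-ins, the 4 KiB page size and the array-backed lookup are assumptions, and the flush step is reduced to a printf; the real mm0 types and helpers are the ones shown in the diff above.

#include <stdio.h>

#define PAGE_BITS               12              /* assumed 4 KiB pages */
#define __pfn(addr)             ((unsigned long)(addr) >> PAGE_BITS)
#define __pfn_to_addr(pfn)      ((unsigned long)(pfn) << PAGE_BITS)

/* Hypothetical stand-in for struct vm_area: just the page range. */
struct vma_stub {
        unsigned long pfn_start;        /* first page of the area */
        unsigned long pfn_end;          /* one past the last page */
};

/* Two adjacent areas standing in for a task's vma list. */
static struct vma_stub vmas[] = {
        { 0x10, 0x14 },
        { 0x14, 0x18 },
};

/* Stand-in for find_vma(): return the area containing addr, or NULL. */
static struct vma_stub *find_vma_stub(unsigned long addr)
{
        unsigned long pfn = __pfn(addr);

        for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
                if (pfn >= vmas[i].pfn_start && pfn < vmas[i].pfn_end)
                        return &vmas[i];
        return NULL;
}

/* Same shape as the patched do_msync() loop; flushing is a printf here. */
static int do_msync_stub(unsigned long start, unsigned long npages)
{
        const unsigned long msync_end = __pfn(start) + npages;  /* in pfns */
        unsigned long addr = start;
        struct vma_stub *vma;

        while ((vma = find_vma_stub(addr))) {
                printf("flush vma [%#lx, %#lx)\n",
                       vma->pfn_start, vma->pfn_end);

                /* Jump to the first address after this vma. */
                addr = __pfn_to_addr(vma->pfn_end);

                /* Done once we pass the end of the requested range
                 * (compared in byte-address units in this sketch). */
                if (addr >= __pfn_to_addr(msync_end))
                        break;
        }
        return 0;
}

int main(void)
{
        /* msync 4 pages starting one page into the first area. */
        return do_msync_stub(__pfn_to_addr(0x11), 4);
}

Running this prints one line per stub area covered by the range; both areas are flushed because they are adjacent. If there were a hole between them, find_vma_stub() would return NULL and the walk would stop there, which mirrors how the patched loop terminates when find_vma() finds no vma at the current address.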