Fixed an msync() issue.

msync() looped on find_vma_byrange() with a fixed range, so it kept getting the same vma back instead of traversing each vma in the range. It now walks the range with find_vma(), advancing the lookup address past each vma it flushes.
Bahadir Balban
2008-11-07 15:10:19 +02:00
parent 3cb36b632a
commit f0348cc356
2 changed files with 17 additions and 3 deletions
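
The heart of the change is the walk pattern in the second file below: instead of asking for any vma overlapping a fixed range over and over, the loop looks up the vma containing the current address and then steps past it. The following is a minimal standalone sketch of that pattern, not this tree's code; the pfn_start/pfn_end fields, the __pfn()/__pfn_to_addr() conversions and the advance step mirror the diff, while the singly linked list, the PAGE_SHIFT value, the counting flush stub and the do_msync_walk name are assumptions for illustration.

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT		12	/* assumed 4K pages */
#define __pfn(addr)		((unsigned long)(addr) >> PAGE_SHIFT)
#define __pfn_to_addr(pfn)	((unsigned long)(pfn) << PAGE_SHIFT)

/* Simplified stand-in for the real vma list (which uses list_head) */
struct vm_area {
	unsigned long pfn_start;	/* first page of the mapping */
	unsigned long pfn_end;		/* one past the last page */
	struct vm_area *next;
};

/* Same semantics as find_vma() in the diff: vma containing addr, or NULL */
static struct vm_area *find_vma(unsigned long addr, struct vm_area *head)
{
	unsigned long pfn = __pfn(addr);
	struct vm_area *vma;

	for (vma = head; vma; vma = vma->next)
		if (pfn >= vma->pfn_start && pfn < vma->pfn_end)
			return vma;
	return NULL;
}

/* Counting stub standing in for vma_flush_pages() */
static int flushed;
static int vma_flush_pages(struct vm_area *vma)
{
	(void)vma;
	flushed++;
	return 0;
}

/* The fixed traversal: flush the vma holding addr, step addr past it,
 * stop once the end of the msync range is reached. */
static int do_msync_walk(void *vaddr, unsigned long npages,
			 struct vm_area *head)
{
	const unsigned long msync_end = __pfn(vaddr) + npages;
	unsigned long addr = (unsigned long)vaddr;
	struct vm_area *vma;
	int err;

	while ((vma = find_vma(addr, head))) {
		if ((err = vma_flush_pages(vma)) < 0)
			return err;
		addr = __pfn_to_addr(vma->pfn_end);
		if (addr >= msync_end)
			break;
	}
	return 0;
}

int main(void)
{
	/* Two adjacent vmas covering pages [16, 32) and [32, 48) */
	struct vm_area b = { 32, 48, NULL };
	struct vm_area a = { 16, 32, &b };

	/* An msync spanning both vmas flushes each exactly once */
	assert(do_msync_walk((void *)__pfn_to_addr(16), 32, &a) == 0);
	assert(flushed == 2);
	return 0;
}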

@@ -183,6 +183,7 @@ struct vm_area {
 	unsigned long file_offset;	/* File offset in pfns */
 };
 
+/* Finds 'a' vma that is in this range. Only useful for munmap() */
 static inline struct vm_area *
 find_vma_byrange(unsigned long pfn_start,
 		 unsigned long pfn_end, struct list_head *vm_area_list)
@@ -199,6 +200,11 @@ find_vma_byrange(unsigned long pfn_start,
 }
 
+/*
+ * Finds the vma that has the given address.
+ * TODO: In the future a lot of use cases may need to traverse each vma
+ * rather than searching the address. E.g. munmap/msync
+ */
 static inline struct vm_area *find_vma(unsigned long addr,
 				       struct list_head *vm_area_list)
 {
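
The "only useful for munmap()" remark makes sense because munmap removes the vma it just looked up, so repeating the same fixed-range query shrinks the list and eventually returns NULL; msync flushes pages but deletes nothing, so the same loop shape would return the identical vma forever. A hypothetical self-contained illustration (the list layout and the list_del helper are simplified stand-ins, not this tree's list API):

#include <assert.h>
#include <stddef.h>

/* Minimal vma list, same layout as in the earlier sketch */
struct vm_area {
	unsigned long pfn_start;
	unsigned long pfn_end;		/* exclusive */
	struct vm_area *next;
};

/* Returns *a* vma overlapping [pfn_start, pfn_end), like the diff above */
static struct vm_area *find_vma_byrange(unsigned long pfn_start,
					unsigned long pfn_end,
					struct vm_area **head)
{
	for (struct vm_area *vma = *head; vma; vma = vma->next)
		if (vma->pfn_start < pfn_end && vma->pfn_end > pfn_start)
			return vma;
	return NULL;
}

/* Unlink a vma, the way munmap shrinks the list (simplified) */
static void list_del(struct vm_area *vma, struct vm_area **head)
{
	for (struct vm_area **p = head; *p; p = &(*p)->next)
		if (*p == vma) {
			*p = vma->next;
			return;
		}
}

int main(void)
{
	struct vm_area b = { 32, 48, NULL };
	struct vm_area a = { 16, 32, &b };
	struct vm_area *head = &a, *vma;
	int unmapped = 0;

	/* munmap-style loop: each pass removes the vma it found, so the
	 * fixed-range lookup makes progress and finally returns NULL */
	while ((vma = find_vma_byrange(16, 48, &head))) {
		list_del(vma, &head);
		unmapped++;
	}
	assert(unmapped == 2 && head == NULL);
	return 0;
}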

@@ -169,23 +169,31 @@ int do_msync(struct tcb *task, void *vaddr, unsigned long npages, int flags)
 	const unsigned long msync_start = __pfn(vaddr);
 	const unsigned long msync_end = msync_start + npages;
 	struct vm_area *vma;
+	unsigned long addr = (unsigned long)vaddr;
 	int err;
 
 	/* Find a vma that overlaps with this address range */
-	while ((vma = find_vma_byrange(msync_start, msync_end,
-				       &task->vm_area_head->list))) {
+	while ((vma = find_vma(addr, &task->vm_area_head->list))) {
 		/* Flush pages if vma is writable, dirty and file-backed. */
 		if ((err = vma_flush_pages(vma)) < 0)
 			return err;
+
+		/* Update address to next vma */
+		addr = __pfn_to_addr(vma->pfn_end);
+
+		/* Are we still good to go? */
+		if (addr >= msync_end)
+			break;
 	}
 
 	return 0;
 }
 
 int sys_msync(struct tcb *task, void *start, unsigned long length, int flags)
 {
 	/* Must be aligned on a page boundary */
-	if ((unsigned long)start & PAGE_MASK)
+	if (!is_page_aligned(start))
 		return -EINVAL;
 
 	/*
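
The alignment check rewrite at the end is incidental but worth a note: is_page_aligned() states the intent directly instead of relying on how PAGE_MASK is defined (with a Linux-style PAGE_MASK of ~(PAGE_SIZE - 1), start & PAGE_MASK is nonzero for any nonzero aligned pointer, so the old test only works when PAGE_MASK means the low bits). A plausible definition, assumed rather than taken from this tree:

#define PAGE_SIZE	0x1000			/* assumed 4K pages */
#define is_page_aligned(addr) \
	(!((unsigned long)(addr) & (PAGE_SIZE - 1)))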