Added flushing of file pages to do_munmap() for file-backed mappings.

This commit is contained in:
Bahadir Balban
2008-10-29 19:03:19 +02:00
parent 94a126dcde
commit 51af9968c2
4 changed files with 49 additions and 6 deletions

View File

@@ -6,9 +6,6 @@
#include <posix/sys/types.h>
#include <task.h>
void vmfile_init(void);
struct vm_file *vmfile_alloc_init(void);
int vfs_read(unsigned long vnum, unsigned long f_offset, unsigned long npages,
void *pagebuf);
int vfs_write(unsigned long vnum, unsigned long f_offset, unsigned long npages,
@@ -19,6 +16,7 @@ int sys_lseek(struct tcb *sender, int fd, off_t offset, int whence);
int sys_close(struct tcb *sender, int fd);
int sys_fsync(struct tcb *sender, int fd);
int file_open(struct tcb *opener, int fd);
int flush_file_pages(struct vm_file *f);
struct vfs_file_data {
unsigned long vnum;

View File

@@ -604,6 +604,8 @@ struct page *copy_on_write(struct fault_data *fault)
* First access from first process simply writes to the pages
* of that file. All subsequent accesses by other processes
* do so as well.
*
* FIXME: Add VM_DIRTY bit for every page that has write-faulted.
*/
int __do_page_fault(struct fault_data *fault)

View File

@@ -355,9 +355,13 @@ int write_file_pages(struct vm_file *f, unsigned long pfn_start,
/*
 * Flush all dirty file pages back to the vfs and update file stats.
 *
 * Returns 0 on success, or the negative error code from the first
 * failing step. Note that if write_file_pages() fails, the stat
 * update is skipped entirely.
 */
int flush_file_pages(struct vm_file *f)
{
	int err;

	/* Write back every page spanned by the file's length */
	if ((err = write_file_pages(f, 0, __pfn(page_align_up(f->length)))) < 0)
		return err;

	/* Propagate updated size/times to the vfs */
	if ((err = vfs_update_file_stats(f)) < 0)
		return err;

	return 0;
}
@@ -647,7 +651,7 @@ int sys_read(struct tcb *task, int fd, void *buf, int count)
*
* Error:
* We find the page the buffer is in, and then copy from the *start* of the
* page rather than from the buffer's offset within that page. - I think
* this is fixed.
*/
int sys_write(struct tcb *task, int fd, void *buf, int count)
{

View File

@@ -4,6 +4,7 @@
* Copyright (C) 2008 Bahadir Balban
*/
#include <mmap.h>
#include <file.h>
#include <l4/api/errno.h>
#include <l4lib/arch/syslib.h>
@@ -87,6 +88,40 @@ int vma_unmap(struct vm_area *vma, struct tcb *task,
return 0;
}
/*
 * Flushes a vma's pages back to disk if its vm_object turns out to be
 * a writable, shared, vfs-backed file; otherwise does nothing.
 */
int vma_flush_pages(struct vm_area *vma)
{
	struct vm_object *obj;
	int ret;

	/* A read-only mapping cannot have dirtied its object */
	/* FIXME: Ensure pf_handler sets VM_DIRTY on write-faulted pages */
	if (!(vma->flags & VM_WRITE))
		return 0;

	/*
	 * Inspect only the first object in the chain: at most one
	 * VM_SHARED file-backed object may exist under a vma.
	 */
	BUG_ON(list_empty(&vma->list));
	obj = list_entry(vma->list.next, struct vm_object, list);

	/* Skip anything that is not a shared, non-anonymous file object */
	if (!(obj->flags & VM_OBJ_FILE) ||
	    !(obj->flags & VMA_SHARED) ||
	    (obj->flags & VMA_ANONYMOUS))
		return 0;

	/* The above criteria should only ever match a vfs file */
	BUG_ON(vm_object_to_file(obj)->type != VM_FILE_VFS);

	/* Write the dirty pages back */
	if ((ret = flush_file_pages(vm_object_to_file(obj))) < 0)
		return ret;

	return 0;
}
/*
* Unmaps the given virtual address range from the task, the region
* may span into zero or more vmas, and may involve shrinking, splitting
@@ -103,6 +138,10 @@ int do_munmap(struct tcb *task, void *vaddr, unsigned long npages)
while ((vma = find_vma_byrange(munmap_start, munmap_end,
&task->vm_area_head->list))) {
/* Flush pages if vma is writable, dirty and file-backed. */
if ((err = vma_flush_pages(vma)) < 0)
return err;
/* Unmap the vma accordingly */
if ((err = vma_unmap(vma, task, munmap_start,
munmap_end)) < 0)