munmap: now compiling.

do_munmap currently shrinks, splits, and destroys vmas and unmaps the given
virtual address range from the task. Unmapped pages may end up completely
unused, but page reclamation is handled elsewhere in the pager rather than
directly in the munmap path itself.
This commit is contained in:
Bahadir Balban
2008-10-29 16:59:06 +02:00
parent 3421a29693
commit 94a126dcde
4 changed files with 34 additions and 301 deletions

View File

@@ -24,7 +24,7 @@
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long file_offset);
int do_munmap(void *vaddr, unsigned long size, struct tcb *task);
int do_munmap(struct tcb *task, void *vaddr, unsigned long size);
void *do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages);

View File

@@ -188,7 +188,6 @@ find_vma_byrange(unsigned long pfn_start,
unsigned long pfn_end, struct list_head *vm_area_list)
{
struct vm_area *vma;
unsigned long pfn = __pfn(addr);
list_for_each_entry(vma, vm_area_list, list) {
if ((pfn_start >= vma->pfn_start) && (pfn_start < vma->pfn_end))