Mapping precision fixes on l4_unmap and do_munmap()

l4_unmap now returns -1 if the given range was only partially unmapped.

do_munmap() now only unmaps address ranges that correspond to the
unmapped vmas. Trying to unmap regions with no corresponding vmas
causes problems in corner cases: e.g. mm0, which mmaps its own
address space during initialisation, would otherwise unmap its whole
address space and fail to execute.
This commit is contained in:
Bahadir Balban
2008-11-08 10:18:35 +02:00
parent 0e3f069713
commit 94daebd0c5
7 changed files with 82 additions and 39 deletions

View File

@@ -111,8 +111,8 @@ void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
pgd_table_t *pgd);
void add_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void remove_mapping(unsigned long vaddr);
void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd);
int remove_mapping(unsigned long vaddr);
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd);
void prealloc_phys_pagedesc(void);
int check_mapping_pgd(unsigned long vaddr, unsigned long size,

View File

@@ -40,26 +40,30 @@ found:
return 0;
}
/*
 * Unmaps the given range of pages from the given task's address space.
 * If the complete range is unmapped successfully, returns 0. If part of
 * the range was found to be already unmapped, returns -1. This may or
 * may not be an error from the caller's point of view.
 */
int sys_unmap(syscall_context_t *regs)
{
	unsigned long virtual = regs->r0;	/* Start of range to unmap */
	unsigned long npages = regs->r1;	/* Number of pages in range */
	unsigned int tid = regs->r2;		/* Target task id */
	struct ktcb *target;
	/*
	 * retval must start at 0: it is only overwritten when a page is
	 * found already unmapped, and is the value returned on full success.
	 */
	int ret, retval = 0;

	if (tid == current->tid)	/* The easiest case */
		target = current;
	else if (!(target = find_task(tid)))	/* Search the tcb hash list */
		return -ESRCH;

	for (unsigned long i = 0; i < npages; i++) {
		ret = remove_mapping_pgd(virtual + i * PAGE_SIZE, target->pgd);
		if (ret)
			/* Remember that part of the range was not mapped */
			retval = ret;
	}

	return retval;
}

View File

@@ -274,31 +274,38 @@ int check_mapping(unsigned long vaddr, unsigned long size,
}
/* FIXME: Empty PMDs should be returned here !!! */
/*
 * Resets the pmd entry covering vaddr back to a fault entry.
 * Returns 0 if a large or small page mapping was removed, -1 if the
 * entry was already a fault (nothing mapped at vaddr).
 */
int __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
{
	pmd_t pmd_i = PMD_INDEX(vaddr);
	int ret;

	switch (pmd->entry[pmd_i] & PMD_TYPE_MASK) {
	case PMD_TYPE_FAULT:
		/* Already unmapped - report it to the caller */
		ret = -1;
		break;
	case PMD_TYPE_LARGE:
		pmd->entry[pmd_i] = 0;
		pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
		ret = 0;
		break;
	case PMD_TYPE_SMALL:
		pmd->entry[pmd_i] = 0;
		pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
		ret = 0;
		break;
	default:
		printk("Unknown page mapping in pmd. Assuming bug.\n");
		BUG();
		/* Keep ret defined even if BUG() does not halt the build */
		ret = -1;
		break;
	}

	return ret;
}
void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
pgd_t pgd_i = PGD_INDEX(vaddr);
pmd_table_t *pmd;
pmd_t pmd_i;
int ret;
/*
* Clean the cache to main memory before removing the mapping. Otherwise
@@ -321,20 +328,19 @@ void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
phys_to_virt((pgd->entry[pgd_i]
& PGD_COARSE_ALIGN_MASK));
pmd_i = PMD_INDEX(vaddr);
__remove_mapping(pmd, vaddr);
ret = __remove_mapping(pmd, vaddr);
break;
case PGD_TYPE_FAULT:
dprintk("Attempting to remove fault mapping. "
"Assuming bug.\n", vaddr);
BUG();
ret = -1;
break;
case PGD_TYPE_SECTION:
printk("Removing section mapping for 0x%lx",
vaddr);
pgd->entry[pgd_i] = 0;
pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
printk("Removing section mapping for 0x%lx",
vaddr);
pgd->entry[pgd_i] = 0;
pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
ret = 0;
break;
case PGD_TYPE_FINE:
@@ -352,11 +358,13 @@ void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
/* The tlb must be invalidated here because it might have cached the
* old translation for this mapping. */
arm_invalidate_tlb();
return ret;
}
void remove_mapping(unsigned long vaddr)
int remove_mapping(unsigned long vaddr)
{
remove_mapping_pgd(vaddr, current->pgd);
return remove_mapping_pgd(vaddr, current->pgd);
}
/*

View File

@@ -24,7 +24,7 @@
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long file_offset);
int do_munmap(struct tcb *task, void *vaddr, unsigned long size);
int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long size);
void *do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages);

View File

@@ -146,10 +146,9 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
struct tcb *task, unsigned long map_address,
unsigned int flags, unsigned int npages)
{
unsigned long map_pfn = __pfn(map_address);
unsigned long file_npages;
struct vm_area *new, *mapped;
struct vm_obj_link *vmo_link, *vmo_link2;
unsigned long file_npages;
struct vm_area *new;
int err;
/* Set up devzero if none given */

View File

@@ -9,12 +9,14 @@
#include <l4/lib/math.h>
#include <l4lib/arch/syslib.h>
#include <vm_area.h>
#include <lib/malloc.h>
/* This splits a vma, splitter region must be in the *middle* of original vma */
int vma_split(struct vm_area *vma, struct tcb *task,
const unsigned long pfn_start, const unsigned long pfn_end)
{
struct vm_area *new;
unsigned long unmap_start = pfn_start, unmap_end = pfn_end;
/* Allocate an uninitialised vma first */
if (!(new = vma_new(0, 0, 0, 0)))
@@ -43,6 +45,10 @@ int vma_split(struct vm_area *vma, struct tcb *task,
/* Add new one next to original vma */
list_add_tail(&new->list, &vma->list);
/* Unmap the removed portion */
BUG_ON(l4_unmap((void *)__pfn_to_addr(unmap_start),
unmap_end - unmap_start, task->tid) < 0);
return 0;
}
@@ -50,20 +56,49 @@ int vma_split(struct vm_area *vma, struct tcb *task,
int vma_shrink(struct vm_area *vma, struct tcb *task,
const unsigned long pfn_start, const unsigned long pfn_end)
{
unsigned long diff;
unsigned long diff, unmap_start, unmap_end;
/* Shrink from the end */
if (vma->pfn_start < pfn_start) {
BUG_ON(pfn_start >= vma->pfn_end);
unmap_start = pfn_start;
unmap_end = vma->pfn_end;
vma->pfn_end = pfn_start;
/* Shrink from the beginning */
} else if (vma->pfn_end > pfn_end) {
BUG_ON(pfn_end <= vma->pfn_start);
unmap_start = vma->pfn_start;
unmap_end = pfn_end;
diff = pfn_end - vma->pfn_start;
vma->file_offset += diff;
vma->pfn_start = pfn_end;
} else BUG();
} else
BUG();
/* Unmap the shrinked portion */
BUG_ON(l4_unmap((void *)__pfn_to_addr(unmap_start),
unmap_end - unmap_start, task->tid) < 0);
return 0;
}
/*
 * Destroys a single vma from a task and unmaps its range from task space.
 * Returns 0 on success, or the negative error from dropping the vma's
 * object links (in which case the vma is left in place).
 * NOTE(review): the ordering here matters - object links are released
 * before the range is unmapped and the vma is unlinked and freed.
 */
int vma_destroy_single(struct tcb *task, struct vm_area *vma)
{
	int ret;

	/* Release all object links */
	if ((ret = vma_drop_merge_delete_all(vma)) < 0)
		return ret;

	/*
	 * Unmap the whole vma address range. A negative l4_unmap result
	 * (partially unmapped range) is treated as a bug here - assumes
	 * the vma range was fully mapped; verify against callers.
	 */
	BUG_ON(l4_unmap((void *)__pfn_to_addr(vma->pfn_start),
			vma->pfn_end - vma->pfn_start, task->tid) < 0);

	/* Unlink and delete vma */
	list_del(&vma->list);
	kfree(vma);

	return 0;
}
@@ -84,7 +119,7 @@ int vma_unmap(struct vm_area *vma, struct tcb *task,
return vma_shrink(vma, task, pfn_start, pfn_end);
/* Destroy needed? */
else if ((vma->pfn_start >= pfn_start) && (vma->pfn_end <= pfn_end))
return vma_drop_merge_delete_all(vma);
return vma_destroy_single(task, vma);
else
BUG();
@@ -158,9 +193,6 @@ int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
}
}
/* Unmap those pages from the task address space */
l4_unmap(vaddr, npages, task->tid);
return 0;
}

View File

@@ -146,7 +146,7 @@ int do_shmdt(struct tcb *task, struct vm_file *shm)
int err;
if ((err = do_munmap(task,
shm_file_to_desc(shm)->shm_addr,
(unsigned long)shm_file_to_desc(shm)->shm_addr,
shm_file_to_desc(shm)->npages)) < 0)
return err;