drop the minix_ prefixes for mmap and munmap

Also clean up various minix-specific changes and
mmap-related testing.

Change-Id: I289a4fc50cf8a13df4a6082038d860853a4bd024
This commit is contained in:
Ben Gras
2013-11-22 16:38:29 +01:00
committed by Lionel Sambuc
parent b0cab62bd2
commit dda632a24f
34 changed files with 197 additions and 180 deletions

View File

@@ -923,8 +923,8 @@
#define truncate _truncate
#define write _write
#define writev _writev
#define minix_mmap _minix_mmap
#define minix_munmap _minix_munmap
#define mmap _mmap
#define munmap _munmap
#define vfork __vfork14
#endif /* __minix */

View File

@@ -12,9 +12,6 @@
#include <sys/mman.h>
#include <unistd.h>
#define mmap minix_mmap
#define munmap minix_munmap
#include "malloc-debug.h"
#if 0

View File

@@ -13,11 +13,9 @@
*/
#ifdef __minix
#include <machine/vmparam.h>
#define mmap minix_mmap
#define munmap minix_munmap
#ifdef _LIBSYS
#include <minix/sysutil.h>
#include <machine/vmparam.h>
#define MALLOC_NO_SYSCALLS
#define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
#define wrterror(w) panic("libminc malloc error: %s\n", w)
@@ -98,8 +96,8 @@ void utrace(struct ut *, int);
* This is necessary for VM to be able to define its own versions, and
* use this malloc.
*/
#undef minix_mmap
#undef minix_munmap
#undef mmap
#undef munmap
#include <sys/types.h>
#if defined(__NetBSD__)

View File

@@ -12,8 +12,8 @@ __weak_alias(vm_remap, _vm_remap)
__weak_alias(vm_unmap, _vm_unmap)
__weak_alias(vm_getphys, _vm_getphys)
__weak_alias(vm_getrefcount, _vm_getrefcount)
__weak_alias(minix_mmap, _minix_mmap)
__weak_alias(minix_munmap, _minix_munmap)
__weak_alias(mmap, _mmap)
__weak_alias(munmap, _munmap)
#endif
@@ -71,13 +71,13 @@ int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
return _syscall(VM_PROC_NR, VM_VFS_MMAP, &m);
}
void *minix_mmap(void *addr, size_t len, int prot, int flags,
void *mmap(void *addr, size_t len, int prot, int flags,
int fd, off_t offset)
{
return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
}
int minix_munmap(void *addr, size_t len)
int munmap(void *addr, size_t len)
{
message m;

View File

@@ -168,10 +168,10 @@ lmfs_alloc_block(struct buf *bp)
len = roundup(fs_block_size, PAGE_SIZE);
if((bp->data = minix_mmap(0, fs_block_size,
if((bp->data = mmap(0, fs_block_size,
PROT_READ|PROT_WRITE, MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
free_unused_blocks();
if((bp->data = minix_mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
if((bp->data = mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
panic("libminixfs: could not allocate block");
}
@@ -190,7 +190,7 @@ struct buf *lmfs_get_block(register dev_t dev, register block_t block,
return lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
}
void minix_munmap_t(void *a, int len)
void munmap_t(void *a, int len)
{
vir_bytes av = (vir_bytes) a;
assert(a);
@@ -202,7 +202,7 @@ void minix_munmap_t(void *a, int len)
assert(!(len % PAGE_SIZE));
if(minix_munmap(a, len) < 0)
if(munmap(a, len) < 0)
panic("libminixfs cache: munmap failed");
}
@@ -240,7 +240,7 @@ static void freeblock(struct buf *bp)
MARKCLEAN(bp); /* NO_DEV blocks may be marked dirty */
if(bp->lmfs_bytes > 0) {
assert(bp->data);
minix_munmap_t(bp->data, bp->lmfs_bytes);
munmap_t(bp->data, bp->lmfs_bytes);
bp->lmfs_bytes = 0;
bp->data = NULL;
} else assert(!bp->data);
@@ -571,7 +571,7 @@ void lmfs_invalidate(
if (bp->lmfs_dev == device) {
assert(bp->data);
assert(bp->lmfs_bytes > 0);
minix_munmap_t(bp->data, bp->lmfs_bytes);
munmap_t(bp->data, bp->lmfs_bytes);
bp->lmfs_dev = NO_DEV;
bp->lmfs_bytes = 0;
bp->data = NULL;
@@ -862,7 +862,7 @@ void lmfs_buf_pool(int new_nr_bufs)
for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
if(bp->data) {
assert(bp->lmfs_bytes > 0);
minix_munmap_t(bp->data, bp->lmfs_bytes);
munmap_t(bp->data, bp->lmfs_bytes);
}
}
}

View File

@@ -403,7 +403,7 @@ void *arg;
char *guard_start, *guard_end;
stacksize = round_page(stacksize + MTHREAD_GUARDSIZE);
stackaddr = minix_mmap(NULL, stacksize,
stackaddr = mmap(NULL, stacksize,
PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
-1, 0);
if (stackaddr == MAP_FAILED)
@@ -431,7 +431,7 @@ void *arg;
# error "Unsupported platform"
#endif
stacksize = guarded_stacksize;
if (minix_munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
if (munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
mthread_panic("unable to unmap stack space for guard");
tcb->m_context.uc_stack.ss_sp = guard_end;
} else
@@ -465,7 +465,7 @@ mthread_thread_t thread;
rt->m_cond = NULL;
if (rt->m_attr.ma_stackaddr == NULL) { /* We allocated stack space */
if (rt->m_context.uc_stack.ss_sp) {
if (minix_munmap(rt->m_context.uc_stack.ss_sp,
if (munmap(rt->m_context.uc_stack.ss_sp,
rt->m_context.uc_stack.ss_size) != 0) {
mthread_panic("unable to unmap memory");
}

View File

@@ -184,7 +184,7 @@ slowccalloc(struct puffs_usermount *pu)
if (puffs_fakecc)
return &fakecc;
sp = minix_mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
sp = mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_PRIVATE, -1, 0);
if (sp == MAP_FAILED)
return NULL;
@@ -194,11 +194,11 @@ slowccalloc(struct puffs_usermount *pu)
/* initialize both ucontext's */
if (getcontext(&pcc->pcc_uc) == -1) {
minix_munmap(pcc, stacksize);
munmap(pcc, stacksize);
return NULL;
}
if (getcontext(&pcc->pcc_uc_ret) == -1) {
minix_munmap(pcc, stacksize);
munmap(pcc, stacksize);
return NULL;
}
@@ -280,7 +280,7 @@ cc_free(struct puffs_cc *pcc)
DPRINTF(("invalidating pcc %p\n", pcc));
assert(!puffs_fakecc);
minix_munmap(pcc, stacksize);
munmap(pcc, stacksize);
}
void

View File

@@ -16,38 +16,17 @@ void *alloc_contig(size_t len, int flags, phys_bytes *phys)
if(flags & AC_LOWER1M)
mmapflags |= MAP_LOWER1M;
if(flags & AC_ALIGN64K)
mmapflags |= MAP_ALIGN64K;
mmapflags |= MAP_ALIGNMENT_64KB;
/* First try to get memory with minix_mmap. This is guaranteed
/* First try to get memory with mmap. This is guaranteed
* to be page-aligned, and we can tell VM it has to be
* pre-allocated and contiguous.
*/
errno = 0;
buf = (vir_bytes) minix_mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
buf = (vir_bytes) mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
/* If that failed, maybe we're not running in paged mode.
* If that's the case, ENXIO will be returned.
* Memory returned with malloc() will be preallocated and
* contiguous, so fallback on that, and ask for a little extra
* so we can page align it ourselves.
*/
if(buf == (vir_bytes) MAP_FAILED) {
u32_t align = 0;
if(errno != (_SIGN ENXIO)) {
return NULL;
}
if(flags & AC_ALIGN4K)
align = 4*1024;
if(flags & AC_ALIGN64K)
align = 64*1024;
if(len + align < len)
return NULL;
len += align;
if(!(buf = (vir_bytes) malloc(len))) {
return NULL;
}
if(align)
buf += align - (buf % align);
return NULL;
}
/* Get physical address, if requested. */
@@ -59,6 +38,6 @@ void *alloc_contig(size_t len, int flags, phys_bytes *phys)
int free_contig(void *addr, size_t len)
{
return minix_munmap(addr, len);
return munmap(addr, len);
}