exit() seems to work, but needs more testing.

- Added cleaner allocation of shm addresses by moving the allocation to do_mmap().
- Added deletion routine for all objects: shadow, vm_file of type vfs_file, shm_file, etc.
  - Need to make sure objects get deleted properly after exit().
- Currently we allow a single, unique virtual address for each shm segment.
This commit is contained in:
Bahadir Balban
2008-10-21 14:17:58 +03:00
parent aa2be891cd
commit b20fda9341
11 changed files with 166 additions and 67 deletions

View File

@@ -41,9 +41,10 @@ struct shm_descriptor {
#define SHM_SHMMAX 10
/* Initialises shared memory bookkeeping structures */
void shm_init();
int shm_pool_init();
void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr);
struct vm_file *shm_new(key_t key, unsigned long npages);
void *shm_new_address(int npages);
#endif /* __SHM_H__ */

View File

@@ -3,9 +3,9 @@
#include <l4lib/types.h>
#include <task.h>
void *utcb_vaddr_new(void);
int utcb_pool_init(void);
int utcb_vaddr_del(void *utcb_addr);
void *utcb_new_address(void);
int utcb_delete_address(void *utcb_addr);
/* IPC to send utcb address information to tasks */

View File

@@ -143,6 +143,7 @@ struct vm_file {
unsigned long length;
unsigned int type;
struct vm_object vm_obj;
void (*destroy_priv_data)(struct vm_file *f);
void *priv_data; /* Device pagers use to access device info */
};

View File

@@ -80,7 +80,7 @@ int sys_fork(struct tcb *parent)
BUG();
/* Create new utcb for child since it can't use its parent's */
child->utcb = utcb_vaddr_new();
child->utcb = utcb_new_address();
/*
* Create the utcb shared memory segment
@@ -90,8 +90,6 @@ int sys_fork(struct tcb *parent)
__pfn(DEFAULT_UTCB_SIZE))))
return (int)utcb_shm;
/* FIXME: Should we munmap() parent's utcb page from child? */
/*
* Map and prefault child utcb to vfs so that vfs need not
* call us to map it.
@@ -126,7 +124,7 @@ int sys_clone(struct tcb *parent, void *child_stack, unsigned int flags)
return (int)child;
/* Allocate a unique utcb address for child */
child->utcb = utcb_vaddr_new();
child->utcb = utcb_new_address();
/*
* Create the utcb shared memory segment

View File

@@ -245,6 +245,53 @@ struct page *copy_to_new_page(struct page *orig)
return new;
}
/*
 * Decide whether a vm object may be deleted.
 *
 * Shadow objects: deletable once nlinks == 0. (A shadow with
 * nlinks == 1 and shadows == 1 is merged instead - see
 * vma_drop_merge_delete below.)
 *
 * vfs/bootfile-type vmfiles: deletable only when both their
 * openers and their nlinks (i.e. mappers) have dropped to 0.
 *
 * shm-type vmfiles: deletable when nlinks == 0, since their
 * only reference count is their map count.
 */
int vm_object_is_deletable(struct vm_object *obj)
{
	struct vm_file *f;

	/* Still linked by a mapper - definitely not deletable */
	if (obj->nlinks != 0)
		return 0;

	BUG_ON(obj->shadows != 0);

	/* An unreferenced shadow always goes away */
	if (obj->flags & VM_OBJ_SHADOW)
		return 1;

	f = vm_object_to_file(obj);

	switch (f->type) {
	case VM_FILE_DEVZERO:
		/* Devzero should probably never have 0 refs left */
		return 0;
	case VM_FILE_SHM:
		return 1;
	case VM_FILE_BOOTFILE:
	case VM_FILE_VFS:
		return f->openers == 0;
	default:
		BUG();
	}

	/* To make gcc happy */
	return 0;
}
/*
* Drops a link to an object if possible, and if it has dropped it,
* decides and takes action on the dropped object, depending on
@@ -297,20 +344,10 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
}
}
/* Now deal with the object itself */
/* If it's not a shadow, we're not to touch it.
*
* TODO: In the future we can check if a vm_file's
* openers are 0 and take action here. (i.e. keep,
* delete or swap it)
/*
* Now deal with the object itself:
*/
if (!(obj->flags & VM_OBJ_SHADOW))
return 0;
/* If the object has no links left, we can delete it */
if (obj->nlinks == 0) {
BUG_ON(obj->shadows != 0);
if(vm_object_is_deletable(obj)) {
dprintf("Deleting object:\n");
vm_object_print(obj);
vm_object_delete(obj);
@@ -328,7 +365,8 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
* we will go back to identical mirroring instead of merging the
* last shadow, since most unused pages would be swapped out.
*/
if (obj->nlinks == 1 &&
if ((obj->flags & VM_OBJ_SHADOW) &&
obj->nlinks == 1 &&
obj->shadows == 1) {
dprintf("Merging object:\n");
vm_object_print(obj);

View File

@@ -165,7 +165,10 @@ void init_mm(struct initdata *initdata)
init_boot_files(initdata);
// printf("%s: Initialised in-memory boot files.\n", __TASKNAME__);
shm_init();
if (shm_pool_init() < 0) {
printf("SHM initialisation failed.\n");
BUG();
}
// printf("%s: Initialised shm structures.\n", __TASKNAME__);
if (utcb_pool_init() < 0) {

View File

@@ -9,10 +9,11 @@
#include <posix/sys/types.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <memory.h>
#include <task.h>
#include <mmap.h>
#include <file.h>
#include <memory.h>
#include <shm.h>
#if 0
/* TODO: This is to be implemented when fs0 is ready. */
@@ -380,12 +381,7 @@ int vma_intersect(unsigned long pfn_start, unsigned long pfn_end,
return 0;
}
/*
* FIXME: PASS THIS A VM_SHARED FLAG SO THAT IT CAN SEARCH FOR AN EMPTY
* SEGMENT FOR SHM, instead of shmat() searching for one.
*
* Search an empty space in the task's mmapable address region.
*/
/* Search an empty space in the task's mmapable address region. */
unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
{
unsigned long pfn_start = __pfn(task->map_start);
@@ -467,6 +463,20 @@ int mmap_address_validate(struct tcb *task, unsigned long map_address,
BUG();
}
/*
 * Pick a suitable mmap'able address for a new mapping.
 * Shared regions are allocated from the global shm virtual
 * address pool; private regions come from the task's own
 * mmapable address window.
 */
unsigned long mmap_new_address(struct tcb *task, unsigned int flags,
			       unsigned int npages)
{
	return (flags & VMA_SHARED)
		? (unsigned long)shm_new_address(npages)
		: find_unmapped_area(npages, task);
}
/*
* Maps the given file with given flags at the given page offset to the given
* task's address space at the specified virtual memory address and length.
@@ -475,8 +485,8 @@ int mmap_address_validate(struct tcb *task, unsigned long map_address,
* the file's pager upon page faults.
*/
void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
struct tcb *task, unsigned long map_address, unsigned int flags,
unsigned int npages)
struct tcb *task, unsigned long map_address,
unsigned int flags, unsigned int npages)
{
unsigned long map_pfn = __pfn(map_address);
struct vm_area *new, *mapped;
@@ -513,8 +523,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
/* Check invalid map address */
if (!mmap_address_validate(task, map_address, flags)) {
/* Get new map address for region of this size */
if(!(map_address = find_unmapped_area(npages, task)))
if (!(map_address = mmap_new_address(task, flags, npages)))
return PTR_ERR(-ENOMEM);
} else {
/*

View File

@@ -37,13 +37,33 @@ static struct id_pool *shm_ids;
/* Globally disjoint shm virtual address pool */
static struct address_pool shm_vaddr_pool;
void shm_init()
/* Allocate a disjoint virtual address range of npages from the shm pool */
void *shm_new_address(int npages)
{
	void *vaddr = address_new(&shm_vaddr_pool, npages);

	return vaddr;
}
/* Return a previously allocated virtual address range to the shm pool */
int shm_delete_address(void *shm_addr, int npages)
{
	int ret = address_del(&shm_vaddr_pool, shm_addr, npages);

	return ret;
}
/*
 * Initialise shm bookkeeping: the shm id pool and the global,
 * disjoint shm virtual address pool. Returns 0 on success or a
 * negative error code. NOTE(review): the rendered diff left the
 * pre-change calls (bare id_pool_new_init / address_pool_init)
 * interleaved with the checked versions, so each was invoked
 * twice; only the checked calls are kept here.
 */
int shm_pool_init()
{
	int err;

	/* Initialise shm id pool */
	if (IS_ERR(shm_ids = id_pool_new_init(SHM_AREA_MAX))) {
		printf("SHM id pool initialisation failed.\n");
		return (int)shm_ids;
	}

	/* Initialise the global shm virtual address pool */
	if ((err = address_pool_init(&shm_vaddr_pool,
				     SHM_AREA_START, SHM_AREA_END)) < 0) {
		printf("SHM Address pool initialisation failed.\n");
		return err;
	}

	return 0;
}
/*
@@ -64,45 +84,48 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
BUG();
}
if (shm_addr)
shm_addr = (void *)page_align(shm_addr);
if ((unsigned long)shm_addr & PAGE_MASK) {
if (shmflg & SHM_RND)
shm_addr = (void *)page_align(shm_addr);
else
return PTR_ERR(-EINVAL);
}
/* Determine mmap flags for segment */
if (shmflg & SHM_RDONLY)
vmflags = VM_READ | VMA_SHARED | VMA_ANONYMOUS;
else
vmflags = VM_READ | VM_WRITE |
VMA_SHARED | VMA_ANONYMOUS;
/* Set mmap flags for segment */
vmflags = VM_READ | VMA_SHARED | VMA_ANONYMOUS;
vmflags |= (shmflg & SHM_RDONLY) ? 0 : VM_WRITE;
/*
* The first user of the segment who supplies a valid
* address sets the base address of the segment. Currently
* all tasks use the same address for each unique segment.
* Currently all tasks use the same address for each unique segment.
* If address is already assigned, the supplied address must match
* the original address. We don't look for object map count because
* utcb addresses are assigned before being mapped. NOTE: We may do
* all this in a specific shm_mmap() call in do_mmap() in the future.
*/
/* First user? */
if (!shm_file->vm_obj.nlinks)
if (mmap_address_validate(task, (unsigned long)shm_addr,
vmflags))
shm->shm_addr = shm_addr;
else /* FIXME: Do this in do_mmap/find_unmapped_area !!! */
shm->shm_addr = address_new(&shm_vaddr_pool,
shm->npages);
else /* Address must be already assigned */
BUG_ON(!shm->shm_addr);
if (shm_file_to_desc(shm_file)->shm_addr) {
if (shm_addr && (shm->shm_addr != shm_addr))
return PTR_ERR(-EINVAL);
}
/*
* mmap the area to the process as shared. Page fault handler would
* handle allocating and paging-in the shared pages.
* mmap the area to the process as shared. Page fault
* handler would handle allocating and paging-in the
* shared pages.
*/
if (IS_ERR(mapped = do_mmap(shm_file, 0, task,
(unsigned long)shm->shm_addr,
(unsigned long)shm_addr,
vmflags, shm->npages))) {
printf("do_mmap: Mapping shm area failed with %d.\n",
(int)mapped);
BUG();
return PTR_ERR(mapped);
}
/* Assign new shm address if not assigned */
if (!shm->shm_addr)
shm->shm_addr = mapped;
else
BUG_ON(shm->shm_addr != mapped);
return shm->shm_addr;
}
@@ -148,6 +171,27 @@ int sys_shmdt(struct tcb *task, const void *shmaddr)
return -EINVAL;
}
/*
 * Work out which address pool this shm area's virtual address
 * came from - the utcb pool or the regular shm pool - and give
 * the address back to it, then free the shm descriptor that
 * hangs off the file's priv_data.
 */
void shm_destroy_priv_data(struct vm_file *shm_file)
{
	struct shm_descriptor *desc = shm_file_to_desc(shm_file);
	unsigned long vaddr = (unsigned long)desc->shm_addr;

	if (vaddr >= UTCB_AREA_START && vaddr < UTCB_AREA_END)
		utcb_delete_address(desc->shm_addr);
	else if (vaddr >= SHM_AREA_START && vaddr < SHM_AREA_END)
		shm_delete_address(desc->shm_addr,
				   shm_file->vm_obj.npages);
	else
		BUG();

	/* Now delete the private data itself */
	kfree(shm_file->priv_data);
}
/* Creates an shm area and glues its details with shm pager and devzero */
struct vm_file *shm_new(key_t key, unsigned long npages)
@@ -179,6 +223,7 @@ struct vm_file *shm_new(key_t key, unsigned long npages)
shm_file->length = __pfn_to_addr(npages);
shm_file->type = VM_FILE_SHM;
shm_file->priv_data = shm_desc;
shm_file->destroy_priv_data = shm_destroy_priv_data;
/* Initialise the vm object */
shm_file->vm_obj.pager = &swap_pager;

View File

@@ -293,7 +293,7 @@ int task_mmap_regions(struct tcb *task, struct vm_file *file)
}
/* Task's utcb */
task->utcb = utcb_vaddr_new();
task->utcb = utcb_new_address();
/* Create a shared memory segment available for shmat() */
if (IS_ERR(shm = shm_new((key_t)task->utcb, __pfn(DEFAULT_UTCB_SIZE))))

View File

@@ -27,12 +27,12 @@ int utcb_pool_init()
return 0;
}
void *utcb_vaddr_new(void)
/* Allocate a unique one-page utcb virtual address from the utcb pool */
void *utcb_new_address(void)
{
	void *vaddr = address_new(&utcb_vaddr_pool, 1);

	return vaddr;
}
int utcb_vaddr_del(void *utcb_addr)
/* Return a one-page utcb virtual address to the utcb pool */
int utcb_delete_address(void *utcb_addr)
{
	int ret = address_del(&utcb_vaddr_pool, utcb_addr, 1);

	return ret;
}

View File

@@ -134,8 +134,12 @@ int vm_object_delete(struct vm_object *vmo)
if (vmo->flags & VM_OBJ_FILE) {
f = vm_object_to_file(vmo);
BUG_ON(!list_empty(&f->list));
if (f->priv_data)
kfree(f->priv_data);
if (f->priv_data) {
if (f->destroy_priv_data)
f->destroy_priv_data(f);
else
kfree(f->priv_data);
}
kfree(f);
} else if (vmo->flags & VM_OBJ_SHADOW)
kfree(vmo);