Added copy-on-write shared memory; untested yet.
For anonymous shm, mmap now adds a shm_file, with devzero behind it, as two vm_objects. Faults are handled by copy_on_write(): just as shadows copy r/w pages from original files, it should copy r/w pages from devzero into the shm_file in front. shmat()/shmget() use mmap() to set up their areas. Untested yet, so bugs are expected.

modified: tasks/libl4/src/init.c
modified: tasks/mm0/include/shm.h
modified: tasks/mm0/include/vm_area.h
modified: tasks/mm0/src/fault.c
modified: tasks/mm0/src/mmap.c
modified: tasks/mm0/src/shm.c
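For reference, the user-side sequence this commit exercises (see the utcb_init() hunk below) is the classic SysV pair. A minimal sketch; BUG_ON-style error checking mirrors the test code, and utcb_page stands for any page-aligned address the caller owns:

	int shmid;
	void *shmaddr;

	/* Create (or look up) a segment keyed on the page address */
	BUG_ON((shmid = shmget((key_t)utcb_page, PAGE_SIZE, IPC_CREAT)) < 0);

	/* Attach it at that address; behind the scenes mm0 mmaps the
	 * shm_file with devzero behind it, so the first write fault
	 * copies a zero page forward into the shm_file. */
	BUG_ON((shmaddr = shmat(shmid, utcb_page, 0)) < 0);
	BUG_ON(shmaddr != utcb_page);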
tasks/libl4/src/init.c
@@ -96,12 +96,10 @@ int utcb_init(void)
 	/* Use it as a key to create a shared memory region */
 	BUG_ON((shmid = shmget((key_t)utcb_page,
 			       PAGE_SIZE, IPC_CREAT)) < 0);
 	printf("Shmget success. shmid: %d\n", shmid);

 	/* Attach to the region */
 	BUG_ON((shmaddr = shmat(shmid, utcb_page, 0)) < 0);
 	BUG_ON(shmaddr != utcb_page);
 	printf("Shmat success. Attached %d @ 0x%x\n", shmid, (unsigned long)shmaddr);
 	}

 	return 0;
tasks/mm0/include/shm.h
@@ -1,11 +1,23 @@
 /*
  * Copyright (C) 2008 Bahadir Balban
  */
 #ifndef __SHM_H__
 #define __SHM_H__

-#include <l4/api/space.h>
+#include <l4/lib/list.h>
+#include <l4/api/space.h>
+#include <l4/macros.h>
+#include <l4lib/types.h>

+struct shm_descriptor {
+	int key;
+	l4id_t shmid;
+	void *shm_addr;
+	unsigned long npages;
+	struct vm_file *devzero;
+};
+
+#if 0
 struct shm_descriptor {
 	int key;	/* IPC key supplied by user task */
 	l4id_t shmid;	/* SHM area id, allocated by mm0 */
@@ -16,12 +28,13 @@ struct shm_descriptor {
 	unsigned int flags;
 	int refcnt;
 };
+#endif

 #define SHM_AREA_MAX	64	/* Up to 64 shm areas */

 /* Up to 10 pages per area, and at least 1 byte (implies 1 page) */
 #define SHM_SHMMIN	1
-#define SHM_SHMMAX	(PAGE_SIZE * 10)
+#define SHM_SHMMAX	10

 /* Initialises shared memory bookkeeping structures */
 void shm_init();
tasks/mm0/include/vm_area.h
@@ -42,6 +42,7 @@ enum VM_FILE_TYPE {
 	VM_FILE_DEVZERO = 1,
 	VM_FILE_REGULAR,
 	VM_FILE_BOOTFILE,
+	VM_FILE_SHM,
 };

 /* Defines the type of object. A file? Just a standalone object? */
tasks/mm0/src/fault.c
@@ -302,7 +302,7 @@ int copy_on_write(struct fault_data *fault)
	 *
	 * vma->link0->link1
	 *        |      |
-	 *        V      V
+	 *        v      v
	 *     shadow  original
	 */
 	list_add(&shadow_link->list, &vma->vm_obj_list);
@@ -313,7 +313,8 @@ int copy_on_write(struct fault_data *fault)
 		/* Shadow is the copier object */
 		copier_link = shadow_link;
 	} else {
-		dprintf("No shadows. Going to add to topmost r/w shadow object\n");
+		dprintf("No new shadows. Going to add to "
+			"topmost r/w shadow object\n");
 		/* No new shadows, the topmost r/w vmo is the copier object */
 		copier_link = vmo_link;
@@ -328,7 +329,7 @@ int copy_on_write(struct fault_data *fault)

 	/* Traverse the list of read-only vm objects and search for the page */
 	while (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
-			file_offset))) {
+							       file_offset))) {
 		if (!(vmo_link = vma_next_link(&vmo_link->list,
 					       &vma->vm_obj_list))) {
 			printf("%s:%s: Traversed all shadows and the original "
@@ -367,6 +368,10 @@ int copy_on_write(struct fault_data *fault)
 		   page_align(fault->address), fault->task->tid);
 	vm_object_print(new_page->owner);

+	/* Shm faults don't have shadows so we're done here. */
+	if (vma->flags & VMA_SHARED)
+		return 0;
+
	/*
	 * Finished handling the actual fault, now check for possible
	 * shadow collapses. Does the copier completely shadow the one
@@ -408,8 +413,8 @@ int __do_page_fault(struct fault_data *fault)
 	unsigned int vma_flags = fault->vma->flags;
 	unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
 	struct vm_area *vma = fault->vma;
-	unsigned long file_offset;
 	struct vm_obj_link *vmo_link;
+	unsigned long file_offset;
 	struct page *page;

 	/* Handle read */
@@ -449,8 +454,8 @@ int __do_page_fault(struct fault_data *fault)

 	/* Handle write */
 	if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
-		/* Copy-on-write */
-		if (vma_flags & VMA_PRIVATE)
+		/* Copy-on-write, for the union of all private and anonymous vmas */
+		if ((vma_flags & VMA_PRIVATE) || (vma_flags & VMA_ANONYMOUS))
 			copy_on_write(fault);

 		/* Regular files */
@@ -479,13 +484,6 @@ int __do_page_fault(struct fault_data *fault)
 			   page_align(fault->address), fault->task->tid);
 		vm_object_print(vmo_link->obj);
 	}
-	/* FIXME: Just do fs files for now, anon shm objects later. */
-	/* Things to think about:
-	 * - Is utcb a shm memory really? Then each task must map it in via
-	 *   shmget(). FS0 must map all user tasks' utcb via shmget() as well.
-	 *   For example to pass on pathnames etc.
-	 */
-	BUG_ON((vma_flags & VMA_SHARED) && (vma_flags & VMA_ANONYMOUS));
 	}

 	return 0;
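The search these fault.c hunks extend can be summarized as follows; a simplified sketch, not the actual mm0 code (names follow the hunks above), of how a faulting page is looked up front to back through a vma's object chain. For an anonymous shm mapping that walk hits the shm_file first, then devzero:

	/* Sketch: find the faulting page by walking the vma's object
	 * list in fault order, paging in from the first object that
	 * has it. */
	static struct page *cow_find_page(struct vm_area *vma,
					  struct vm_obj_link *vmo_link,
					  unsigned long file_offset)
	{
		struct page *page;

		while (IS_ERR(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
								       file_offset))) {
			/* Not in this object; try the one behind it. */
			if (!(vmo_link = vma_next_link(&vmo_link->list,
						       &vma->vm_obj_list)))
				return 0;	/* chain exhausted */
		}

		return page;
	}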
tasks/mm0/src/mmap.c
@@ -284,9 +284,8 @@ int vma_unmap(struct vm_area **actual, struct vm_area **split,
 }

 /* Unmaps given address range from its vma. Releases those pages in that vma. */
-int do_munmap(void *vaddr, unsigned long size, struct tcb *task)
+int do_munmap(void *vaddr, unsigned long npages, struct tcb *task)
 {
-	unsigned long npages = __pfn(size);
 	unsigned long pfn_start = __pfn(vaddr);
 	unsigned long pfn_end = pfn_start + npages;
 	struct vm_area *vma, *vma_new = 0;
@@ -332,7 +331,7 @@ pgtable_unmap:
 #endif


-int do_munmap(void *vaddr, unsigned long size, struct tcb *task)
+int do_munmap(void *vaddr, unsigned long npages, struct tcb *task)
 {
 	return 0;
 }
@@ -343,7 +342,7 @@ int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)

 	BUG_ON(!(task = find_task(sender)));

-	return do_munmap(vaddr, size, task);
+	return do_munmap(vaddr, __pfn(page_align_up(size)), task);
 }

 struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
@@ -351,18 +350,11 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
 			struct vm_file *mapfile)
 {
 	struct vm_area *vma;
-	struct vm_obj_link *obj_link;

 	/* Allocate new area */
 	if (!(vma = kzalloc(sizeof(struct vm_area))))
 		return 0;

-	/* Allocate vm object link */
-	if (!(obj_link = kzalloc(sizeof(struct vm_obj_link)))) {
-		kfree(vma);
-		return 0;
-	}
-
 	INIT_LIST_HEAD(&vma->list);
 	INIT_LIST_HEAD(&vma->vm_obj_list);
@@ -371,11 +363,6 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
 	vma->flags = flags;
 	vma->file_offset = file_offset;

-	INIT_LIST_HEAD(&obj_link->list);
-	INIT_LIST_HEAD(&obj_link->shref);
-	obj_link->obj = &mapfile->vm_obj;
-	list_add(&obj_link->list, &vma->vm_obj_list);
-
 	return vma;
 }
@@ -469,7 +456,7 @@ int mmap_address_validate(unsigned long map_address, unsigned int vm_flags)
 	    (map_address >= SHM_AREA_START &&
 	     map_address < SHM_AREA_END))
 		return 1;
-	else
+	else
 		return 0;
 	} else
 		BUG();
@@ -487,9 +474,9 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 	    unsigned int npages)
 {
 	unsigned long map_pfn = __pfn(map_address);
-	unsigned long file_npages;
 	struct vm_area *new, *mapped;
-	struct vm_obj_link *vmo_link;
+	struct vm_obj_link *vmo_link, *vmo_link2;
+	unsigned long file_npages;

 	/* Set up devzero if none given */
 	if (!mapfile) {
@@ -550,6 +537,30 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 	mapfile->vm_obj.refcnt++;
 	list_add_tail(&vmo_link->list, &new->vm_obj_list);

+	/*
+	 * If the file is a shm file, also map devzero behind it. i.e.
+	 *	vma -> vm_link -> vm_link
+	 *	        |          |
+	 *	        v          v
+	 *	     shm_file   devzero
+	 *
+	 * So that faults go through the shm file and then devzero, as in
+	 * the shadow object copy_on_write() setup in fault.c
+	 */
+	if (mapfile->type == VM_FILE_SHM) {
+		struct vm_file *dzero = get_devzero();
+
+		/* Attach devzero as the second vm object of this vma */
+		if (!(vmo_link2 = vm_objlink_create())) {
+			kfree(new);
+			kfree(vmo_link);
+			return -ENOMEM;
+		}
+		vmo_link2->obj = &dzero->vm_obj;
+		dzero->vm_obj.refcnt++;
+		list_add_tail(&vmo_link2->list, &new->vm_obj_list);
+	}
+
 	/* Finished initialising the vma, add it to task */
 	printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
 	       map_address, map_address + npages * PAGE_SIZE);
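The invariant do_mmap() establishes above is that walking new->vm_obj_list front to back visits the shm file's object first and devzero's second. A hypothetical debug helper (not in the tree; printf format invented) that makes the ordering visible:

	/* Hypothetical helper: dump a vma's object chain in fault order.
	 * For an anonymous shm mapping this prints the shm_file's
	 * vm_obj first and devzero's second. */
	static void vma_print_chain(struct vm_area *vma)
	{
		struct vm_obj_link *link;

		list_for_each_entry(link, &vma->vm_obj_list, list)
			printf("vm_obj: %p, refcnt: %d\n",
			       link->obj, link->obj->refcnt);
	}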
tasks/mm0/src/shm.c
@@ -25,8 +25,11 @@
 #include <posix/sys/shm.h>
 #include <posix/sys/types.h>

+#define shm_file_to_desc(shm_file) \
+	((struct shm_descriptor *)(shm_file)->priv_data)
+
 /* The list of shared memory areas that are already set up and working */
-static struct list_head shm_desc_list;
+static LIST_HEAD(shm_file_list);

 /* Unique shared memory ids */
 static struct id_pool *shm_ids;
@@ -36,8 +39,6 @@ static struct address_pool shm_vaddr_pool;

 void shm_init()
 {
-	INIT_LIST_HEAD(&shm_desc_list);
-
 	/* Initialise shm id pool */
 	shm_ids = id_pool_new_init(SHM_AREA_MAX);
@@ -49,11 +50,11 @@ void shm_init()
  * Attaches to given shm segment mapped at shm_addr if the shm descriptor
  * does not already have a base address assigned. If neither shm_addr nor
  * the descriptor has an address, allocates one from the shm address pool.
  * FIXME: This pool is currently outside the range of mmap'able addresses.
  */
-static void *do_shmat(struct shm_descriptor *shm, void *shm_addr, int shmflg,
+static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
 		      l4id_t tid)
 {
+	struct shm_descriptor *shm = shm_file_to_desc(shm_file);
 	struct tcb *task = find_task(tid);
 	unsigned int vmflags;
 	int err;
@@ -81,12 +82,12 @@ static void *do_shmat(struct shm_descriptor *shm, void *shm_addr, int shmflg,
	 */

 	/* First user? */
-	if (!shm->refcnt)
+	if (!shm_file->vm_obj.refcnt)
 		if (mmap_address_validate((unsigned long)shm_addr, vmflags))
 			shm->shm_addr = shm_addr;
 		else
 			shm->shm_addr = address_new(&shm_vaddr_pool,
-						    __pfn(shm->size));
+						    shm->npages);
 	else	/* Address must be already assigned */
 		BUG_ON(!shm->shm_addr);
@@ -94,34 +95,34 @@ static void *do_shmat(struct shm_descriptor *shm, void *shm_addr, int shmflg,
	 * mmap the area to the process as shared. Page fault handler would
	 * handle allocating and paging-in the shared pages.
	 */
-	if ((err = do_mmap(0, 0, task, (unsigned long)shm->shm_addr,
-			   vmflags, shm->size)) < 0) {
+	if ((err = do_mmap(shm_file, 0, task, (unsigned long)shm->shm_addr,
+			   vmflags, shm->npages)) < 0) {
 		printf("do_mmap: Mapping shm area failed with %d.\n", err);
 		BUG();
 	}
-	/* Now update the shared memory descriptor */
-	shm->refcnt++;

 	return shm->shm_addr;
 }

 int sys_shmat(l4id_t requester, l4id_t shmid, void *shmaddr, int shmflg)
 {
-	struct shm_descriptor *shm_desc, *n;
+	struct vm_file *shm_file, *n;

-	list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
-		if (shm_desc->shmid == shmid) {
-			shmaddr = do_shmat(shm_desc, shmaddr,
+	list_for_each_entry_safe(shm_file, n, &shm_file_list, list) {
+		if (shm_file_to_desc(shm_file)->shmid == shmid) {
+			shmaddr = do_shmat(shm_file, shmaddr,
 					   shmflg, requester);

 			l4_ipc_return((int)shmaddr);
 			return 0;
 		}
 	}

 	l4_ipc_return(-EINVAL);
 	return 0;
 }

-int do_shmdt(struct shm_descriptor *shm, l4id_t tid)
+int do_shmdt(struct vm_file *shm, l4id_t tid)
 {
 	struct tcb *task = find_task(tid);
 	int err;
@@ -131,64 +132,91 @@ int do_shmdt(struct shm_descriptor *shm, l4id_t tid)
 			__TASKNAME__, __FUNCTION__, tid);
 		BUG();
 	}
-	if ((err = do_munmap(shm->shm_addr, shm->size, task)) < 0) {
+	if ((err = do_munmap(shm_file_to_desc(shm)->shm_addr,
+			     shm_file_to_desc(shm)->npages, task)) < 0) {
 		printf("do_munmap: Unmapping shm segment failed with %d.\n",
 		       err);
 		BUG();
 	}

 	return err;
 }

 int sys_shmdt(l4id_t requester, const void *shmaddr)
 {
-	struct shm_descriptor *shm_desc, *n;
+	struct vm_file *shm_file, *n;
 	int err;

-	list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
-		if (shm_desc->shm_addr == shmaddr) {
-			if ((err = do_shmdt(shm_desc, requester) < 0)) {
+	list_for_each_entry_safe(shm_file, n, &shm_file_list, list) {
+		if (shm_file_to_desc(shm_file)->shm_addr == shmaddr) {
+			if ((err = do_shmdt(shm_file, requester) < 0)) {
 				l4_ipc_return(err);
 				return 0;
 			} else
 				break;
 		}
 	}
-	l4_ipc_return(0);

+	l4_ipc_return(-EINVAL);
 	return 0;
 }

-static struct shm_descriptor *shm_new(key_t key, unsigned long npages)
-{
-	/* It doesn't exist, so create a new one */
-	struct shm_descriptor *shm_desc;
-
-	if ((shm_desc = kzalloc(sizeof(struct shm_descriptor))) < 0)
+/* Creates an shm area and glues its details with shm pager and devzero */
+static struct vm_file *shm_new(key_t key, unsigned long npages)
+{
+	struct shm_descriptor *shm_desc;
+	struct vm_file *shm_file;
+
+	BUG_ON(!npages);
+
+	/* Allocate file and shm structures */
+	if (IS_ERR(shm_file = vm_file_create()))
+		return PTR_ERR(shm_file);
+
+	if (!(shm_desc = kzalloc(sizeof(struct shm_descriptor)))) {
+		kfree(shm_file);
+		return 0;
+	}
+
 	/* Initialise the shm descriptor */
 	if ((shm_desc->shmid = id_new(shm_ids)) < 0) {
+		kfree(shm_file);
 		kfree(shm_desc);
 		return 0;
 	}
-	BUG_ON(!npages);
 	shm_desc->key = (int)key;
-	shm_desc->size = npages;
-	INIT_LIST_HEAD(&shm_desc->list);
-	list_add(&shm_desc->list, &shm_desc_list);
+	shm_desc->npages = npages;

-	return shm_desc;
+	/* Initialise the file */
+	shm_file->length = __pfn_to_addr(npages);
+	shm_file->type = VM_FILE_SHM;
+	shm_file->priv_data = shm_desc;
+
+	/* Initialise the vm object */
+	shm_file->vm_obj.pager = &swap_pager;
+	shm_file->vm_obj.flags = VM_OBJ_FILE;
+
+	list_add(&shm_file->list, &shm_file_list);
+	list_add(&shm_file->vm_obj.list, &vm_object_list);
+
+	return shm_file;
 }

 /*
  * FIXME: Make sure hostile tasks don't subvert other tasks' utcbs
  * by early-registering their utcb address here.
  */
 int sys_shmget(key_t key, int size, int shmflg)
 {
-	struct shm_descriptor *shm_desc;
-	unsigned long npages;
+	unsigned long npages = __pfn(page_align_up(size));
+	struct vm_file *shm;

 	/* First check argument validity */
-	if (size > SHM_SHMMAX || size < SHM_SHMMIN) {
+	if (npages > SHM_SHMMAX || npages < SHM_SHMMIN) {
 		l4_ipc_return(-EINVAL);
 		return 0;
-	} else
-		npages = __pfn(page_align_up(size));
+	}

	/*
	 * IPC_PRIVATE means create a no-key shm area, i.e. private to this
@@ -196,14 +224,16 @@ int sys_shmget(key_t key, int size, int shmflg)
	 */
 	if (key == IPC_PRIVATE) {
 		key = -1;	/* Our meaning of no key */
-		if (!(shm_desc = shm_new(key, npages)))
+		if (!(shm = shm_new(key, npages)))
 			l4_ipc_return(-ENOSPC);
 		else
-			l4_ipc_return(shm_desc->shmid);
+			l4_ipc_return(shm_file_to_desc(shm)->shmid);
 		return 0;
 	}

-	list_for_each_entry(shm_desc, &shm_desc_list, list) {
+	list_for_each_entry(shm, &shm_file_list, list) {
+		struct shm_descriptor *shm_desc = shm_file_to_desc(shm);
+
 		if (shm_desc->key == key) {
			/*
			 * Exclusive means create request
@@ -213,7 +243,7 @@ int sys_shmget(key_t key, int size, int shmflg)
 				l4_ipc_return(-EEXIST);
 			else
 				/* Found it but do we have a size problem? */
-				if (shm_desc->size < size)
+				if (shm_desc->npages < npages)
 					l4_ipc_return(-EINVAL);
 				else	/* Return shmid of the existing key */
 					l4_ipc_return(shm_desc->shmid);
@@ -223,14 +253,13 @@ int sys_shmget(key_t key, int size, int shmflg)

 	/* Key doesn't exist and create is set, so we create */
 	if (shmflg & IPC_CREAT)
-		if (!(shm_desc = shm_new(key, npages)))
+		if (!(shm = shm_new(key, npages)))
 			l4_ipc_return(-ENOSPC);
 		else
-			l4_ipc_return(shm_desc->shmid);
+			l4_ipc_return(shm_file_to_desc(shm)->shmid);
 	else	/* Key doesn't exist, yet create isn't set, it's an -ENOENT */
 		l4_ipc_return(-ENOENT);

 	return 0;
 }