/*
 * codezero/tasks/mm0/mmap.extra.c.code
 * Bahadir Balban, initial commit e2b791a3d8, 2008-01-13
 */


#if 0
void shm_request_handler(struct shm_request *request, struct id_pool **shm_ids)
{
struct shm_descriptor *pending, *new;
struct shm_kdata *kdata;
if (request->type == SHM_SENDER) {
list_for_each_entry(pending, &shm_pending_list, list) {
kdata = &pending->kdata;
			/*
			 * The receiver request should have set this up, and
			 * a few other parameters should match this request.
			 */
if (kdata->receiver == request->pair &&
kdata->npages == request->npages &&
kdata->sender == request->self) {
/* Fill in rest of the incomplete information */
kdata->send_pfn = request->pfn;
/* Allocate a new id for the shm setup */
pending->shmid = id_new(*shm_ids);
/* Add it to completed shm area list */
list_del(&pending->list);
list_add(&pending->list, &shm_desc_list);
/* Arrange the actual shm setup with the kernel */
do_shm_setup(pending);
return;
}
}
/*
* If no matching pending shm descriptor is found, a new one
* should be allocated.
*/
new = kzalloc(sizeof(struct shm_descriptor));
kdata = &new->kdata;
/* Fill in all information available from the request */
kdata->sender = request->self;
kdata->receiver = request->pair;
kdata->send_pfn = request->pfn;
kdata->npages = request->npages;
/* Add the descriptor to pending list. */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &shm_pending_list);
} else if (request->type == SHM_RECEIVER) {
list_for_each_entry(pending, &shm_pending_list, list) {
kdata = &pending->kdata;
			/*
			 * The sender request should already have set this up,
			 * and a few other parameters should match this request.
			 */
if (kdata->receiver == request->self &&
kdata->npages == request->npages &&
kdata->sender == request->pair) {
/* Fill in rest of the incomplete information */
kdata->recv_pfn = request->pfn;
/* Allocate a new id for the shm setup */
pending->shmid = id_new(*shm_ids);
list_del(&pending->list);
/* Add it to completed shm area list */
list_add(&pending->list, &shm_desc_list);
/* Arrange the actual shm setup with the kernel */
do_shm_setup(pending);
return;
}
}
/*
* If no matching pending shm descriptor is found, a new one
* should be allocated.
*/
new = kzalloc(sizeof(struct shm_descriptor));
kdata = &new->kdata;
/* Fill in all information available from the request */
kdata->sender = request->pair;
kdata->receiver = request->self;
kdata->recv_pfn = request->pfn;
kdata->npages = request->npages;
/* Add the descriptor to pending list. */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &shm_pending_list);
}
}
#endif
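
/*
 * Illustration only (not original code): the disabled handler above expects
 * a two-sided handshake. Each endpoint submits a request naming the other;
 * whichever request arrives second finds the pending descriptor, fills in
 * the missing pfn, and triggers do_shm_setup(). A minimal sketch, assuming
 * plain task ids and example page frame numbers:
 */
#if 0
static void shm_handshake_example(struct id_pool **shm_ids)
{
	/* Task 5 offers 4 pages starting at its pfn 0x1000 to task 7 */
	struct shm_request sender_req = {
		.type = SHM_SENDER, .self = 5, .pair = 7,
		.pfn = 0x1000, .npages = 4,
	};
	/* Task 7 accepts 4 pages from task 5 into its pfn 0x2000 */
	struct shm_request receiver_req = {
		.type = SHM_RECEIVER, .self = 7, .pair = 5,
		.pfn = 0x2000, .npages = 4,
	};

	/* The first request only queues a pending descriptor... */
	shm_request_handler(&sender_req, shm_ids);
	/* ...the matching one completes it and sets up the shm area. */
	shm_request_handler(&receiver_req, shm_ids);
}
#endif
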
/* Original incomplete untested code for do_munmap */
/* Unmaps given address from its vma. Releases those pages in that vma. */
int do_munmap(unsigned long addr, unsigned long size, struct tcb *task)
{
unsigned long npages = __pfn(size);
unsigned long pfn_start = __pfn(addr);
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma = 0, *shadow = 0, *vma_new = 0, *shadow_new = 0;
	struct vm_area *n;
if (!(vma = find_vma(addr, &task->vm_area_list)))
return -EINVAL;
/* Do the real file's vma. Split needed? */
if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end) {
if (!(vma_new = kzalloc(sizeof(struct vm_area))))
return -ENOMEM;
vma = vma_split(vma, vma_new, pfn_start, pfn_end);
INIT_LIST_HEAD(&vma_new->list);
INIT_LIST_HEAD(&vma_new->shadow_list);
list_add(&vma_new->list, &vma->list);
/* Shrink needed? */
} else if (((vma->pfn_start == pfn_start) && (vma->pfn_end > pfn_end))
|| ((vma->pfn_start < pfn_start) && (vma->pfn_end == pfn_end)))
vma = vma_shrink(vma, pfn_start, pfn_end);
/* Destroy needed? */
else if ((vma->pfn_start == pfn_start) && (vma->pfn_end == pfn_end)) {
/* NOTE: VMA can't be referred after this point. */
vma_destroy(vma);
goto pgtable_unmap;
} else
BUG();
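	/*
	 * For example, with a vma covering pfns 10..20: unmapping 12..15
	 * splits it into 10..12 and 15..20; unmapping 10..15 or 14..20
	 * shrinks it from one end; unmapping exactly 10..20 destroys it.
	 * A range poking past either end of the vma is a caller bug.
	 */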
/* Sort out the shadows, if any. non-cow mappings would skip this. */
	list_for_each_entry_safe(shadow, n, &vma->shadow_list, list) {
/* Split needed? */
if (shadow->pfn_start < pfn_start && shadow->pfn_end > pfn_end) {
			if (!(shadow_new = kzalloc(sizeof(struct vm_area))))
				return -ENOMEM;
			shadow = vma_split(shadow, shadow_new, pfn_start, pfn_end);
INIT_LIST_HEAD(&shadow_new->list);
list_add_tail(&shadow_new->list, &shadow->list);
/* Destroy needed? */
} else if ((shadow->pfn_start == pfn_start) && (shadow->pfn_end == pfn_end)) {
/* NOTE: vma can't be referred after this point. */
vma_destroy(shadow);
/* Shrink needed? */
} else if (((shadow->pfn_start == pfn_start) && (shadow->pfn_end > pfn_end))
|| ((shadow->pfn_start < pfn_start) && (shadow->pfn_end == pfn_end)))
shadow = vma_shrink(shadow, pfn_start, pfn_end);
else
BUG();
}
	/*
	 * If the real file was COW and its vma has been split, the shadows
	 * must be distributed between the two new vmas according to which
	 * one they belong to.
	 */
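	/*
	 * Continuing the example above: if the original vma covered pfns
	 * 10..20 and 12..15 were unmapped, the remaining pieces cover
	 * 10..12 and 15..20, so a shadow spanning 16..18 must move to the
	 * shadow list of whichever piece still contains it.
	 */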
if (vma_new)
		list_for_each_entry_safe(shadow, n, &vma->shadow_list, list) {
if (shadow->pfn_start >= vma_new->pfn_start &&
shadow->pfn_end <= vma_new->pfn_end) {
list_del_init(&shadow->list);
list_add(&shadow->list, &vma_new->shadow_list);
} else
BUG_ON(!(shadow->pfn_start >= vma->pfn_start &&
shadow->pfn_end <= vma->pfn_end));
}
/* The stage where the actual pages are unmapped from the page tables */
pgtable_unmap:
/* TODO:
* - Find out if the vma is cow, and contains shadow vmas.
* - Remove and free shadow vmas or the real vma, or shrink them if applicable.
* - Free the swap file segment for the vma if vma is private (cow).
* - Reduce refcount for the in-memory pages.
* - If refcount is zero (they could be shared!), either add pages to some page
* cache, or simpler the better, free the actual pages back to the page allocator.
* - l4_unmap() the corresponding virtual region from the page tables.
*/
	return 0;
}
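
/*
 * Illustrative sketch only (not original code): one possible shape for the
 * pgtable_unmap stage listed in the TODO above. vma_lookup_page(),
 * page_refcnt_dec() and free_page() are assumed helpers, and the struct page
 * type, tcb fields and l4_unmap() signature used here are assumptions too.
 */
#if 0
static int munmap_release_region(struct tcb *task, struct vm_area *vma,
				 unsigned long pfn_start, unsigned long pfn_end)
{
	struct page *page;
	unsigned long pfn;

	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		/* Find the physical page, if any, backing this virtual pfn */
		if (!(page = vma_lookup_page(vma, pfn)))
			continue;	/* never faulted in, nothing to free */
		/* Pages may be shared; free only when the last user is gone */
		if (page_refcnt_dec(page) == 0)
			free_page(page);
	}

	/* Finally drop the virtual region from the task's page tables */
	return l4_unmap((void *)__pfn_to_addr(pfn_start),
			pfn_end - pfn_start, task->tid);
}
#endif
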
/*
* TODO: UNUSED: This used to be used during mmap to map swap for anon
* areas.
 * Now the same code might be used *during swapping* to set up the swap file
 * for those same areas. The fault handler might also locate the swap slot
 * on the file using the information recorded here.
*
* For an anonymous and/or private region this provides a per-task swap
* file for backing. For shared memory it attaches shm regions to a
* global shm swap file.
*/
struct vm_file *setup_swap_file(unsigned long map_address,
unsigned int flags, unsigned long *f_offset,
struct tcb *t, int pages)
{
struct vm_file *swap_file;
unsigned long shm_f_offset;
	BUG_ON(!(flags & VMA_ANON));
BUG_ON(!is_page_aligned(map_address));
/*
* All anon shared memory is kept on a single global swap file. Shm
* addresses are globally the same among processes, so the file mapping
* is a function of shm segment address.
*/
if (flags & VMA_SHARED) {
swap_file = shm_swap_file;
/*
* The file offset is the shm segment's page offset in the shm
* virtual region, which is unique among all processes.
*/
		*f_offset = __pfn(map_address - SHM_AREA_START);
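		/*
		 * E.g. an shm segment sitting 5 pages above SHM_AREA_START
		 * gets *f_offset == 5, i.e. it occupies the file from byte
		 * offset 5 * PAGE_SIZE onwards in the global shm swap file.
		 */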
/*
* Extend the file if this shm vaddr lies beyond file end.
*/
if (swap_file->length < __pfn_to_addr(*f_offset + pages))
			swap_file->length = __pfn_to_addr(*f_offset + pages);
/* Regular non-shareable anonymous regions */
} else {
swap_file = t->swap_file;
/*
* Anonymous vmas are mapped into process' swap during mmap.
* Copy-on-write vmas are mapped into swap as shadow vmas when
* they get copy-on-write'ed.
*/
BUG_ON(!is_page_aligned(swap_file->length));
/*
* vmas map into the next available swap slot. The whole vma is
* mapped so that it has a contiguous existence on swap file.
* Swap slots are allocated by the per-task swap offset pool.
*/
		*f_offset = vaddr_pool_new(t->swap_file_offset_pool, pages);
/* If new offset is greater than current file size, update. */
if (__pfn_to_addr(*f_offset + pages) > swap_file->length)
swap_file->length = __pfn_to_addr(*f_offset + pages);
BUG_ON(swap_file->length > TASK_SWAPFILE_MAXSIZE);
}
printf("Set up swapfile for anon%svma @ pfn offset %d. ",
(flags & VMA_SHARED) ? "/shared " : " ",
"Swap file size: %d bytes.\n", *f_offset, swap_file->length);
	return swap_file;
}
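
/*
 * Illustration only (not original code): how the mmap path mentioned in the
 * comment above might have used setup_swap_file() for an anonymous private
 * region. The helper name and the vma fields touched here (flags, owner,
 * f_offset) are assumptions.
 */
#if 0
static int map_anon_backing(struct tcb *task, struct vm_area *vma,
			    unsigned long map_address, int pages)
{
	unsigned long f_offset;
	struct vm_file *swap;

	if (!(swap = setup_swap_file(map_address, vma->flags, &f_offset,
				     task, pages)))
		return -ENOMEM;

	/* Record where this vma lives on its backing swap file */
	vma->owner = swap;
	vma->f_offset = f_offset;
	return 0;
}
#endif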