Removed Linux linked list dependency.

This commit is contained in:
Bahadir Balban
2009-06-02 13:19:17 +03:00
parent 4757f46f71
commit 276b4643c6
69 changed files with 455 additions and 885 deletions

View File

@@ -10,7 +10,7 @@
* of how mmaped devices would be mapped with a pager.
*/
struct mmap_device {
struct list_head page_list; /* Dyn-allocated page list */
struct link page_list; /* Dyn-allocated page list */
unsigned long pfn_start; /* Physical pfn start */
unsigned long pfn_end; /* Physical pfn end */
};
@@ -27,17 +27,17 @@ struct page *memdev_page_in(struct vm_object *vm_obj,
return PTR_ERR(-1);
/* Simply return the page if found */
list_for_each_entry(page, &memdev->page_list, list)
list_foreach_struct(page, &memdev->page_list, list)
if (page->offset == pfn_offset)
return page;
/* Otherwise allocate one of our own for that offset and return it */
page = kzalloc(sizeof(struct page));
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
spin_lock_init(&page->lock);
page->offset = pfn_offset;
page->owner = vm_obj;
list_add(&page->list, &memdev->page_list);
list_insert(&page->list, &memdev->page_list);
return page;
}

View File

@@ -75,7 +75,7 @@ int do_execve(struct tcb *sender, char *filename, struct args_struct *args,
BUG_ON(!(tgleader = find_task(sender->tgid)));
/* Destroy all children threads. */
list_for_each_entry(thread, &tgleader->children, child_ref)
list_foreach_struct(thread, &tgleader->children, child_ref)
do_exit(thread, 0);
} else {
/* Otherwise group leader is same as sender */

View File

@@ -96,7 +96,7 @@ int execve_recycle_task(struct tcb *new, struct tcb *orig)
/* Copy parent relationship */
BUG_ON(new->parent);
new->parent = orig->parent;
list_add(&new->child_ref, &orig->parent->children);
list_insert(&new->child_ref, &orig->parent->children);
/* Flush all IO on task's files and close fds */
task_close_files(orig);

View File

@@ -56,14 +56,14 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
* Given a reference to link = vma, head = vma, returns link1.
* Given a reference to link = link3, head = vma, returns 0.
*/
struct vm_obj_link *vma_next_link(struct list_head *link,
struct list_head *head)
struct vm_obj_link *vma_next_link(struct link *link,
struct link *head)
{
BUG_ON(list_empty(link));
if (link->next == head)
return 0;
else
return list_entry(link->next, struct vm_obj_link, list);
return link_to_struct(link->next, struct vm_obj_link, list);
}
/* Unlinks orig_link from its vma and deletes it but keeps the object. */
@@ -72,7 +72,7 @@ struct vm_object *vma_drop_link(struct vm_obj_link *link)
struct vm_object *dropped;
/* Remove object link from vma's list */
list_del(&link->list);
list_remove(&link->list);
/* Unlink the link from object */
dropped = vm_unlink_object(link);
@@ -104,7 +104,7 @@ int vm_object_is_subset(struct vm_object *shadow,
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &original->page_cache, list)
list_foreach_struct(pl, &original->page_cache, list)
if (!(pc = find_page(shadow, pl->offset)))
return 0;
/*
@@ -160,14 +160,14 @@ int vma_merge_object(struct vm_object *redundant)
BUG_ON(redundant->shadows != 1);
/* Get the last shadower object in front */
front = list_entry(redundant->shdw_list.next,
front = link_to_struct(redundant->shdw_list.next,
struct vm_object, shref);
/* Move all non-intersecting pages to front shadow. */
list_for_each_entry_safe(p1, n, &redundant->page_cache, list) {
list_foreach_removable_struct(p1, n, &redundant->page_cache, list) {
/* Page doesn't exist in front, move it there */
if (!(p2 = find_page(front, p1->offset))) {
list_del_init(&p1->list);
list_remove_init(&p1->list);
spin_lock(&p1->lock);
p1->owner = front;
spin_unlock(&p1->lock);
@@ -179,20 +179,20 @@ int vma_merge_object(struct vm_object *redundant)
/* Sort out shadow relationships after the merge: */
/* Front won't be a shadow of the redundant shadow anymore */
list_del_init(&front->shref);
list_remove_init(&front->shref);
/* Check that there really was one shadower of redundant left */
BUG_ON(!list_empty(&redundant->shdw_list));
/* Redundant won't be a shadow of its next object */
list_del_init(&redundant->shref);
list_remove_init(&redundant->shref);
/* Front is now a shadow of redundant's next object */
list_add(&front->shref, &redundant->orig_obj->shdw_list);
list_insert(&front->shref, &redundant->orig_obj->shdw_list);
front->orig_obj = redundant->orig_obj;
/* Find last link for the object */
last_link = list_entry(redundant->link_list.next,
last_link = link_to_struct(redundant->link_list.next,
struct vm_obj_link, linkref);
/* Drop the last link to the object */
@@ -213,8 +213,8 @@ struct vm_obj_link *vm_objlink_create(void)
if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&vmo_link->list);
INIT_LIST_HEAD(&vmo_link->linkref);
link_init(&vmo_link->list);
link_init(&vmo_link->linkref);
return vmo_link;
}
@@ -274,7 +274,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
/* Get the first object on the vma */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
do {
/* Create a new link */
@@ -284,7 +284,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
vm_link_object(new_link, vmo_link->obj);
/* Add the new link to vma in object order */
list_add_tail(&new_link->list, &new_vma->vm_obj_list);
list_insert_tail(&new_link->list, &new_vma->vm_obj_list);
/* Continue traversing links, doing the same copying */
} while((vmo_link = vma_next_link(&vmo_link->list,
@@ -361,10 +361,10 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Get previous and next links, if they exist */
prev = (link->list.prev == &vma->vm_obj_list) ? 0 :
list_entry(link->list.prev, struct vm_obj_link, list);
link_to_struct(link->list.prev, struct vm_obj_link, list);
next = (link->list.next == &vma->vm_obj_list) ? 0 :
list_entry(link->list.next, struct vm_obj_link, list);
link_to_struct(link->list.next, struct vm_obj_link, list);
/* Drop the link */
obj = vma_drop_link(link);
@@ -378,7 +378,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Remove prev from current object's shadow list */
BUG_ON(list_empty(&prev->obj->shref));
list_del_init(&prev->obj->shref);
list_remove_init(&prev->obj->shref);
/*
* We don't allow dropping non-shadow objects yet,
@@ -387,7 +387,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(!next);
/* prev is now shadow of next */
list_add(&prev->obj->shref,
list_insert(&prev->obj->shref,
&next->obj->shdw_list);
prev->obj->orig_obj = next->obj;
@@ -397,7 +397,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
*/
if (obj->nlinks == 0) {
BUG_ON(obj->orig_obj != next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
} else {
/*
* Dropped object still has referrers, which
@@ -421,7 +421,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(obj->orig_obj != next->obj);
BUG_ON(--next->obj->shadows < 0);
// vm_object_print(next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
}
}
}
@@ -475,7 +475,7 @@ int vma_drop_merge_delete_all(struct vm_area *vma)
BUG_ON(list_empty(&vma->vm_obj_list));
/* Traverse and get rid of all links */
list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list)
list_foreach_removable_struct(vmo_link, n, &vma->vm_obj_list, list)
vma_drop_merge_delete(vma, vmo_link);
return 0;
@@ -541,10 +541,10 @@ struct page *copy_on_write(struct fault_data *fault)
* v v
* shadow original
*/
list_add(&shadow_link->list, &vma->vm_obj_list);
list_insert(&shadow_link->list, &vma->vm_obj_list);
/* Add object to original's shadower list */
list_add(&shadow->shref, &shadow->orig_obj->shdw_list);
list_insert(&shadow->shref, &shadow->orig_obj->shdw_list);
/* Add to global object list */
global_add_vm_object(shadow);
@@ -758,7 +758,7 @@ int vm_freeze_shadows(struct tcb *task)
struct vm_object *vmo;
struct page *p;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
/* Shared vmas don't have shadows */
if (vma->flags & VMA_SHARED)
@@ -766,7 +766,7 @@ int vm_freeze_shadows(struct tcb *task)
/* Get the first object */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
vmo = vmo_link->obj;
@@ -789,7 +789,7 @@ int vm_freeze_shadows(struct tcb *task)
* Make all pages on it read-only
* in the page tables.
*/
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
/* Find virtual address of each page */
virtual = vma_page_to_virtual(vma, p);

View File

@@ -190,7 +190,7 @@ struct vm_file *do_open2(struct tcb *task, int fd, unsigned long vnum, unsigned
}
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -240,7 +240,7 @@ int do_open(struct tcb *task, int fd, unsigned long vnum, unsigned long length)
task->files->fd[fd].cursor = 0;
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -301,22 +301,22 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* Add if list is empty */
if (list_empty(&vmo->page_cache)) {
list_add_tail(&this->list, &vmo->page_cache);
list_insert_tail(&this->list, &vmo->page_cache);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, &vmo->page_cache, list) {
after = list_entry(before->list.next, struct page, list);
list_foreach_struct(before, &vmo->page_cache, list) {
after = link_to_struct(before->list.next, struct page, list);
/* If there's only one in list */
if (before->list.next == &vmo->page_cache) {
/* Add as next if greater */
if (this->offset > before->offset)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->offset < before->offset)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
return 0;
@@ -325,7 +325,7 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* If this page is in-between two other, insert it there */
if (before->offset < this->offset &&
after->offset > this->offset) {
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
BUG_ON(this->offset == before->offset);
@@ -603,7 +603,7 @@ int write_cache_pages_orig(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -627,7 +627,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;
@@ -666,7 +666,7 @@ int write_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list) {
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list) {
/* First page */
if (head->offset == pfn_start) {
left = count;
@@ -726,7 +726,7 @@ int read_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
unsigned long copy_offset; /* Current copy offset on the buffer */
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -745,7 +745,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;

View File

@@ -21,7 +21,7 @@
#include <utcb.h>
/* A separate list than the generic file list that keeps just the boot files */
LIST_HEAD(boot_file_list);
LINK_DECLARE(boot_file_list);
/*
* A specialised function for setting up the task environment of mm0.
@@ -53,7 +53,7 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
return err;
/* Set pager as child and parent of itself */
list_add(&task->child_ref, &task->children);
list_insert(&task->child_ref, &task->children);
task->parent = task;
/*
@@ -78,9 +78,9 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
struct vm_file *initdata_next_bootfile(struct initdata *initdata)
{
struct vm_file *file, *n;
list_for_each_entry_safe(file, n, &initdata->boot_file_list,
list_foreach_removable_struct(file, n, &initdata->boot_file_list,
list) {
list_del_init(&file->list);
list_remove_init(&file->list);
return file;
}
return 0;
@@ -96,10 +96,10 @@ int start_boot_tasks(struct initdata *initdata)
struct tcb *fs0_task;
struct svc_image *img;
struct task_ids ids;
struct list_head other_files;
struct link other_files;
int total = 0;
INIT_LIST_HEAD(&other_files);
link_init(&other_files);
/* Separate out special server tasks and regular files */
do {
@@ -113,7 +113,7 @@ int start_boot_tasks(struct initdata *initdata)
else if (!strcmp(img->name, __VFSNAME__))
fs0_file = file;
else
list_add(&file->list, &other_files);
list_insert(&file->list, &other_files);
} else
break;
} while (1);
@@ -138,12 +138,12 @@ int start_boot_tasks(struct initdata *initdata)
total++;
/* Initialise other tasks */
list_for_each_entry_safe(file, n, &other_files, list) {
list_foreach_removable_struct(file, n, &other_files, list) {
// printf("%s: Initialising new boot task.\n", __TASKNAME__);
ids.tid = TASK_ID_INVALID;
ids.spid = TASK_ID_INVALID;
ids.tgid = TASK_ID_INVALID;
list_del_init(&file->list);
list_remove_init(&file->list);
BUG_ON(IS_ERR(boottask_exec(file, USER_AREA_START, USER_AREA_END, &ids)));
total++;
}

View File

@@ -79,7 +79,7 @@ void init_physmem(struct initdata *initdata, struct membank *membank)
/* Initialise the page array */
for (int i = 0; i < npages; i++) {
INIT_LIST_HEAD(&membank[0].page_array[i].list);
link_init(&membank[0].page_array[i].list);
/* Set use counts for pages the kernel has already used up */
if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))

View File

@@ -28,8 +28,8 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
if (!(vma = kzalloc(sizeof(struct vm_area))))
return 0;
INIT_LIST_HEAD(&vma->list);
INIT_LIST_HEAD(&vma->vm_obj_list);
link_init(&vma->list);
link_init(&vma->vm_obj_list);
vma->pfn_start = pfn_start;
vma->pfn_end = pfn_start + npages;
@@ -45,19 +45,19 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
* The new vma is assumed to have been correctly set up not to intersect
* with any other existing vma.
*/
int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
int task_insert_vma(struct vm_area *this, struct link *vma_list)
{
struct vm_area *before, *after;
/* Add if list is empty */
if (list_empty(vma_list)) {
list_add_tail(&this->list, vma_list);
list_insert_tail(&this->list, vma_list);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, vma_list, list) {
after = list_entry(before->list.next, struct vm_area, list);
list_foreach_struct(before, vma_list, list) {
after = link_to_struct(before->list.next, struct vm_area, list);
/* If there's only one in list */
if (before->list.next == vma_list) {
@@ -69,10 +69,10 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
/* Add as next if greater */
if (this->pfn_start > before->pfn_start)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->pfn_start < before->pfn_start)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
@@ -90,7 +90,7 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
BUG_ON(set_intersection(this->pfn_start, this->pfn_end,
after->pfn_start,
after->pfn_end));
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
@@ -122,7 +122,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
return task->start;
/* First vma to check our range against */
vma = list_entry(task->vm_area_head->list.next, struct vm_area, list);
vma = link_to_struct(task->vm_area_head->list.next, struct vm_area, list);
/* Start searching from task's end of data to start of stack */
while (pfn_end <= __pfn(task->end)) {
@@ -147,7 +147,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
}
/* Otherwise get next vma entry */
vma = list_entry(vma->list.next,
vma = link_to_struct(vma->list.next,
struct vm_area, list);
continue;
}
@@ -282,7 +282,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
vm_link_object(vmo_link, &mapfile->vm_obj);
/* Add link to vma list */
list_add_tail(&vmo_link->list, &new->vm_obj_list);
list_insert_tail(&vmo_link->list, &new->vm_obj_list);
/*
* If the file is a shm file, also map devzero behind it. i.e.
@@ -304,7 +304,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
return PTR_ERR(-ENOMEM);
}
vm_link_object(vmo_link2, &dzero->vm_obj);
list_add_tail(&vmo_link2->list, &new->vm_obj_list);
list_insert_tail(&vmo_link2->list, &new->vm_obj_list);
}
/* Finished initialising the vma, add it to task */

View File

@@ -43,7 +43,7 @@ int vma_split(struct vm_area *vma, struct tcb *task,
vma_copy_links(new, vma);
/* Add new one next to original vma */
list_add_tail(&new->list, &vma->list);
list_insert_tail(&new->list, &vma->list);
/* Unmap the removed portion */
BUG_ON(l4_unmap((void *)__pfn_to_addr(unmap_start),
@@ -102,7 +102,7 @@ int vma_destroy_single(struct tcb *task, struct vm_area *vma)
vma->pfn_end - vma->pfn_start, task->tid);
/* Unlink and delete vma */
list_del(&vma->list);
list_remove(&vma->list);
kfree(vma);
return 0;
@@ -149,7 +149,7 @@ int vma_flush_pages(struct vm_area *vma)
* could only be a single VM_SHARED file-backed object in the chain.
*/
BUG_ON(list_empty(&vma->list));
vmo_link = list_entry(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo_link = link_to_struct(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo = vmo_link->obj;
/* Only dirty objects would need flushing */
@@ -187,7 +187,7 @@ int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
struct vm_area *vma, *n;
int err;
list_for_each_entry_safe(vma, n, &task->vm_area_head->list, list) {
list_foreach_removable_struct(vma, n, &task->vm_area_head->list, list) {
/* Check for intersection */
if (set_intersection(munmap_start, munmap_end,
vma->pfn_start, vma->pfn_end)) {

View File

@@ -21,7 +21,7 @@ struct page *page_init(struct page *page)
memset(page, 0, sizeof(*page));
page->refcnt = -1;
spin_lock_init(&page->lock);
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
return page;
@@ -30,7 +30,7 @@ struct page *find_page(struct vm_object *obj, unsigned long pfn)
{
struct page *p;
list_for_each_entry(p, &obj->page_cache, list)
list_foreach_struct(p, &obj->page_cache, list)
if (p->offset == pfn)
return p;
@@ -46,8 +46,8 @@ int default_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del_init(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove_init(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -219,8 +219,8 @@ int bootfile_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -295,7 +295,7 @@ int init_boot_files(struct initdata *initdata)
struct vm_file *boot_file;
struct svc_image *img;
INIT_LIST_HEAD(&initdata->boot_file_list);
link_init(&initdata->boot_file_list);
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
@@ -311,7 +311,7 @@ int init_boot_files(struct initdata *initdata)
boot_file->vm_obj.pager = &bootfile_pager;
/* Add the file to initdata's bootfile list */
list_add_tail(&boot_file->list, &initdata->boot_file_list);
list_insert_tail(&boot_file->list, &initdata->boot_file_list);
}
return 0;
@@ -345,7 +345,7 @@ struct vm_file *get_devzero(void)
{
struct vm_file *f;
list_for_each_entry(f, &global_vm_files.list, list)
list_foreach_struct(f, &global_vm_files.list, list)
if (f->type == VM_FILE_DEVZERO)
return f;
return 0;

View File

@@ -130,7 +130,7 @@ void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list) {
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list) {
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shmid == shmid)
return do_shmat(shm_file, shmaddr,
@@ -156,7 +156,7 @@ int sys_shmdt(struct tcb *task, const void *shmaddr)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list)
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list)
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shm_addr == shmaddr)
return do_shmdt(task, shm_file);
@@ -235,7 +235,7 @@ void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr)
struct vm_file *shm_file;
struct shm_descriptor *shm_desc;
list_for_each_entry(shm_file, &global_vm_files.list, list) {
list_foreach_struct(shm_file, &global_vm_files.list, list) {
if(shm_file->type == VM_FILE_SHM) {
shm_desc = shm_file_to_desc(shm_file);
/* Found the key, shmat that area */
@@ -274,7 +274,7 @@ int sys_shmget(key_t key, int size, int shmflg)
return shm_file_to_desc(shm)->shmid;
}
list_for_each_entry(shm, &global_vm_files.list, list) {
list_foreach_struct(shm, &global_vm_files.list, list) {
if (shm->type != VM_FILE_SHM)
continue;

View File

@@ -45,7 +45,7 @@ void print_tasks(void)
{
struct tcb *task;
printf("Tasks:\n========\n");
list_for_each_entry(task, &global_tasks.list, list) {
list_foreach_struct(task, &global_tasks.list, list) {
printf("Task tid: %d, spid: %d\n", task->tid, task->spid);
}
}
@@ -53,14 +53,14 @@ void print_tasks(void)
void global_add_task(struct tcb *task)
{
BUG_ON(!list_empty(&task->list));
list_add_tail(&task->list, &global_tasks.list);
list_insert_tail(&task->list, &global_tasks.list);
global_tasks.total++;
}
void global_remove_task(struct tcb *task)
{
BUG_ON(list_empty(&task->list));
list_del_init(&task->list);
list_remove_init(&task->list);
BUG_ON(--global_tasks.total < 0);
}
@@ -68,7 +68,7 @@ struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &global_tasks.list, list)
list_foreach_struct(t, &global_tasks.list, list)
if (t->tid == tid)
return t;
return 0;
@@ -89,7 +89,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->vm_area_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->vm_area_head->list);
link_init(&task->vm_area_head->list);
/* Also allocate a utcb head for new address space */
if (!(task->utcb_head =
@@ -99,7 +99,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->utcb_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->utcb_head->list);
link_init(&task->utcb_head->list);
}
/* Allocate file structures if not shared */
@@ -120,9 +120,9 @@ struct tcb *tcb_alloc_init(unsigned int flags)
task->tgid = TASK_ID_INVALID;
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->child_ref);
INIT_LIST_HEAD(&task->children);
link_init(&task->list);
link_init(&task->child_ref);
link_init(&task->children);
return task;
}
@@ -180,15 +180,15 @@ int tcb_destroy(struct tcb *task)
* All children of the current task becomes children
* of the parent of this task.
*/
list_for_each_entry_safe(child, n, &task->children,
list_foreach_removable_struct(child, n, &task->children,
child_ref) {
list_del_init(&child->child_ref);
list_add_tail(&child->child_ref,
list_remove_init(&child->child_ref);
list_insert_tail(&child->child_ref,
&task->parent->children);
child->parent = task->parent;
}
/* The task is not a child of its parent */
list_del_init(&task->child_ref);
list_remove_init(&task->child_ref);
/* Now task deletion make sure task is in no list */
BUG_ON(!list_empty(&task->list));
@@ -209,7 +209,7 @@ int task_copy_vmas(struct tcb *to, struct tcb *from)
{
struct vm_area *vma, *new_vma;
list_for_each_entry(vma, &from->vm_area_head->list, list) {
list_foreach_struct(vma, &from->vm_area_head->list, list) {
/* Create a new vma */
new_vma = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
@@ -233,12 +233,12 @@ int task_release_vmas(struct task_vma_head *vma_head)
{
struct vm_area *vma, *n;
list_for_each_entry_safe(vma, n, &vma_head->list, list) {
list_foreach_removable_struct(vma, n, &vma_head->list, list) {
/* Release all links */
vma_drop_merge_delete_all(vma);
/* Delete the vma from task's vma list */
list_del(&vma->list);
list_remove(&vma->list);
/* Free the vma */
kfree(vma);
@@ -358,11 +358,11 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
* On these conditions child shares
* the parent of the caller
*/
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->parent->children);
task->parent = parent->parent;
} else {
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->children);
task->parent = parent;
}
@@ -370,7 +370,7 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
struct tcb *pager = find_task(PAGER_TID);
/* All parentless tasks are children of the pager */
list_add_tail(&task->child_ref, &pager->children);
list_insert_tail(&task->child_ref, &pager->children);
task->parent = pager;
}
@@ -674,7 +674,7 @@ int vfs_send_task_data(struct tcb *vfs)
tdata_head->total = global_tasks.total;
/* Write per-task data for all tasks */
list_for_each_entry(t, &global_tasks.list, list) {
list_foreach_struct(t, &global_tasks.list, list) {
tdata_head->tdata[li].tid = t->tid;
tdata_head->tdata[li].shpage_address = (unsigned long)t->shared_page;
li++;
@@ -697,7 +697,7 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
struct vm_area *vma;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
VM_READ | VM_WRITE) < 0);

View File

@@ -30,7 +30,7 @@ int vm_object_test_link_count(struct vm_object *vmo)
int links = 0;
struct vm_obj_link *l;
list_for_each_entry(l, &vmo->link_list, linkref)
list_foreach_struct(l, &vmo->link_list, linkref)
links++;
BUG_ON(links != vmo->nlinks);
@@ -42,7 +42,7 @@ int vm_object_test_shadow_count(struct vm_object *vmo)
struct vm_object *sh;
int shadows = 0;
list_for_each_entry(sh, &vmo->shdw_list, shref)
list_foreach_struct(sh, &vmo->shdw_list, shref)
shadows++;
BUG_ON(shadows != vmo->shadows);
@@ -64,7 +64,7 @@ int mm0_test_global_vm_integrity(void)
memset(&vmstat, 0, sizeof(vmstat));
/* Count all shadow and file objects */
list_for_each_entry(vmo, &global_vm_objects.list, list) {
list_foreach_struct(vmo, &global_vm_objects.list, list) {
vmstat.shadows_referred += vmo->shadows;
if (vmo->flags & VM_OBJ_SHADOW)
vmstat.shadow_objects++;
@@ -76,7 +76,7 @@ int mm0_test_global_vm_integrity(void)
}
/* Count all registered vmfiles */
list_for_each_entry(f, &global_vm_files.list, list) {
list_foreach_struct(f, &global_vm_files.list, list) {
vmstat.vm_files++;
if (f->type == VM_FILE_SHM)
vmstat.shm_files++;
@@ -116,7 +116,7 @@ int mm0_test_global_vm_integrity(void)
BUG_ON(vmstat.shadow_objects != vmstat.shadows_referred);
/* Count all tasks */
list_for_each_entry(task, &global_tasks.list, list)
list_foreach_struct(task, &global_tasks.list, list)
vmstat.tasks++;
if (vmstat.tasks != global_tasks.total) {

View File

@@ -67,7 +67,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
if (!(d = kzalloc(sizeof(*d))))
return 0;
INIT_LIST_HEAD(&d->list);
link_init(&d->list);
/* We currently assume UTCB is smaller than PAGE_SIZE */
BUG_ON(UTCB_SIZE > PAGE_SIZE);
@@ -80,7 +80,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
d->utcb_base = (unsigned long)utcb_new_address(1);
/* Add descriptor to tcb's chain */
list_add(&d->list, &task->utcb_head->list);
list_insert(&d->list, &task->utcb_head->list);
/* Obtain and return first slot */
return utcb_new_slot(d);
@@ -89,7 +89,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
{
/* Unlink desc from its list */
list_del_init(&d->list);
list_remove_init(&d->list);
/* Unmap the descriptor region */
do_munmap(task, d->utcb_base, 1);
@@ -104,7 +104,7 @@ int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
}
/*
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* address space. A new utcb is allocated and mmap'ed for the child task
* running in the newly created address space.
*
@@ -126,7 +126,7 @@ int task_setup_utcb(struct tcb *task)
BUG_ON(task->utcb_address);
/* Search for an empty utcb slot already allocated to this space */
list_for_each_entry(udesc, &task->utcb_head->list, list)
list_foreach_struct(udesc, &task->utcb_head->list, list)
if ((slot = utcb_new_slot(udesc)))
goto out;
@@ -163,7 +163,7 @@ int task_destroy_utcb(struct tcb *task)
// printf("UTCB: Destroying 0x%x\n", task->utcb_address);
/* Find the utcb descriptor slot first */
list_for_each_entry(udesc, &task->utcb_head->list, list) {
list_foreach_struct(udesc, &task->utcb_head->list, list) {
/* FIXME: Use variable alignment than a page */
/* Detect matching slot */
if (page_align(task->utcb_address) == udesc->utcb_base) {

View File

@@ -26,21 +26,21 @@ struct global_list global_vm_objects = {
void global_add_vm_object(struct vm_object *obj)
{
BUG_ON(!list_empty(&obj->list));
list_add(&obj->list, &global_vm_objects.list);
list_insert(&obj->list, &global_vm_objects.list);
global_vm_objects.total++;
}
void global_remove_vm_object(struct vm_object *obj)
{
BUG_ON(list_empty(&obj->list));
list_del_init(&obj->list);
list_remove_init(&obj->list);
BUG_ON(--global_vm_objects.total < 0);
}
void global_add_vm_file(struct vm_file *f)
{
BUG_ON(!list_empty(&f->list));
list_add(&f->list, &global_vm_files.list);
list_insert(&f->list, &global_vm_files.list);
global_vm_files.total++;
global_add_vm_object(&f->vm_obj);
@@ -49,7 +49,7 @@ void global_add_vm_file(struct vm_file *f)
void global_remove_vm_file(struct vm_file *f)
{
BUG_ON(list_empty(&f->list));
list_del_init(&f->list);
list_remove_init(&f->list);
BUG_ON(--global_vm_files.total < 0);
global_remove_vm_object(&f->vm_obj);
@@ -62,7 +62,7 @@ void print_cache_pages(struct vm_object *vmo)
if (!list_empty(&vmo->page_cache))
printf("Pages:\n======\n");
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
dprintf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
@@ -97,29 +97,29 @@ void vm_object_print(struct vm_object *vmo)
// printf("\n");
}
void vm_print_files(struct list_head *files)
void vm_print_files(struct link *files)
{
struct vm_file *f;
list_for_each_entry(f, files, list)
list_foreach_struct(f, files, list)
vm_object_print(&f->vm_obj);
}
void vm_print_objects(struct list_head *objects)
void vm_print_objects(struct link *objects)
{
struct vm_object *vmo;
list_for_each_entry(vmo, objects, list)
list_foreach_struct(vmo, objects, list)
vm_object_print(vmo);
}
struct vm_object *vm_object_init(struct vm_object *obj)
{
INIT_LIST_HEAD(&obj->list);
INIT_LIST_HEAD(&obj->shref);
INIT_LIST_HEAD(&obj->shdw_list);
INIT_LIST_HEAD(&obj->page_cache);
INIT_LIST_HEAD(&obj->link_list);
link_init(&obj->list);
link_init(&obj->shref);
link_init(&obj->shdw_list);
link_init(&obj->page_cache);
link_init(&obj->link_list);
return obj;
}
@@ -142,7 +142,7 @@ struct vm_file *vm_file_create(void)
if (!(f = kzalloc(sizeof(*f))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&f->list);
link_init(&f->list);
vm_object_init(&f->vm_obj);
f->vm_obj.flags = VM_OBJ_FILE;