Removed Linux linked list dependency.

Bahadir Balban
2009-06-02 13:19:17 +03:00
parent 4757f46f71
commit 276b4643c6
69 changed files with 455 additions and 885 deletions
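The renames below are mechanical and one-to-one: struct list_head becomes struct link, LIST_HEAD/INIT_LIST_HEAD become LINK_DECLARE/link_init, list_add/list_add_tail become list_insert/list_insert_tail, list_del/list_del_init become list_remove/list_remove_init, list_entry becomes link_to_struct, and list_for_each_entry(_safe) becomes list_foreach_struct/list_foreach_removable_struct. For orientation, here is a sketch of the new core type and its insert/remove helpers, reconstructed from the call sites in this commit under the assumption that they keep the semantics of the Linux calls they replace; the actual header in the tree may differ in detail.

/*
 * Sketch of the list API this commit switches to. The names come from
 * the diff; the field names and function bodies are assumptions.
 */
struct link {
	struct link *next;
	struct link *prev;
};

/* Declare and statically initialise a stand-alone list head. */
#define LINK_DECLARE(name) \
	struct link name = { .next = &(name), .prev = &(name) }

/* An empty head (or a detached node) points back at itself. */
static inline void link_init(struct link *l)
{
	l->next = l;
	l->prev = l;
}

static inline int list_empty(struct link *head)
{
	return head->next == head;
}

/* Insert n right after head (LIFO order, like list_add). */
static inline void list_insert(struct link *n, struct link *head)
{
	n->prev = head;
	n->next = head->next;
	head->next->prev = n;
	head->next = n;
}

/* Insert n right before head (FIFO order, like list_add_tail). */
static inline void list_insert_tail(struct link *n, struct link *head)
{
	n->next = head;
	n->prev = head->prev;
	head->prev->next = n;
	head->prev = n;
}

/* Unlink a node from whatever list it is on. */
static inline void list_remove(struct link *l)
{
	l->prev->next = l->next;
	l->next->prev = l->prev;
}

/* Unlink and leave the node in a safely re-usable (empty) state. */
static inline void list_remove_init(struct link *l)
{
	list_remove(l);
	link_init(l);
}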

View File

@@ -85,10 +85,10 @@ struct dentry {
int refcnt;
char name[VFS_DNAME_MAX];
struct dentry *parent; /* Parent dentry */
struct list_head child; /* List of dentries with same parent */
struct list_head children; /* List of children dentries */
struct list_head vref; /* For vnode's dirent reference list */
struct list_head cache_list; /* Dentry cache reference */
struct link child; /* List of dentries with same parent */
struct link children; /* List of children dentries */
struct link vref; /* For vnode's dirent reference list */
struct link cache_list; /* Dentry cache reference */
struct vnode *vnode; /* The vnode associated with dentry */
struct dentry_ops ops;
};
@@ -120,8 +120,8 @@ struct vnode {
struct superblock *sb; /* Reference to superblock */
struct vnode_ops ops; /* Operations on this vnode */
struct file_ops fops; /* File-related operations on this vnode */
struct list_head dentries; /* Dirents that refer to this vnode */
struct list_head cache_list; /* For adding the vnode to vnode cache */
struct link dentries; /* Dirents that refer to this vnode */
struct link cache_list; /* For adding the vnode to vnode cache */
struct dirbuf dirbuf; /* Only directory buffers are kept */
u32 mode; /* Permissions and vnode type */
u32 owner; /* Owner */
@@ -149,15 +149,15 @@ struct file_system_type {
char name[VFS_FSNAME_MAX];
unsigned long magic;
struct fstype_ops ops;
struct list_head list; /* Member of list of all fs types */
struct list_head sblist; /* List of superblocks with this type */
struct link list; /* Member of list of all fs types */
struct link sblist; /* List of superblocks with this type */
};
struct superblock *get_superblock(void *buf);
struct superblock {
u64 fssize;
unsigned int blocksize;
struct list_head list;
struct link list;
struct file_system_type *fs;
struct superblock_ops *ops;
struct vnode *root;

View File

@@ -3,7 +3,7 @@
struct global_list {
int total;
struct list_head list;
struct link list;
};
extern struct global_list global_vm_files;

View File

@@ -66,8 +66,8 @@ struct memfs_superblock {
u64 fmaxblocks; /* Maximum number of blocks per file */
u64 fssize; /* Total size of filesystem */
unsigned long root_vnum; /* The root vnum of this superblock */
struct list_head inode_cache_list; /* Chain of alloc caches */
struct list_head block_cache_list; /* Chain of alloc caches */
struct link inode_cache_list; /* Chain of alloc caches */
struct link block_cache_list; /* Chain of alloc caches */
struct id_pool *ipool; /* Index pool for inodes */
struct id_pool *bpool; /* Index pool for blocks */
struct memfs_inode *inode[MEMFS_TOTAL_INODES]; /* Table of inodes */
@@ -88,7 +88,7 @@ extern struct file_ops memfs_file_operations;
int memfs_format_filesystem(void *buffer);
struct memfs_inode *memfs_create_inode(struct memfs_superblock *sb);
void memfs_register_fstype(struct list_head *);
void memfs_register_fstype(struct link *);
struct superblock *memfs_get_superblock(void *block);
int memfs_generate_superblock(void *block);

View File

@@ -21,12 +21,12 @@
#define VFS_STR_XATDIR "...."
struct pathdata {
struct list_head list;
struct link list;
struct vnode *vstart;
};
struct pathcomp {
struct list_head list;
struct link list;
const char *str;
};

View File

@@ -32,7 +32,7 @@ struct task_fs_data {
/* Thread control block, fs0 portion */
struct tcb {
l4id_t tid;
struct list_head list;
struct link list;
unsigned long shpage_address;
struct task_fd_head *files;
struct task_fs_data *fs_data;

View File

@@ -10,11 +10,11 @@
#include <task.h>
#include <path.h>
extern struct list_head vnode_cache;
extern struct list_head dentry_cache;
extern struct link vnode_cache;
extern struct link dentry_cache;
/*
* This is a temporary replacement for page cache support provided by mm0.
* Normally mm0 tracks all vnode pages, but this is used to track pages in
* directory vnodes, which are normally never mapped by tasks.
*/
@@ -36,10 +36,10 @@ static inline struct dentry *vfs_alloc_dentry(void)
{
struct dentry *d = kzalloc(sizeof(struct dentry));
INIT_LIST_HEAD(&d->child);
INIT_LIST_HEAD(&d->children);
INIT_LIST_HEAD(&d->vref);
INIT_LIST_HEAD(&d->cache_list);
link_init(&d->child);
link_init(&d->children);
link_init(&d->vref);
link_init(&d->cache_list);
return d;
}
@@ -53,8 +53,8 @@ static inline struct vnode *vfs_alloc_vnode(void)
{
struct vnode *v = kzalloc(sizeof(struct vnode));
INIT_LIST_HEAD(&v->dentries);
INIT_LIST_HEAD(&v->cache_list);
link_init(&v->dentries);
link_init(&v->cache_list);
return v;
}
@@ -62,14 +62,14 @@ static inline struct vnode *vfs_alloc_vnode(void)
static inline void vfs_free_vnode(struct vnode *v)
{
BUG(); /* Are the dentries freed ??? */
list_del(&v->cache_list);
list_remove(&v->cache_list);
kfree(v);
}
static inline struct superblock *vfs_alloc_superblock(void)
{
struct superblock *sb = kmalloc(sizeof(struct superblock));
INIT_LIST_HEAD(&sb->list);
link_init(&sb->list);
return sb;
}

View File

@@ -78,7 +78,7 @@ void handle_fs_requests(void)
switch(tag) {
case L4_IPC_TAG_SYNC:
printf("%s: Synced with waiting thread.\n", __TASKNAME__);
return; /* No reply for this tag */
case L4_IPC_TAG_OPEN:
ret = sys_open(sender, (void *)mr[0], (int)mr[1], (unsigned int)mr[2]);
break;

View File

@@ -12,7 +12,7 @@ struct dentry *bootfs_dentry_lookup(struct dentry *d, char *dname)
{
struct dentry *this;
list_for_each_entry(this, child, &d->children) {
list_foreach_struct(this, child, &d->children) {
if (this->compare(this, dname))
return this;
}
@@ -65,16 +65,16 @@ void bootfs_populate(struct initdata *initdata, struct superblock *sb)
d->vnode = v;
d->parent = sb->root;
strncpy(d->name, img->name, VFS_DENTRY_NAME_MAX);
INIT_LIST_HEAD(&d->child);
INIT_LIST_HEAD(&d->children);
list_add(&d->child, &sb->root->children);
link_init(&d->child);
link_init(&d->children);
list_insert(&d->child, &sb->root->children);
/* Initialise vnode for image */
v->refcnt = 0;
v->id = img->phys_start;
v->size = img->phys_end - img->phys_start;
INIT_LIST_HEAD(&v->dirents);
list_add(&d->v_ref, &v->dirents);
link_init(&v->dirents);
list_insert(&d->v_ref, &v->dirents);
/* Initialise file struct for image */
f->refcnt = 0;
@@ -93,17 +93,17 @@ void bootfs_init_root(struct dentry *r)
/* Initialise dentry for rootdir */
r->refcnt = 0;
strcpy(r->name, "");
INIT_LIST_HEAD(&r->child);
INIT_LIST_HEAD(&r->children);
INIT_LIST_HEAD(&r->vref);
link_init(&r->child);
link_init(&r->children);
link_init(&r->vref);
r->parent = r;
/* Initialise vnode for rootdir */
v->id = 0;
v->refcnt = 0;
INIT_LIST_HEAD(&v->dirents);
INIT_LIST_HEAD(&v->state_list);
list_add(&r->vref, &v->dirents);
link_init(&v->dirents);
link_init(&v->state_list);
list_insert(&r->vref, &v->dirents);
v->size = 0;
}
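
The traversal side of the API is the familiar container-of idiom: link_to_struct() maps a pointer to an embedded struct link back to its enclosing structure, and list_foreach_struct() walks enclosing structures rather than raw links. A hedged sketch, using the (entry, head, member) argument order seen at most call sites in this commit; the macro names are from the diff, the bodies are assumptions.

#include <stddef.h>	/* offsetof */

/* Recover the enclosing structure from a pointer to its link member. */
#define link_to_struct(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Iterate over every entry embedding a struct link named 'member'. */
#define list_foreach_struct(entry, head, member)			\
	for (entry = link_to_struct((head)->next,			\
				    typeof(*entry), member);		\
	     &entry->member != (head);					\
	     entry = link_to_struct(entry->member.next,			\
				    typeof(*entry), member))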

View File

@@ -23,7 +23,7 @@ struct file_system_type sfs_type = {
};
/* Registers sfs as an available filesystem type */
void sfs_register_fstype(struct list_head *fslist)
void sfs_register_fstype(struct link *fslist)
{
list_add(&sfs_type.list, fslist);
list_insert(&sfs_type.list, fslist);
}

View File

@@ -108,6 +108,6 @@ struct sfs_dentry {
} __attribute__ ((__packed__));
void sfs_register_type(struct list_head *);
void sfs_register_type(struct link *);
#endif /* __C0FS_LAYOUT_H__ */

View File

@@ -14,21 +14,21 @@
#include <l4/api/errno.h>
#include <memfs/memfs.h>
struct list_head fs_type_list;
struct link fs_type_list;
struct superblock *vfs_probe_filesystems(void *block)
{
struct file_system_type *fstype;
struct superblock *sb;
list_for_each_entry(fstype, &fs_type_list, list) {
list_foreach_struct(fstype, &fs_type_list, list) {
/* Does the superblock match for this fs type? */
if ((sb = fstype->ops.get_superblock(block))) {
/*
* Add this to the list of superblocks this
* fs already has.
*/
list_add(&sb->list, &fstype->sblist);
list_insert(&sb->list, &fstype->sblist);
return sb;
}
}
@@ -43,7 +43,7 @@ struct superblock *vfs_probe_filesystems(void *block)
void vfs_register_filesystems(void)
{
/* Initialise fstype list */
INIT_LIST_HEAD(&fs_type_list);
link_init(&fs_type_list);
/* Call per-fs registration functions */
memfs_register_fstype(&fs_type_list);

View File

@@ -22,7 +22,7 @@ struct vnode *lookup_dentry_children(struct dentry *parentdir,
struct vnode *v;
const char *component = pathdata_next_component(pdata);
list_for_each_entry(childdir, &parentdir->children, child)
list_foreach_struct(childdir, &parentdir->children, child)
if (IS_ERR(v = childdir->vnode->ops.lookup(childdir->vnode,
pdata, component)))
/* Means not found, continue search */
@@ -47,7 +47,7 @@ struct vnode *generic_vnode_lookup(struct vnode *thisnode,
int err;
/* Does this path component match with any of this vnode's dentries? */
list_for_each_entry(d, &thisnode->dentries, vref) {
list_foreach_struct(d, &thisnode->dentries, vref) {
if (d->ops.compare(d, component)) {
/* Is this a directory? */
if (vfs_isdir(thisnode)) {

View File

@@ -29,13 +29,13 @@ int memfs_init_caches(struct memfs_superblock *sb)
free_block = (void *)sb + sizeof(*sb);
block_cache = mem_cache_init(free_block, sb->fssize - sizeof(*sb),
sb->blocksize, 1);
list_add(&block_cache->list, &sb->block_cache_list);
list_insert(&block_cache->list, &sb->block_cache_list);
/* Allocate a block and initialise it as first inode cache */
free_block = mem_cache_alloc(block_cache);
inode_cache = mem_cache_init(free_block, sb->blocksize,
sizeof(struct memfs_inode), 0);
list_add(&inode_cache->list, &sb->inode_cache_list);
list_insert(&inode_cache->list, &sb->inode_cache_list);
return 0;
}
@@ -62,8 +62,8 @@ int memfs_format_filesystem(void *buffer)
sb->bpool = id_pool_new_init(MEMFS_TOTAL_BLOCKS);
/* Initialise bitmap allocation lists for blocks and inodes */
INIT_LIST_HEAD(&sb->block_cache_list);
INIT_LIST_HEAD(&sb->inode_cache_list);
link_init(&sb->block_cache_list);
link_init(&sb->inode_cache_list);
memfs_init_caches(sb);
return 0;
@@ -74,7 +74,7 @@ void *memfs_alloc_block(struct memfs_superblock *sb)
{
struct mem_cache *cache;
list_for_each_entry(cache, &sb->block_cache_list, list) {
list_foreach_struct(cache, &sb->block_cache_list, list) {
if (cache->free)
return mem_cache_zalloc(cache);
else
@@ -91,7 +91,7 @@ int memfs_free_block(struct memfs_superblock *sb, void *block)
{
struct mem_cache *c, *tmp;
list_for_each_entry_safe(c, tmp, &sb->block_cache_list, list)
list_foreach_removable_struct(c, tmp, &sb->block_cache_list, list)
if (!mem_cache_free(c, block))
return 0;
else
@@ -151,11 +151,11 @@ int memfs_init_rootdir(struct superblock *sb)
d->vnode = v;
/* Associate dentry with its vnode */
list_add(&d->vref, &d->vnode->dentries);
list_insert(&d->vref, &d->vnode->dentries);
/* Add both vnode and dentry to their flat caches */
list_add(&d->cache_list, &dentry_cache);
list_add(&v->cache_list, &vnode_cache);
list_insert(&d->cache_list, &dentry_cache);
list_insert(&v->cache_list, &vnode_cache);
return 0;
}
@@ -204,12 +204,12 @@ struct superblock *memfs_get_superblock(void *block)
}
/* Registers sfs as an available filesystem type */
void memfs_register_fstype(struct list_head *fslist)
void memfs_register_fstype(struct link *fslist)
{
/* Initialise superblock list for this fstype */
INIT_LIST_HEAD(&memfs_fstype.sblist);
link_init(&memfs_fstype.sblist);
/* Add this fstype to list of available fstypes. */
list_add(&memfs_fstype.list, fslist);
list_insert(&memfs_fstype.list, fslist);
}

View File

@@ -21,7 +21,7 @@ struct memfs_inode *memfs_alloc_inode(struct memfs_superblock *sb)
void *free_block;
/* Ask existing inode caches for a new inode */
list_for_each_entry(cache, &sb->inode_cache_list, list) {
list_foreach_struct(cache, &sb->inode_cache_list, list) {
if (cache->free)
if (!(i = mem_cache_zalloc(cache)))
return PTR_ERR(-ENOSPC);
@@ -38,7 +38,7 @@ struct memfs_inode *memfs_alloc_inode(struct memfs_superblock *sb)
/* Initialise it as an inode cache */
cache = mem_cache_init(free_block, sb->blocksize,
sizeof(struct memfs_inode), 0);
list_add(&cache->list, &sb->inode_cache_list);
list_insert(&cache->list, &sb->inode_cache_list);
if (!(i = mem_cache_zalloc(cache)))
return PTR_ERR(-ENOSPC);
@@ -53,13 +53,13 @@ int memfs_free_inode(struct memfs_superblock *sb, struct memfs_inode *i)
{
struct mem_cache *c, *tmp;
list_for_each_entry_safe(c, tmp, &sb->inode_cache_list, list) {
list_foreach_removable_struct(c, tmp, &sb->inode_cache_list, list) {
/* Free it, if success */
if (!mem_cache_free(c, i)) {
/* If cache completely empty */
if (mem_cache_is_empty(c)) {
/* Free the block, too. */
list_del(&c->list);
list_remove(&c->list);
memfs_free_block(sb, c);
}
return 0;
@@ -213,7 +213,7 @@ int memfs_write_vnode(struct superblock *sb, struct vnode *v)
struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
unsigned int mode)
{
struct dentry *d, *parent = list_entry(v->dentries.next,
struct dentry *d, *parent = link_to_struct(v->dentries.next,
struct dentry, vref);
struct memfs_dentry *memfsd;
struct dentry *newd;
@@ -234,7 +234,7 @@ struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
return PTR_ERR(err);
/* Check there's no existing child with same name */
list_for_each_entry(d, &parent->children, child) {
list_foreach_struct(d, &parent->children, child) {
/* Does the name exist as a child? */
if(d->ops.compare(d, dirname))
return PTR_ERR(-EEXIST);
@@ -278,14 +278,14 @@ struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
strncpy(newd->name, dirname, VFS_DNAME_MAX);
/* Associate dentry with its vnode */
list_add(&newd->vref, &newd->vnode->dentries);
list_insert(&newd->vref, &newd->vnode->dentries);
/* Associate dentry with its parent */
list_add(&newd->child, &parent->children);
list_insert(&newd->child, &parent->children);
/* Add both vnode and dentry to their flat caches */
list_add(&newd->cache_list, &dentry_cache);
list_add(&newv->cache_list, &vnode_cache);
list_insert(&newd->cache_list, &dentry_cache);
list_insert(&newv->cache_list, &vnode_cache);
return newv;
}
@@ -303,7 +303,7 @@ int memfs_vnode_readdir(struct vnode *v)
{
int err;
struct memfs_dentry *memfsd;
struct dentry *parent = list_entry(v->dentries.next,
struct dentry *parent = link_to_struct(v->dentries.next,
struct dentry, vref);
/*
@@ -327,7 +327,7 @@ int memfs_vnode_readdir(struct vnode *v)
/*
* Fail if vnode size is bigger than a page. Since this allocation
* method is to be replaced, we can live with this limitation for now.
*/
BUG_ON(v->size > PAGE_SIZE);
@@ -349,7 +349,7 @@ int memfs_vnode_readdir(struct vnode *v)
/* Initialise it */
newd->ops = generic_dentry_operations;
newd->parent = parent;
list_add(&newd->child, &parent->children);
list_insert(&newd->child, &parent->children);
/*
* Lookup the vnode for dentry by its vnode number. We call
@@ -367,7 +367,7 @@ int memfs_vnode_readdir(struct vnode *v)
}
/* Assign this dentry as a name of its vnode */
list_add(&newd->vref, &newd->vnode->dentries);
list_insert(&newd->vref, &newd->vnode->dentries);
/* Increase link count */
newv->links++;
@@ -376,8 +376,8 @@ int memfs_vnode_readdir(struct vnode *v)
memcpy(newd->name, memfsd[i].name, MEMFS_DNAME_MAX);
/* Add both vnode and dentry to their caches */
list_add(&newd->cache_list, &dentry_cache);
list_add(&newv->cache_list, &vnode_cache);
list_insert(&newd->cache_list, &dentry_cache);
list_insert(&newv->cache_list, &vnode_cache);
}
return 0;

View File

@@ -19,8 +19,8 @@ const char *pathdata_next_component(struct pathdata *pdata)
struct pathcomp *p, *n;
const char *pathstr;
list_for_each_entry_safe(p, n, &pdata->list, list) {
list_del(&p->list);
list_foreach_removable_struct(p, n, &pdata->list, list) {
list_remove(&p->list);
pathstr = p->str;
kfree(p);
return pathstr;
@@ -35,8 +35,8 @@ const char *pathdata_last_component(struct pathdata *pdata)
const char *pathstr;
if (!list_empty(&pdata->list)) {
p = list_entry(pdata->list.prev, struct pathcomp, list);
list_del(&p->list);
p = link_to_struct(pdata->list.prev, struct pathcomp, list);
list_remove(&p->list);
pathstr = p->str;
kfree(p);
return pathstr;
@@ -50,8 +50,8 @@ void pathdata_destroy(struct pathdata *p)
{
struct pathcomp *c, *n;
list_for_each_entry_safe(c, n, &p->list, list) {
list_del(&c->list);
list_foreach_removable_struct(c, n, &p->list, list) {
list_remove(&c->list);
kfree(c);
}
kfree(p);
@@ -62,7 +62,7 @@ void pathdata_print(struct pathdata *p)
struct pathcomp *comp;
printf("Extracted path is:\n");
list_for_each_entry(comp, &p->list, list)
list_foreach_struct(comp, &p->list, list)
printf("%s\n", comp->str);
}
@@ -78,7 +78,7 @@ struct pathdata *pathdata_parse(const char *pathname,
return PTR_ERR(-ENOMEM);
/* Initialise pathdata */
INIT_LIST_HEAD(&pdata->list);
link_init(&pdata->list);
strcpy(pathbuf, pathname);
/* First component is root if there's a root */
@@ -87,9 +87,9 @@ struct pathdata *pathdata_parse(const char *pathname,
kfree(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
comp->str = VFS_STR_ROOTDIR;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
if (task)
/* Lookup start vnode is root vnode */
@@ -105,15 +105,15 @@ struct pathdata *pathdata_parse(const char *pathname,
kfree(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
/* Get current dentry for this task */
curdir = list_entry(task->fs_data->curdir->dentries.next,
curdir = link_to_struct(task->fs_data->curdir->dentries.next,
struct dentry, vref);
/* Use its name in path component */
comp->str = curdir->name;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
/* Lookup start vnode is current dir vnode */
pdata->vstart = task->fs_data->curdir;
@@ -130,9 +130,9 @@ struct pathdata *pathdata_parse(const char *pathname,
pathdata_destroy(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
comp->str = str;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
}
/* Next component */
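
The path helpers above show where each traversal form belongs: pathdata_print() only reads the list, while pathdata_next_component() and pathdata_destroy() unlink and free entries mid-walk, so they use the removable variant. Assuming it mirrors the list_for_each_entry_safe() it replaces, a minimal sketch (building on the link_to_struct() sketch earlier) looks like:

/*
 * Hypothetical sketch: 'n' is fetched before the loop body runs, so
 * the body may list_remove() and kfree() 'entry' without breaking
 * the walk. Relies on link_to_struct() as sketched above.
 */
#define list_foreach_removable_struct(entry, n, head, member)		\
	for (entry = link_to_struct((head)->next,			\
				    typeof(*entry), member),		\
	     n = link_to_struct(entry->member.next,			\
				typeof(*entry), member);		\
	     &entry->member != (head);					\
	     entry = n,							\
	     n = link_to_struct(n->member.next, typeof(*n), member))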

View File

@@ -55,7 +55,7 @@ int pager_sys_open(struct tcb *pager, l4id_t opener, int fd)
/*
* Write file information, they will
* be sent via the return reply.
*/
write_mr(L4SYS_ARG0, v->vnum);
write_mr(L4SYS_ARG1, v->size);
@@ -89,7 +89,7 @@ int pager_open_bypath(struct tcb *pager, char *pathname)
/*
* Write file information, they will
* be sent via the return reply.
*/
write_mr(L4SYS_ARG0, v->vnum);
write_mr(L4SYS_ARG1, v->size);
@@ -109,10 +109,10 @@ void print_vnode(struct vnode *v)
struct dentry *d, *c;
printf("Vnode names:\n");
list_for_each_entry(d, &v->dentries, vref) {
list_foreach_struct(d, &v->dentries, vref) {
printf("%s\n", d->name);
printf("Children dentries:\n");
list_for_each_entry(c, &d->children, child)
list_foreach_struct(c, &d->children, child)
printf("%s\n", c->name);
}
}
@@ -496,7 +496,7 @@ int sys_readdir(struct tcb *t, int fd, void *buf, int count)
if (!(v = vfs_lookup_byvnum(vfs_root.pivot->sb, vnum)))
return -EINVAL;
d = list_entry(v->dentries.next, struct dentry, vref);
d = link_to_struct(v->dentries.next, struct dentry, vref);
/* Ensure vnode is a directory */
if (!vfs_isdir(v))

View File

@@ -32,14 +32,14 @@ struct global_list global_tasks = {
void global_add_task(struct tcb *task)
{
BUG_ON(!list_empty(&task->list));
list_add_tail(&task->list, &global_tasks.list);
list_insert_tail(&task->list, &global_tasks.list);
global_tasks.total++;
}
void global_remove_task(struct tcb *task)
{
BUG_ON(list_empty(&task->list));
list_del_init(&task->list);
list_remove_init(&task->list);
BUG_ON(--global_tasks.total < 0);
}
@@ -47,7 +47,7 @@ struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &global_tasks.list, list)
list_foreach_struct(t, &global_tasks.list, list)
if (t->tid == tid)
return t;
return 0;
@@ -95,7 +95,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
task->tid = TASK_ID_INVALID;
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
link_init(&task->list);
return task;
}

View File

@@ -8,8 +8,8 @@
#include <task.h>
#include <path.h>
LIST_HEAD(vnode_cache);
LIST_HEAD(dentry_cache);
LINK_DECLARE(vnode_cache);
LINK_DECLARE(dentry_cache);
/*
* /
@@ -33,7 +33,7 @@ struct vnode *vfs_lookup_byvnum(struct superblock *sb, unsigned long vnum)
int err;
/* Check the vnode flat list by vnum */
list_for_each_entry(v, &vnode_cache, cache_list)
list_foreach_struct(v, &vnode_cache, cache_list)
if (v->vnum == vnum)
return v;
@@ -48,7 +48,7 @@ struct vnode *vfs_lookup_byvnum(struct superblock *sb, unsigned long vnum)
}
/* Add the vnode back to vnode flat list */
list_add(&v->cache_list, &vnode_cache);
list_insert(&v->cache_list, &vnode_cache);
return v;
}

View File

@@ -185,7 +185,7 @@ struct mem_cache *mem_cache_init(void *start,
area_start = addr_aligned;
}
INIT_LIST_HEAD(&cache->list);
link_init(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;

View File

@@ -17,7 +17,7 @@
* fixed-size memory cache) Keeps track of free/occupied items within its
* start/end boundaries. Does not grow/shrink but you can link-list it. */
struct mem_cache {
struct list_head list;
struct link list;
int total;
int free;
unsigned int start;

View File

@@ -26,7 +26,7 @@ static struct page_area *new_page_area(struct page_allocator *p)
struct mem_cache *cache;
struct page_area *new_area;
list_for_each_entry(cache, &p->pga_cache_list, list) {
list_foreach_struct(cache, &p->pga_cache_list, list) {
if ((new_area = mem_cache_alloc(cache)) != 0) {
new_area->cache = cache;
p->pga_free--;
@@ -45,7 +45,7 @@ get_free_page_area(int quantity, struct page_allocator *p)
if (quantity <= 0)
return 0;
list_for_each_entry(area, &p->page_area_list, list) {
list_foreach_struct(area, &p->page_area_list, list) {
/* Check for exact size match */
if (area->numpages == quantity && !area->used) {
@@ -60,8 +60,8 @@ get_free_page_area(int quantity, struct page_allocator *p)
new->pfn = area->pfn + area->numpages;
new->numpages = quantity;
new->used = 1;
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &area->list);
link_init(&new->list);
list_insert(&new->list, &area->list);
return new;
}
}
@@ -91,36 +91,36 @@ void init_page_allocator(unsigned long start, unsigned long end)
struct page_area *freemem, *area;
struct mem_cache *cache;
INIT_LIST_HEAD(&allocator.page_area_list);
INIT_LIST_HEAD(&allocator.pga_cache_list);
link_init(&allocator.page_area_list);
link_init(&allocator.pga_cache_list);
/* Initialise the first page area cache */
cache = mem_cache_init(l4_map_helper((void *)start, 1), PAGE_SIZE,
sizeof(struct page_area), 0);
list_add(&cache->list, &allocator.pga_cache_list);
list_insert(&cache->list, &allocator.pga_cache_list);
/* Initialise the first area that describes the page just allocated */
area = mem_cache_alloc(cache);
INIT_LIST_HEAD(&area->list);
link_init(&area->list);
area->pfn = __pfn(start);
area->used = 1;
area->numpages = 1;
area->cache = cache;
list_add(&area->list, &allocator.page_area_list);
list_insert(&area->list, &allocator.page_area_list);
/* Update freemem start address */
start += PAGE_SIZE;
/* Initialise first area that describes all of free physical memory */
freemem = mem_cache_alloc(cache);
INIT_LIST_HEAD(&freemem->list);
link_init(&freemem->list);
freemem->pfn = __pfn(start);
freemem->numpages = __pfn(end) - freemem->pfn;
freemem->cache = cache;
freemem->used = 0;
/* Add it as the first unused page area */
list_add(&freemem->list, &allocator.page_area_list);
list_insert(&freemem->list, &allocator.page_area_list);
/* Initialise free page area counter */
allocator.pga_free = mem_cache_total_empty(cache);
@@ -163,7 +163,7 @@ int check_page_areas(struct page_allocator *p)
* Add the new cache to available
* list of free page area caches
*/
list_add(&newcache->list, &p->pga_cache_list);
list_insert(&newcache->list, &p->pga_cache_list);
/* Unlock here */
}
return 0;
@@ -202,13 +202,13 @@ struct page_area *merge_free_areas(struct page_area *before,
BUG_ON(before == after);
before->numpages += after->numpages;
list_del(&after->list);
list_remove(&after->list);
c = after->cache;
mem_cache_free(c, after);
/* Recursively free the cache page */
if (mem_cache_is_empty(c)) {
list_del(&c->list);
list_remove(&c->list);
BUG_ON(free_page(l4_unmap_helper(c, 1)) < 0)
}
return before;
@@ -219,7 +219,7 @@ static int find_and_free_page_area(void *addr, struct page_allocator *p)
struct page_area *area, *prev, *next;
/* First find the page area to be freed. */
list_for_each_entry(area, &p->page_area_list, list)
list_foreach_struct(area, &p->page_area_list, list)
if (__pfn_to_addr(area->pfn) == (unsigned long)addr &&
area->used) { /* Found it */
area->used = 0;
@@ -230,12 +230,12 @@ static int find_and_free_page_area(void *addr, struct page_allocator *p)
found:
/* Now merge with adjacent areas, if possible */
if (area->list.prev != &p->page_area_list) {
prev = list_entry(area->list.prev, struct page_area, list);
prev = link_to_struct(area->list.prev, struct page_area, list);
if (!prev->used)
area = merge_free_areas(prev, area);
}
if (area->list.next != &p->page_area_list) {
next = list_entry(area->list.next, struct page_area, list);
next = link_to_struct(area->list.next, struct page_area, list);
if (!next->used)
area = merge_free_areas(area, next);
}

View File

@@ -6,7 +6,7 @@
/* List member to keep track of free and unused physical pages.
* Has PAGE_SIZE granularity */
struct page_area {
struct list_head list;
struct link list;
unsigned int used; /* Used or free */
unsigned int pfn; /* Base pfn */
unsigned int numpages; /* Number of pages this region covers */
@@ -15,8 +15,8 @@ struct page_area {
};
struct page_allocator {
struct list_head page_area_list;
struct list_head pga_cache_list;
struct link page_area_list;
struct link pga_cache_list;
int pga_free;
};

View File

@@ -5,7 +5,7 @@ void print_page_area_list(struct page_allocator *p)
{
struct page_area *area;
list_for_each_entry (area, &p->page_area_list, list) {
list_foreach_struct (area, &p->page_area_list, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %u\n", "Pfn:", area->pfn);
printf("%-20s %d\n", "Used:", area->used);
@@ -23,11 +23,11 @@ void print_km_area(struct km_area *s)
}
void print_km_area_list(struct list_head *km_areas)
void print_km_area_list(struct link *km_areas)
{
struct km_area *area;
list_for_each_entry (area, km_areas, list)
list_foreach_struct (area, km_areas, list)
print_km_area(area);
}

View File

@@ -12,6 +12,6 @@
#endif
void print_page_area_list(struct page_allocator *p);
void print_km_area_list(struct list_head *s);
void print_km_area_list(struct link *s);
void print_km_area(struct km_area *s);
#endif /* DEBUG_H */

View File

@@ -29,13 +29,13 @@ void print_page_area(struct page_area *a, int areano)
return;
}
void print_areas(struct list_head *area_head)
void print_areas(struct link *area_head)
{
struct page_area *cur;
int areano = 1;
printf("Page areas:\n-------------\n");
list_for_each_entry(cur, area_head, list)
list_foreach_struct(cur, area_head, list)
print_page_area(cur, areano++);
}
@@ -47,12 +47,12 @@ void print_cache(struct mem_cache *c, int cacheno)
printf("Start: 0x%x\n", c->start);
}
void print_caches(struct list_head *cache_head)
void print_caches(struct link *cache_head)
{
int caches = 1;
struct mem_cache *cur;
list_for_each_entry(cur, cache_head, list)
list_foreach_struct(cur, cache_head, list)
print_cache(cur, caches++);
}

View File

@@ -6,8 +6,8 @@
void test_allocpage(int num_allocs, int alloc_max, FILE *init, FILE *exit);
void print_page_area(struct page_area *a, int no);
void print_caches(struct list_head *cache_head);
void print_caches(struct link *cache_head);
void print_cache(struct mem_cache *c, int cacheno);
void print_areas(struct list_head *area_head);
void print_areas(struct link *area_head);
void print_page_area(struct page_area *ar, int areano);
#endif

View File

@@ -17,7 +17,7 @@
#include "debug.h"
#include "tests.h"
extern struct list_head km_area_start;
extern struct link km_area_start;
void print_kmalloc_state(void)
{

View File

@@ -33,6 +33,6 @@ struct vfs_file_data {
struct vm_file *vfs_file_create(void);
extern struct list_head vm_file_list;
extern struct link vm_file_list;
#endif /* __MM0_FILE_H__ */

View File

@@ -3,7 +3,7 @@
struct global_list {
int total;
struct list_head list;
struct link list;
};
extern struct global_list global_vm_files;

View File

@@ -21,7 +21,7 @@ struct initdata {
struct page_bitmap page_map;
unsigned long pager_utcb_virt;
unsigned long pager_utcb_phys;
struct list_head boot_file_list;
struct link boot_file_list;
};
extern struct initdata initdata;

View File

@@ -25,7 +25,7 @@ struct shm_descriptor {
struct shm_descriptor {
int key; /* IPC key supplied by user task */
l4id_t shmid; /* SHM area id, allocated by mm0 */
struct list_head list; /* SHM list, used by mm0 */
struct link list; /* SHM list, used by mm0 */
struct vm_file *owner;
void *shm_addr; /* The virtual address for segment. */
unsigned long size; /* Size of the area in pages */

View File

@@ -51,18 +51,18 @@ struct task_fd_head {
};
struct task_vma_head {
struct list_head list;
struct link list;
int tcb_refs;
};
struct utcb_desc {
struct list_head list;
struct link list;
unsigned long utcb_base;
struct id_pool *slots;
};
struct utcb_head {
struct list_head list;
struct link list;
int tcb_refs;
};
@@ -70,11 +70,11 @@ struct utcb_head {
/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */
struct list_head list;
struct link list;
/* Fields for parent-child relations */
struct list_head child_ref; /* Child ref in parent's list */
struct list_head children; /* List of children */
struct link child_ref; /* Child ref in parent's list */
struct link children; /* List of children */
struct tcb *parent; /* Parent task */
/* Task creation flags */
@@ -131,7 +131,7 @@ struct tcb {
};
struct tcb_head {
struct list_head list;
struct link list;
int total; /* Total threads */
};

View File

@@ -57,7 +57,7 @@ enum VM_FILE_TYPE {
struct page {
int refcnt; /* Refcount */
struct spinlock lock; /* Page lock. */
struct list_head list; /* For list of a vm_object's in-memory pages */
struct link list; /* For list of a vm_object's in-memory pages */
struct vm_object *owner;/* The vm_object the page belongs to */
unsigned long virtual; /* If refs >1, first mapper's virtual address */
unsigned int flags; /* Flags associated with the page. */
@@ -115,20 +115,20 @@ struct vm_object {
int npages; /* Number of pages in memory */
int nlinks; /* Number of mapper links that refer */
int shadows; /* Number of shadows that refer */
struct list_head shref; /* Shadow reference from original object */
struct list_head shdw_list; /* List of vm objects that shadows this one */
struct list_head link_list; /* List of links that refer to this object */
struct link shref; /* Shadow reference from original object */
struct link shdw_list; /* List of vm objects that shadows this one */
struct link link_list; /* List of links that refer to this object */
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int flags; /* Defines the type and flags of the object */
struct list_head list; /* List of all vm objects in memory */
struct link list; /* List of all vm objects in memory */
struct vm_pager *pager; /* The pager for this object */
struct list_head page_cache;/* List of in-memory pages */
struct link page_cache;/* List of in-memory pages */
};
/* In memory representation of either a vfs file, a device. */
struct vm_file {
int openers;
struct list_head list;
struct link list;
unsigned long length;
unsigned int type;
struct vm_object vm_obj;
@@ -138,22 +138,22 @@ struct vm_file {
/* To create per-vma vm_object lists */
struct vm_obj_link {
struct list_head list;
struct list_head linkref;
struct link list;
struct link linkref;
struct vm_object *obj;
};
static inline void vm_link_object(struct vm_obj_link *link, struct vm_object *obj)
{
link->obj = obj;
list_add(&link->linkref, &obj->link_list);
list_insert(&link->linkref, &obj->link_list);
obj->nlinks++;
}
static inline struct vm_object *vm_unlink_object(struct vm_obj_link *link)
{
/* Delete link from object's link list */
list_del(&link->linkref);
list_remove(&link->linkref);
/* Reduce object's mapper link count */
link->obj->nlinks--;
@@ -175,8 +175,8 @@ static inline struct vm_object *vm_unlink_object(struct vm_obj_link *link)
* object's copy of pages supersede the ones lower in the stack.
*/
struct vm_area {
struct list_head list; /* Per-task vma list */
struct list_head vm_obj_list; /* Head for vm_object list. */
struct link list; /* Per-task vma list */
struct link vm_obj_list; /* Head for vm_object list. */
unsigned long pfn_start; /* Region start virtual pfn */
unsigned long pfn_end; /* Region end virtual pfn, exclusive */
unsigned long flags; /* Protection flags. */
@@ -189,12 +189,12 @@ struct vm_area {
* rather than searching the address. E.g. munmap/msync
*/
static inline struct vm_area *find_vma(unsigned long addr,
struct list_head *vm_area_list)
struct link *vm_area_list)
{
struct vm_area *vma;
unsigned long pfn = __pfn(addr);
list_for_each_entry(vma, vm_area_list, list)
list_foreach_struct(vma, vm_area_list, list)
if ((pfn >= vma->pfn_start) && (pfn < vma->pfn_end))
return vma;
return 0;
@@ -213,12 +213,12 @@ extern struct vm_pager devzero_pager;
extern struct vm_pager swap_pager;
/* vm object and vm file lists */
extern struct list_head vm_object_list;
extern struct link vm_object_list;
/* vm object link related functions */
struct vm_obj_link *vm_objlink_create(void);
struct vm_obj_link *vma_next_link(struct list_head *link,
struct list_head *head);
struct vm_obj_link *vma_next_link(struct link *link,
struct link *head);
/* vm file and object initialisation */
struct vm_object *vm_object_create(void);
@@ -229,8 +229,8 @@ void vm_file_put(struct vm_file *f);
/* Printing objects, files */
void vm_object_print(struct vm_object *vmo);
void vm_print_objects(struct list_head *vmo_list);
void vm_print_files(struct list_head *file_list);
void vm_print_objects(struct link *vmo_list);
void vm_print_files(struct link *file_list);
/* Used for pre-faulting a page from mm0 */
int prefault_page(struct tcb *task, unsigned long address,
@@ -248,7 +248,7 @@ int validate_task_range(struct tcb *t, unsigned long start,
/* Changes all shadows and their ptes to read-only */
int vm_freeze_shadows(struct tcb *task);
int task_insert_vma(struct vm_area *vma, struct list_head *vma_list);
int task_insert_vma(struct vm_area *vma, struct link *vma_list);
/* Main page fault entry point */
int page_fault_handler(struct tcb *faulty_task, fault_kdata_t *fkdata);

View File

@@ -25,7 +25,7 @@
#include <boot.h>
/* Receives all registers and replies back */
int ipc_test_full_sync(l4id_t senderid)
{
for (int i = MR_UNUSED_START; i < MR_TOTAL + MR_REST; i++) {
@@ -35,7 +35,7 @@ int ipc_test_full_sync(l4id_t senderid)
write_mr(i, 0);
}
/* Send a full reply */
l4_send_full(senderid, 0);
return 0;
}
@@ -158,7 +158,7 @@ void handle_requests(void)
ret = sys_execve(sender, (char *)mr[0],
(char **)mr[1], (char **)mr[2]);
if (ret < 0)
break; /* We reply for errors */
else
return; /* else we're done */
}

View File

@@ -10,7 +10,7 @@
* of how mmaped devices would be mapped with a pager.
*/
struct mmap_device {
struct list_head page_list; /* Dyn-allocated page list */
struct link page_list; /* Dyn-allocated page list */
unsigned long pfn_start; /* Physical pfn start */
unsigned long pfn_end; /* Physical pfn end */
};
@@ -27,17 +27,17 @@ struct page *memdev_page_in(struct vm_object *vm_obj,
return PTR_ERR(-1);
/* Simply return the page if found */
list_for_each_entry(page, &memdev->page_list, list)
list_foreach_struct(page, &memdev->page_list, list)
if (page->offset == pfn_offset)
return page;
/* Otherwise allocate one of our own for that offset and return it */
page = kzalloc(sizeof(struct page));
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
spin_lock_init(&page->lock);
page->offset = pfn_offset;
page->owner = vm_obj;
list_add(&page->list, &memdev->page_list);
list_insert(&page->list, &memdev->page_list);
return page;
}

View File

@@ -75,7 +75,7 @@ int do_execve(struct tcb *sender, char *filename, struct args_struct *args,
BUG_ON(!(tgleader = find_task(sender->tgid)));
/* Destroy all children threads. */
list_for_each_entry(thread, &tgleader->children, child_ref)
list_foreach_struct(thread, &tgleader->children, child_ref)
do_exit(thread, 0);
} else {
/* Otherwise group leader is same as sender */

View File

@@ -96,7 +96,7 @@ int execve_recycle_task(struct tcb *new, struct tcb *orig)
/* Copy parent relationship */
BUG_ON(new->parent);
new->parent = orig->parent;
list_add(&new->child_ref, &orig->parent->children);
list_insert(&new->child_ref, &orig->parent->children);
/* Flush all IO on task's files and close fds */
task_close_files(orig);

View File

@@ -56,14 +56,14 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
* Given a reference to link = vma, head = vma, returns link1.
* Given a reference to link = link3, head = vma, returns 0.
*/
struct vm_obj_link *vma_next_link(struct list_head *link,
struct list_head *head)
struct vm_obj_link *vma_next_link(struct link *link,
struct link *head)
{
BUG_ON(list_empty(link));
if (link->next == head)
return 0;
else
return list_entry(link->next, struct vm_obj_link, list);
return link_to_struct(link->next, struct vm_obj_link, list);
}
/* Unlinks orig_link from its vma and deletes it but keeps the object. */
@@ -72,7 +72,7 @@ struct vm_object *vma_drop_link(struct vm_obj_link *link)
struct vm_object *dropped;
/* Remove object link from vma's list */
list_del(&link->list);
list_remove(&link->list);
/* Unlink the link from object */
dropped = vm_unlink_object(link);
@@ -104,7 +104,7 @@ int vm_object_is_subset(struct vm_object *shadow,
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &original->page_cache, list)
list_foreach_struct(pl, &original->page_cache, list)
if (!(pc = find_page(shadow, pl->offset)))
return 0;
/*
@@ -160,14 +160,14 @@ int vma_merge_object(struct vm_object *redundant)
BUG_ON(redundant->shadows != 1);
/* Get the last shadower object in front */
front = list_entry(redundant->shdw_list.next,
front = link_to_struct(redundant->shdw_list.next,
struct vm_object, shref);
/* Move all non-intersecting pages to front shadow. */
list_for_each_entry_safe(p1, n, &redundant->page_cache, list) {
list_foreach_removable_struct(p1, n, &redundant->page_cache, list) {
/* Page doesn't exist in front, move it there */
if (!(p2 = find_page(front, p1->offset))) {
list_del_init(&p1->list);
list_remove_init(&p1->list);
spin_lock(&p1->lock);
p1->owner = front;
spin_unlock(&p1->lock);
@@ -179,20 +179,20 @@ int vma_merge_object(struct vm_object *redundant)
/* Sort out shadow relationships after the merge: */
/* Front won't be a shadow of the redundant shadow anymore */
list_del_init(&front->shref);
list_remove_init(&front->shref);
/* Check that there really was one shadower of redundant left */
BUG_ON(!list_empty(&redundant->shdw_list));
/* Redundant won't be a shadow of its next object */
list_del_init(&redundant->shref);
list_remove_init(&redundant->shref);
/* Front is now a shadow of redundant's next object */
list_add(&front->shref, &redundant->orig_obj->shdw_list);
list_insert(&front->shref, &redundant->orig_obj->shdw_list);
front->orig_obj = redundant->orig_obj;
/* Find last link for the object */
last_link = list_entry(redundant->link_list.next,
last_link = link_to_struct(redundant->link_list.next,
struct vm_obj_link, linkref);
/* Drop the last link to the object */
@@ -213,8 +213,8 @@ struct vm_obj_link *vm_objlink_create(void)
if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&vmo_link->list);
INIT_LIST_HEAD(&vmo_link->linkref);
link_init(&vmo_link->list);
link_init(&vmo_link->linkref);
return vmo_link;
}
@@ -274,7 +274,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
/* Get the first object on the vma */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
do {
/* Create a new link */
@@ -284,7 +284,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
vm_link_object(new_link, vmo_link->obj);
/* Add the new link to vma in object order */
list_add_tail(&new_link->list, &new_vma->vm_obj_list);
list_insert_tail(&new_link->list, &new_vma->vm_obj_list);
/* Continue traversing links, doing the same copying */
} while((vmo_link = vma_next_link(&vmo_link->list,
@@ -361,10 +361,10 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Get previous and next links, if they exist */
prev = (link->list.prev == &vma->vm_obj_list) ? 0 :
list_entry(link->list.prev, struct vm_obj_link, list);
link_to_struct(link->list.prev, struct vm_obj_link, list);
next = (link->list.next == &vma->vm_obj_list) ? 0 :
list_entry(link->list.next, struct vm_obj_link, list);
link_to_struct(link->list.next, struct vm_obj_link, list);
/* Drop the link */
obj = vma_drop_link(link);
@@ -378,7 +378,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Remove prev from current object's shadow list */
BUG_ON(list_empty(&prev->obj->shref));
list_del_init(&prev->obj->shref);
list_remove_init(&prev->obj->shref);
/*
* We don't allow dropping non-shadow objects yet,
@@ -387,7 +387,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(!next);
/* prev is now shadow of next */
list_add(&prev->obj->shref,
list_insert(&prev->obj->shref,
&next->obj->shdw_list);
prev->obj->orig_obj = next->obj;
@@ -397,7 +397,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
*/
if (obj->nlinks == 0) {
BUG_ON(obj->orig_obj != next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
} else {
/*
* Dropped object still has referrers, which
@@ -421,7 +421,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(obj->orig_obj != next->obj);
BUG_ON(--next->obj->shadows < 0);
// vm_object_print(next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
}
}
}
@@ -475,7 +475,7 @@ int vma_drop_merge_delete_all(struct vm_area *vma)
BUG_ON(list_empty(&vma->vm_obj_list));
/* Traverse and get rid of all links */
list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list)
list_foreach_removable_struct(vmo_link, n, &vma->vm_obj_list, list)
vma_drop_merge_delete(vma, vmo_link);
return 0;
@@ -541,10 +541,10 @@ struct page *copy_on_write(struct fault_data *fault)
* v v
* shadow original
*/
list_add(&shadow_link->list, &vma->vm_obj_list);
list_insert(&shadow_link->list, &vma->vm_obj_list);
/* Add object to original's shadower list */
list_add(&shadow->shref, &shadow->orig_obj->shdw_list);
list_insert(&shadow->shref, &shadow->orig_obj->shdw_list);
/* Add to global object list */
global_add_vm_object(shadow);
@@ -758,7 +758,7 @@ int vm_freeze_shadows(struct tcb *task)
struct vm_object *vmo;
struct page *p;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
/* Shared vmas don't have shadows */
if (vma->flags & VMA_SHARED)
@@ -766,7 +766,7 @@ int vm_freeze_shadows(struct tcb *task)
/* Get the first object */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
vmo = vmo_link->obj;
@@ -789,7 +789,7 @@ int vm_freeze_shadows(struct tcb *task)
* Make all pages on it read-only
* in the page tables.
*/
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
/* Find virtual address of each page */
virtual = vma_page_to_virtual(vma, p);

View File

@@ -190,7 +190,7 @@ struct vm_file *do_open2(struct tcb *task, int fd, unsigned long vnum, unsigned
}
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -240,7 +240,7 @@ int do_open(struct tcb *task, int fd, unsigned long vnum, unsigned long length)
task->files->fd[fd].cursor = 0;
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -301,22 +301,22 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* Add if list is empty */
if (list_empty(&vmo->page_cache)) {
list_add_tail(&this->list, &vmo->page_cache);
list_insert_tail(&this->list, &vmo->page_cache);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, &vmo->page_cache, list) {
after = list_entry(before->list.next, struct page, list);
list_foreach_struct(before, &vmo->page_cache, list) {
after = link_to_struct(before->list.next, struct page, list);
/* If there's only one in list */
if (before->list.next == &vmo->page_cache) {
/* Add as next if greater */
if (this->offset > before->offset)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->offset < before->offset)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
return 0;
@@ -325,7 +325,7 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* If this page is in-between two other, insert it there */
if (before->offset < this->offset &&
after->offset > this->offset) {
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
BUG_ON(this->offset == before->offset);
@@ -603,7 +603,7 @@ int write_cache_pages_orig(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -627,7 +627,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;
@@ -666,7 +666,7 @@ int write_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list) {
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list) {
/* First page */
if (head->offset == pfn_start) {
left = count;
@@ -726,7 +726,7 @@ int read_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
unsigned long copy_offset; /* Current copy offset on the buffer */
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -745,7 +745,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;

View File

@@ -21,7 +21,7 @@
#include <utcb.h>
/* A separate list than the generic file list that keeps just the boot files */
LIST_HEAD(boot_file_list);
LINK_DECLARE(boot_file_list);
/*
* A specialised function for setting up the task environment of mm0.
@@ -53,7 +53,7 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
return err;
/* Set pager as child and parent of itself */
list_add(&task->child_ref, &task->children);
list_insert(&task->child_ref, &task->children);
task->parent = task;
/*
@@ -78,9 +78,9 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
struct vm_file *initdata_next_bootfile(struct initdata *initdata)
{
struct vm_file *file, *n;
list_for_each_entry_safe(file, n, &initdata->boot_file_list,
list_foreach_removable_struct(file, n, &initdata->boot_file_list,
list) {
list_del_init(&file->list);
list_remove_init(&file->list);
return file;
}
return 0;
@@ -96,10 +96,10 @@ int start_boot_tasks(struct initdata *initdata)
struct tcb *fs0_task;
struct svc_image *img;
struct task_ids ids;
struct list_head other_files;
struct link other_files;
int total = 0;
INIT_LIST_HEAD(&other_files);
link_init(&other_files);
/* Separate out special server tasks and regular files */
do {
@@ -113,7 +113,7 @@ int start_boot_tasks(struct initdata *initdata)
else if (!strcmp(img->name, __VFSNAME__))
fs0_file = file;
else
list_add(&file->list, &other_files);
list_insert(&file->list, &other_files);
} else
break;
} while (1);
@@ -138,12 +138,12 @@ int start_boot_tasks(struct initdata *initdata)
total++;
/* Initialise other tasks */
list_for_each_entry_safe(file, n, &other_files, list) {
list_foreach_removable_struct(file, n, &other_files, list) {
// printf("%s: Initialising new boot task.\n", __TASKNAME__);
ids.tid = TASK_ID_INVALID;
ids.spid = TASK_ID_INVALID;
ids.tgid = TASK_ID_INVALID;
list_del_init(&file->list);
list_remove_init(&file->list);
BUG_ON(IS_ERR(boottask_exec(file, USER_AREA_START, USER_AREA_END, &ids)));
total++;
}

View File

@@ -79,7 +79,7 @@ void init_physmem(struct initdata *initdata, struct membank *membank)
/* Initialise the page array */
for (int i = 0; i < npages; i++) {
INIT_LIST_HEAD(&membank[0].page_array[i].list);
link_init(&membank[0].page_array[i].list);
/* Set use counts for pages the kernel has already used up */
if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))

View File

@@ -28,8 +28,8 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
if (!(vma = kzalloc(sizeof(struct vm_area))))
return 0;
INIT_LIST_HEAD(&vma->list);
INIT_LIST_HEAD(&vma->vm_obj_list);
link_init(&vma->list);
link_init(&vma->vm_obj_list);
vma->pfn_start = pfn_start;
vma->pfn_end = pfn_start + npages;
@@ -45,19 +45,19 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
* The new vma is assumed to have been correctly set up not to intersect
* with any other existing vma.
*/
int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
int task_insert_vma(struct vm_area *this, struct link *vma_list)
{
struct vm_area *before, *after;
/* Add if list is empty */
if (list_empty(vma_list)) {
list_add_tail(&this->list, vma_list);
list_insert_tail(&this->list, vma_list);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, vma_list, list) {
after = list_entry(before->list.next, struct vm_area, list);
list_foreach_struct(before, vma_list, list) {
after = link_to_struct(before->list.next, struct vm_area, list);
/* If there's only one in list */
if (before->list.next == vma_list) {
@@ -69,10 +69,10 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
/* Add as next if greater */
if (this->pfn_start > before->pfn_start)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->pfn_start < before->pfn_start)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
@@ -90,7 +90,7 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
BUG_ON(set_intersection(this->pfn_start, this->pfn_end,
after->pfn_start,
after->pfn_end));
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
@@ -122,7 +122,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
return task->start;
/* First vma to check our range against */
vma = list_entry(task->vm_area_head->list.next, struct vm_area, list);
vma = link_to_struct(task->vm_area_head->list.next, struct vm_area, list);
/* Start searching from task's end of data to start of stack */
while (pfn_end <= __pfn(task->end)) {
@@ -147,7 +147,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
}
/* Otherwise get next vma entry */
vma = list_entry(vma->list.next,
vma = link_to_struct(vma->list.next,
struct vm_area, list);
continue;
}
@@ -282,7 +282,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
vm_link_object(vmo_link, &mapfile->vm_obj);
/* Add link to vma list */
list_add_tail(&vmo_link->list, &new->vm_obj_list);
list_insert_tail(&vmo_link->list, &new->vm_obj_list);
/*
* If the file is a shm file, also map devzero behind it. i.e.
@@ -304,7 +304,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
return PTR_ERR(-ENOMEM);
}
vm_link_object(vmo_link2, &dzero->vm_obj);
list_add_tail(&vmo_link2->list, &new->vm_obj_list);
list_insert_tail(&vmo_link2->list, &new->vm_obj_list);
}
/* Finished initialising the vma, add it to task */

View File

@@ -43,7 +43,7 @@ int vma_split(struct vm_area *vma, struct tcb *task,
vma_copy_links(new, vma);
/* Add new one next to original vma */
list_add_tail(&new->list, &vma->list);
list_insert_tail(&new->list, &vma->list);
/* Unmap the removed portion */
BUG_ON(l4_unmap((void *)__pfn_to_addr(unmap_start),
@@ -102,7 +102,7 @@ int vma_destroy_single(struct tcb *task, struct vm_area *vma)
vma->pfn_end - vma->pfn_start, task->tid);
/* Unlink and delete vma */
list_del(&vma->list);
list_remove(&vma->list);
kfree(vma);
return 0;
@@ -149,7 +149,7 @@ int vma_flush_pages(struct vm_area *vma)
* could only be a single VM_SHARED file-backed object in the chain.
*/
BUG_ON(list_empty(&vma->list));
vmo_link = list_entry(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo_link = link_to_struct(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo = vmo_link->obj;
/* Only dirty objects would need flushing */
@@ -187,7 +187,7 @@ int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
struct vm_area *vma, *n;
int err;
list_for_each_entry_safe(vma, n, &task->vm_area_head->list, list) {
list_foreach_removable_struct(vma, n, &task->vm_area_head->list, list) {
/* Check for intersection */
if (set_intersection(munmap_start, munmap_end,
vma->pfn_start, vma->pfn_end)) {

View File

@@ -21,7 +21,7 @@ struct page *page_init(struct page *page)
memset(page, 0, sizeof(*page));
page->refcnt = -1;
spin_lock_init(&page->lock);
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
return page;
@@ -30,7 +30,7 @@ struct page *find_page(struct vm_object *obj, unsigned long pfn)
{
struct page *p;
list_for_each_entry(p, &obj->page_cache, list)
list_foreach_struct(p, &obj->page_cache, list)
if (p->offset == pfn)
return p;
@@ -46,8 +46,8 @@ int default_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del_init(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove_init(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -219,8 +219,8 @@ int bootfile_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -295,7 +295,7 @@ int init_boot_files(struct initdata *initdata)
struct vm_file *boot_file;
struct svc_image *img;
INIT_LIST_HEAD(&initdata->boot_file_list);
link_init(&initdata->boot_file_list);
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
@@ -311,7 +311,7 @@ int init_boot_files(struct initdata *initdata)
boot_file->vm_obj.pager = &bootfile_pager;
/* Add the file to initdata's bootfile list */
list_add_tail(&boot_file->list, &initdata->boot_file_list);
list_insert_tail(&boot_file->list, &initdata->boot_file_list);
}
return 0;
@@ -345,7 +345,7 @@ struct vm_file *get_devzero(void)
{
struct vm_file *f;
list_for_each_entry(f, &global_vm_files.list, list)
list_foreach_struct(f, &global_vm_files.list, list)
if (f->type == VM_FILE_DEVZERO)
return f;
return 0;

View File

@@ -130,7 +130,7 @@ void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list) {
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list) {
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shmid == shmid)
return do_shmat(shm_file, shmaddr,
@@ -156,7 +156,7 @@ int sys_shmdt(struct tcb *task, const void *shmaddr)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list)
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list)
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shm_addr == shmaddr)
return do_shmdt(task, shm_file);
@@ -235,7 +235,7 @@ void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr)
struct vm_file *shm_file;
struct shm_descriptor *shm_desc;
list_for_each_entry(shm_file, &global_vm_files.list, list) {
list_foreach_struct(shm_file, &global_vm_files.list, list) {
if(shm_file->type == VM_FILE_SHM) {
shm_desc = shm_file_to_desc(shm_file);
/* Found the key, shmat that area */
@@ -274,7 +274,7 @@ int sys_shmget(key_t key, int size, int shmflg)
return shm_file_to_desc(shm)->shmid;
}
list_for_each_entry(shm, &global_vm_files.list, list) {
list_foreach_struct(shm, &global_vm_files.list, list) {
if (shm->type != VM_FILE_SHM)
continue;
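
A note on the two iterator forms used above: the _removable variant caches the next node up front, so the loop body is allowed to unlink the current entry; in the lookup-only loops of this file it is not strictly required, just harmless. Below is a usage sketch of the case where it does matter. shm_unlink_by_id is a made-up helper name, shmid is a hypothetical parameter, and global_remove_vm_file comes from the hunks further down.

/* Sketch only: unlink the shm file with the given id, if present */
void shm_unlink_by_id(l4id_t shmid)
{
        struct vm_file *f, *n;

        list_foreach_removable_struct(f, n, &global_vm_files.list, list) {
                if (f->type == VM_FILE_SHM &&
                    shm_file_to_desc(f)->shmid == shmid) {
                        /* Safe mid-walk: n already points past f */
                        global_remove_vm_file(f);
                        break;
                }
        }
}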

View File

@@ -45,7 +45,7 @@ void print_tasks(void)
{
struct tcb *task;
printf("Tasks:\n========\n");
list_for_each_entry(task, &global_tasks.list, list) {
list_foreach_struct(task, &global_tasks.list, list) {
printf("Task tid: %d, spid: %d\n", task->tid, task->spid);
}
}
@@ -53,14 +53,14 @@ void print_tasks(void)
void global_add_task(struct tcb *task)
{
BUG_ON(!list_empty(&task->list));
list_add_tail(&task->list, &global_tasks.list);
list_insert_tail(&task->list, &global_tasks.list);
global_tasks.total++;
}
void global_remove_task(struct tcb *task)
{
BUG_ON(list_empty(&task->list));
list_del_init(&task->list);
list_remove_init(&task->list);
BUG_ON(--global_tasks.total < 0);
}
@@ -68,7 +68,7 @@ struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &global_tasks.list, list)
list_foreach_struct(t, &global_tasks.list, list)
if (t->tid == tid)
return t;
return 0;
@@ -89,7 +89,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->vm_area_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->vm_area_head->list);
link_init(&task->vm_area_head->list);
/* Also allocate a utcb head for new address space */
if (!(task->utcb_head =
@@ -99,7 +99,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->utcb_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->utcb_head->list);
link_init(&task->utcb_head->list);
}
/* Allocate file structures if not shared */
@@ -120,9 +120,9 @@ struct tcb *tcb_alloc_init(unsigned int flags)
task->tgid = TASK_ID_INVALID;
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->child_ref);
INIT_LIST_HEAD(&task->children);
link_init(&task->list);
link_init(&task->child_ref);
link_init(&task->children);
return task;
}
@@ -180,15 +180,15 @@ int tcb_destroy(struct tcb *task)
* All children of the current task become children
* of the parent of this task.
*/
list_for_each_entry_safe(child, n, &task->children,
list_foreach_removable_struct(child, n, &task->children,
child_ref) {
list_del_init(&child->child_ref);
list_add_tail(&child->child_ref,
list_remove_init(&child->child_ref);
list_insert_tail(&child->child_ref,
&task->parent->children);
child->parent = task->parent;
}
/* The task is no longer a child of its parent */
list_del_init(&task->child_ref);
list_remove_init(&task->child_ref);
/* Now, before deleting the task, make sure it is on no list */
BUG_ON(!list_empty(&task->list));
@@ -209,7 +209,7 @@ int task_copy_vmas(struct tcb *to, struct tcb *from)
{
struct vm_area *vma, *new_vma;
list_for_each_entry(vma, &from->vm_area_head->list, list) {
list_foreach_struct(vma, &from->vm_area_head->list, list) {
/* Create a new vma */
new_vma = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
@@ -233,12 +233,12 @@ int task_release_vmas(struct task_vma_head *vma_head)
{
struct vm_area *vma, *n;
list_for_each_entry_safe(vma, n, &vma_head->list, list) {
list_foreach_removable_struct(vma, n, &vma_head->list, list) {
/* Release all links */
vma_drop_merge_delete_all(vma);
/* Delete the vma from task's vma list */
list_del(&vma->list);
list_remove(&vma->list);
/* Free the vma */
kfree(vma);
@@ -358,11 +358,11 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
* Under these conditions the child shares
* the parent of the caller
*/
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->parent->children);
task->parent = parent->parent;
} else {
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->children);
task->parent = parent;
}
@@ -370,7 +370,7 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
struct tcb *pager = find_task(PAGER_TID);
/* All parentless tasks are children of the pager */
list_add_tail(&task->child_ref, &pager->children);
list_insert_tail(&task->child_ref, &pager->children);
task->parent = pager;
}
@@ -674,7 +674,7 @@ int vfs_send_task_data(struct tcb *vfs)
tdata_head->total = global_tasks.total;
/* Write per-task data for all tasks */
list_for_each_entry(t, &global_tasks.list, list) {
list_foreach_struct(t, &global_tasks.list, list) {
tdata_head->tdata[li].tid = t->tid;
tdata_head->tdata[li].shpage_address = (unsigned long)t->shared_page;
li++;
@@ -697,7 +697,7 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
struct vm_area *vma;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
VM_READ | VM_WRITE) < 0);
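
The BUG_ON checks in this file keep using list_empty() on the renamed type; presumably it reduces to a self-pointing check against the link_init state, roughly as sketched here (an assumption, not the actual header):

static inline int list_empty(struct link *l)
{
        /* link_init() points a node at itself; a non-empty list does not */
        return l->next == l;
}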

View File

@@ -30,7 +30,7 @@ int vm_object_test_link_count(struct vm_object *vmo)
int links = 0;
struct vm_obj_link *l;
list_for_each_entry(l, &vmo->link_list, linkref)
list_foreach_struct(l, &vmo->link_list, linkref)
links++;
BUG_ON(links != vmo->nlinks);
@@ -42,7 +42,7 @@ int vm_object_test_shadow_count(struct vm_object *vmo)
struct vm_object *sh;
int shadows = 0;
list_for_each_entry(sh, &vmo->shdw_list, shref)
list_foreach_struct(sh, &vmo->shdw_list, shref)
shadows++;
BUG_ON(shadows != vmo->shadows);
@@ -64,7 +64,7 @@ int mm0_test_global_vm_integrity(void)
memset(&vmstat, 0, sizeof(vmstat));
/* Count all shadow and file objects */
list_for_each_entry(vmo, &global_vm_objects.list, list) {
list_foreach_struct(vmo, &global_vm_objects.list, list) {
vmstat.shadows_referred += vmo->shadows;
if (vmo->flags & VM_OBJ_SHADOW)
vmstat.shadow_objects++;
@@ -76,7 +76,7 @@ int mm0_test_global_vm_integrity(void)
}
/* Count all registered vmfiles */
list_for_each_entry(f, &global_vm_files.list, list) {
list_foreach_struct(f, &global_vm_files.list, list) {
vmstat.vm_files++;
if (f->type == VM_FILE_SHM)
vmstat.shm_files++;
@@ -116,7 +116,7 @@ int mm0_test_global_vm_integrity(void)
BUG_ON(vmstat.shadow_objects != vmstat.shadows_referred);
/* Count all tasks */
list_for_each_entry(task, &global_tasks.list, list)
list_foreach_struct(task, &global_tasks.list, list)
vmstat.tasks++;
if (vmstat.tasks != global_tasks.total) {
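
The counting loops above walk each list by hand; with the new type, a generic counter could look like the hypothetical helper below, shown only to illustrate raw struct link traversal without the container-of macro (it is not part of the commit):

static int link_count(struct link *head)
{
        struct link *l;
        int count = 0;

        /* The head itself is a sentinel and is not counted */
        for (l = head->next; l != head; l = l->next)
                count++;

        return count;
}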

View File

@@ -67,7 +67,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
if (!(d = kzalloc(sizeof(*d))))
return 0;
INIT_LIST_HEAD(&d->list);
link_init(&d->list);
/* We currently assume UTCB is smaller than PAGE_SIZE */
BUG_ON(UTCB_SIZE > PAGE_SIZE);
@@ -80,7 +80,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
d->utcb_base = (unsigned long)utcb_new_address(1);
/* Add descriptor to tcb's chain */
list_add(&d->list, &task->utcb_head->list);
list_insert(&d->list, &task->utcb_head->list);
/* Obtain and return first slot */
return utcb_new_slot(d);
@@ -89,7 +89,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
{
/* Unlink desc from its list */
list_del_init(&d->list);
list_remove_init(&d->list);
/* Unmap the descriptor region */
do_munmap(task, d->utcb_base, 1);
@@ -104,7 +104,7 @@ int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
}
/*
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* address space. A new utcb is allocated and mmap'ed for the child task
* running in the newly created address space.
*
@@ -126,7 +126,7 @@ int task_setup_utcb(struct tcb *task)
BUG_ON(task->utcb_address);
/* Search for an empty utcb slot already allocated to this space */
list_for_each_entry(udesc, &task->utcb_head->list, list)
list_foreach_struct(udesc, &task->utcb_head->list, list)
if ((slot = utcb_new_slot(udesc)))
goto out;
@@ -163,7 +163,7 @@ int task_destroy_utcb(struct tcb *task)
// printf("UTCB: Destroying 0x%x\n", task->utcb_address);
/* Find the utcb descriptor slot first */
list_for_each_entry(udesc, &task->utcb_head->list, list) {
list_foreach_struct(udesc, &task->utcb_head->list, list) {
/* FIXME: Use a variable alignment rather than page alignment */
/* Detect matching slot */
if (page_align(task->utcb_address) == udesc->utcb_base) {
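
For context on the slot-search loops above, the task_setup_utcb() flow after this rename presumably condenses to the pattern sketched here. utcb_grab_slot is a made-up name, utcb_new_slot and task_new_utcb_desc are the functions from these hunks, and the early return is a simplification of the goto used in the real code.

/* Hypothetical condensation of the task_setup_utcb() flow above */
static unsigned long utcb_grab_slot(struct tcb *task)
{
        struct utcb_desc *udesc;
        unsigned long slot;

        /* Reuse a free slot from a descriptor already mapped in this space */
        list_foreach_struct(udesc, &task->utcb_head->list, list)
                if ((slot = utcb_new_slot(udesc)))
                        return slot;

        /* Otherwise map a fresh descriptor and take its first slot */
        return task_new_utcb_desc(task);
}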

View File

@@ -26,21 +26,21 @@ struct global_list global_vm_objects = {
void global_add_vm_object(struct vm_object *obj)
{
BUG_ON(!list_empty(&obj->list));
list_add(&obj->list, &global_vm_objects.list);
list_insert(&obj->list, &global_vm_objects.list);
global_vm_objects.total++;
}
void global_remove_vm_object(struct vm_object *obj)
{
BUG_ON(list_empty(&obj->list));
list_del_init(&obj->list);
list_remove_init(&obj->list);
BUG_ON(--global_vm_objects.total < 0);
}
void global_add_vm_file(struct vm_file *f)
{
BUG_ON(!list_empty(&f->list));
list_add(&f->list, &global_vm_files.list);
list_insert(&f->list, &global_vm_files.list);
global_vm_files.total++;
global_add_vm_object(&f->vm_obj);
@@ -49,7 +49,7 @@ void global_add_vm_file(struct vm_file *f)
void global_remove_vm_file(struct vm_file *f)
{
BUG_ON(list_empty(&f->list));
list_del_init(&f->list);
list_remove_init(&f->list);
BUG_ON(--global_vm_files.total < 0);
global_remove_vm_object(&f->vm_obj);
@@ -62,7 +62,7 @@ void print_cache_pages(struct vm_object *vmo)
if (!list_empty(&vmo->page_cache))
printf("Pages:\n======\n");
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
dprintf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
@@ -97,29 +97,29 @@ void vm_object_print(struct vm_object *vmo)
// printf("\n");
}
void vm_print_files(struct list_head *files)
void vm_print_files(struct link *files)
{
struct vm_file *f;
list_for_each_entry(f, files, list)
list_foreach_struct(f, files, list)
vm_object_print(&f->vm_obj);
}
void vm_print_objects(struct list_head *objects)
void vm_print_objects(struct link *objects)
{
struct vm_object *vmo;
list_for_each_entry(vmo, objects, list)
list_foreach_struct(vmo, objects, list)
vm_object_print(vmo);
}
struct vm_object *vm_object_init(struct vm_object *obj)
{
INIT_LIST_HEAD(&obj->list);
INIT_LIST_HEAD(&obj->shref);
INIT_LIST_HEAD(&obj->shdw_list);
INIT_LIST_HEAD(&obj->page_cache);
INIT_LIST_HEAD(&obj->link_list);
link_init(&obj->list);
link_init(&obj->shref);
link_init(&obj->shdw_list);
link_init(&obj->page_cache);
link_init(&obj->link_list);
return obj;
}
@@ -142,7 +142,7 @@ struct vm_file *vm_file_create(void)
if (!(f = kzalloc(sizeof(*f))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&f->list);
link_init(&f->list);
vm_object_init(&f->vm_obj);
f->vm_obj.flags = VM_OBJ_FILE;