Removed the Linux linked-list dependency.

This commit is contained in:
Bahadir Balban
2009-06-02 13:19:17 +03:00
parent 4757f46f71
commit 276b4643c6
69 changed files with 455 additions and 885 deletions

View File

@@ -27,7 +27,7 @@
/* A simple page table with a reference count */
struct address_space {
l4id_t spid;
struct list_head list;
struct link list;
struct mutex lock;
pgd_table_t *pgd;
int ktcb_refs;

View File

@@ -52,7 +52,7 @@ struct ktcb {
syscall_context_t *syscall_regs;
/* Runqueue related */
struct list_head rq_list;
struct link rq_list;
struct runqueue *rq;
/* Thread information */
@@ -74,7 +74,7 @@ struct ktcb {
u32 ts_need_resched; /* Scheduling flag */
enum task_state state;
struct list_head task_list; /* Global task list. */
struct link task_list; /* Global task list. */
/* UTCB related, see utcb.txt in docs */
unsigned long utcb_address; /* Virtual ref to task's utcb area */

View File

@@ -1,520 +1,96 @@
#ifndef __LIST_H__
#define __LIST_H__
/*
* LICENSE:
* Clever linked list implementation taken from Linux.
*/
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0xDEADBEE0)
#define LIST_POISON2 ((void *) 0xDEADBEE4)
#define L4_DEADWORD 0xDEADCCCC
/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/
struct list_head {
struct list_head *next;
struct list_head *prev;
struct link {
struct link *next;
struct link *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list)
static inline void link_init(struct link *l)
{
list->next = list;
list->prev = list;
l->next = l;
l->prev = l;
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
#define LINK_INIT(link) { &(link), &(link) }
#define LINK_DECLARE(l) \
struct link l = LINK_INIT(l)
static inline void list_insert(struct link *new, struct link *list)
{
next->prev = new;
struct link *next = list->next;
/*
* The new link goes between the
* current and next links on the list e.g.
* list -> new -> next
*/
new->next = next;
next->prev = new;
list->next = new;
new->prev = list;
}
/*
 * Insert @new at the tail of the circular list headed by @list,
 * i.e. immediately before the head link:
 *
 *   ... -> old_tail -> new -> list
 *
 * Useful for implementing FIFO queues.
 */
static inline void list_insert_tail(struct link *new, struct link *list)
{
	struct link *tail = list->prev;

	tail->next = new;
	new->prev = tail;
	new->next = list;
	list->prev = new;
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
static inline void list_remove(struct link *link)
{
__list_add(new, head, head->next);
}
struct link *prev = link->prev;
struct link *next = link->next;
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
next->prev = prev;
link->next = (struct link *)L4_DEADWORD;
link->prev = (struct link *)L4_DEADWORD;
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
static inline void list_remove_init(struct link *link)
{
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
struct link *prev = link->prev;
struct link *next = link->next;
prev->next = next;
next->prev = prev;
link->next = link;
link->prev = link;
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
static inline int list_empty(struct link *list)
{
__list_del(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}
/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
list_add(list, head);
}
/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
list_add_tail(list, head);
}
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
/**
* list_empty_careful - tests whether a list is
* empty _and_ checks that no other CPU might be
* in the process of still modifying either member
*
* NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
* to the list entry is list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*
* @head: the list to test.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
first->prev = head;
head->next = first;
last->next = at;
at->prev = last;
}
/**
* list_splice - join two lists
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(struct list_head *list, struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head);
}
/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head);
INIT_LIST_HEAD(list);
}
}
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; /* prefetch(pos->next), */ pos != (head); \
pos = pos->next)
/**
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*
* This variant differs from list_for_each() in that it's the
* simplest possible list iteration code, no prefetching is done.
* Use this for code that knows the list to be very short (empty
* or 1 entry) most of the time.
*/
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop counter.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
/* prefetch(pos->member.next), */ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
* list_prepare_entry - prepare a pos entry for use as a start point in
* list_for_each_entry_continue
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_struct within the struct.
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))
/**
* list_for_each_entry_continue - iterate over list of given type
* continuing after existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_from - iterate over list of given type
* continuing from existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_continue - iterate over list of given type
* continuing after existing point safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_from - iterate over list of given type
* from existing point safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
* removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = 0 }
#define HLIST_HEAD(name) struct hlist_head name = { .first = 0 }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = 0)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = 0;
h->pprev = 0;
}
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
return list->prev == list && list->next == list;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
/*
 * Given a pointer to an embedded struct link field, recover a
 * pointer to the enclosing structure of type @struct_type.
 * Thin wrapper around container_of().
 */
#define link_to_struct(link, struct_type, link_field) \
container_of(link, struct_type, link_field)
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
/*
 * Iterate over every entry of the circular list headed by
 * @link_start, setting @struct_ptr to each enclosing structure
 * in turn. The head link itself is not visited.
 *
 * NOTE: not safe if the current entry is removed during
 * iteration — use list_foreach_removable_struct() for that.
 */
#define list_foreach_struct(struct_ptr, link_start, link_field) \
for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field); \
&struct_ptr->link_field != (link_start); \
struct_ptr = link_to_struct(struct_ptr->link_field.next, typeof(*struct_ptr), link_field))
if(next->next)
next->next->pprev = &next->next;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
/**
* hlist_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/*
 * Removal-safe variant of list_foreach_struct(): @temp_ptr
 * caches the next entry before the loop body runs, so the
 * current @struct_ptr entry may be unlinked (e.g. via
 * list_remove()) without breaking the iteration.
 */
#define list_foreach_removable_struct(struct_ptr, temp_ptr, link_start, link_field) \
for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field), \
temp_ptr = link_to_struct((struct_ptr)->link_field.next, typeof(*struct_ptr), link_field);\
&struct_ptr->link_field != (link_start); \
struct_ptr = temp_ptr, temp_ptr = link_to_struct(temp_ptr->link_field.next, typeof(*temp_ptr), link_field))
#endif /* __LIST_H__ */

View File

@@ -20,7 +20,7 @@
* start/end boundaries. Does not grow/shrink but you can link-list it.
*/
struct mem_cache {
struct list_head list;
struct link list;
struct mutex mutex;
int total;
int free;

View File

@@ -6,7 +6,7 @@
struct ktcb;
struct waitqueue {
struct list_head task_list;
struct link task_list;
struct ktcb *task;
};
@@ -26,13 +26,13 @@ struct waitqueue wq = { \
struct waitqueue_head {
int sleepers;
struct spinlock slock;
struct list_head task_list;
struct link task_list;
};
static inline void waitqueue_head_init(struct waitqueue_head *head)
{
memset(head, 0, sizeof(struct waitqueue_head));
INIT_LIST_HEAD(&head->task_list);
link_init(&head->task_list);
}
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
@@ -58,7 +58,7 @@ do { \
CREATE_WAITQUEUE_ON_STACK(wq, current); \
task_set_wqh(current, wqh, &wq); \
(wqh)->sleepers++; \
list_add_tail(&wq.task_list, &(wqh)->task_list);\
list_insert_tail(&wq.task_list, &(wqh)->task_list);\
/* printk("(%d) waiting...\n", current->tid);*/ \
sched_prepare_sleep(); \
spin_unlock(&(wqh)->slock); \

View File

@@ -232,7 +232,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
struct waitqueue *wq = receiver->wq;
/* Remove from waitqueue */
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
wqhr->sleepers--;
task_unset_wqh(receiver);
@@ -255,7 +255,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
/* The receiver is not ready and/or not expecting us */
CREATE_WAITQUEUE_ON_STACK(wq, current);
wqhs->sleepers++;
list_add_tail(&wq.task_list, &wqhs->task_list);
list_insert_tail(&wq.task_list, &wqhs->task_list);
task_set_wqh(current, wqhs, &wq);
sched_prepare_sleep();
spin_unlock(&wqhr->slock);
@@ -292,13 +292,13 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
BUG_ON(list_empty(&wqhs->task_list));
/* Look for a sender we want to receive from */
list_for_each_entry_safe(wq, n, &wqhs->task_list, task_list) {
list_foreach_removable_struct(wq, n, &wqhs->task_list, task_list) {
sleeper = wq->task;
/* Found a sender that we wanted to receive from */
if ((sleeper->tid == current->expected_sender) ||
(current->expected_sender == L4_ANYTHREAD)) {
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
wqhs->sleepers--;
task_unset_wqh(sleeper);
spin_unlock(&wqhr->slock);
@@ -320,7 +320,7 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
/* The sender is not ready */
CREATE_WAITQUEUE_ON_STACK(wq, current);
wqhr->sleepers++;
list_add_tail(&wq.task_list, &wqhr->task_list);
list_insert_tail(&wq.task_list, &wqhr->task_list);
task_set_wqh(current, wqhr, &wq);
sched_prepare_sleep();
// printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
@@ -341,7 +341,7 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
* (1) User task (client) calls ipc_sendrecv();
* (2) System task (server) calls ipc_recv() with from == ANYTHREAD.
* (3) Rendezvous occurs. Both tasks exchange mrs and leave rendezvous.
* (4,5) User task, immediately calls ipc_recv(), expecting a reply from server.
* (4,5) User task, immediately calls ipc_recv(), expecting a reply from server.
* (4,5) System task handles the request in userspace.
* (6) System task calls ipc_send() sending the return result.
* (7) Rendezvous occurs. Both tasks exchange mrs and leave rendezvous.
@@ -355,7 +355,7 @@ int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags)
if ((ret = ipc_send(to, flags)) < 0)
return ret;
/*
* Get reply. A client would block its server
* Get reply. A client would block its server
* only very briefly between these calls.
*/
if ((ret = ipc_recv(from, flags)) < 0)

View File

@@ -18,13 +18,13 @@
struct mutex_queue {
unsigned long physical;
struct list_head list;
struct link list;
struct waitqueue_head wqh_waiters;
struct waitqueue_head wqh_wakers;
};
struct mutex_queue_head {
struct list_head list;
struct link list;
/*
* Single lock for:
@@ -42,7 +42,7 @@ struct mutex_queue_head {
void init_mutex_queue_head(void)
{
memset(&mutex_queue_head, 0, sizeof (mutex_queue_head));
INIT_LIST_HEAD(&mutex_queue_head.list);
link_init(&mutex_queue_head.list);
mutex_init(&mutex_queue_head.mutex_control_mutex);
}
void mutex_queue_head_lock()
@@ -61,7 +61,7 @@ void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
/* This is the unique key that describes this mutex */
mq->physical = physical;
INIT_LIST_HEAD(&mq->list);
link_init(&mq->list);
waitqueue_head_init(&mq->wqh_wakers);
waitqueue_head_init(&mq->wqh_waiters);
}
@@ -70,13 +70,13 @@ void mutex_control_add(struct mutex_queue *mq)
{
BUG_ON(!list_empty(&mq->list));
list_add(&mq->list, &mutex_queue_head.list);
list_insert(&mq->list, &mutex_queue_head.list);
mutex_queue_head.count++;
}
void mutex_control_remove(struct mutex_queue *mq)
{
list_del_init(&mq->list);
list_remove_init(&mq->list);
mutex_queue_head.count--;
}
@@ -86,7 +86,7 @@ struct mutex_queue *mutex_control_find(unsigned long mutex_physical)
struct mutex_queue *mutex_queue;
/* Find the mutex queue with this key */
list_for_each_entry(mutex_queue, &mutex_queue_head.list, list)
list_foreach_struct(mutex_queue, &mutex_queue_head.list, list)
if (mutex_queue->physical == mutex_physical)
return mutex_queue;

View File

@@ -13,7 +13,7 @@
* For lazy mm switching, a list of newly created mappings that are common to
* all tasks (e.g. any mapping done in the kernel) can be kept here so that when
* a new task is scheduled, the same mappings are copied to its page tables as
* well. struct list_head new_mappings;
* well. struct link new_mappings;
*/
int sys_map(syscall_context_t *regs)

View File

@@ -616,7 +616,7 @@ void relocate_page_tables(void)
* Useful for upgrading to page-grained control over a section mapping:
* Remaps a section mapping in pages. It allocates a pmd, (at all times because
* there can't really be an already existing pmd for a section mapping) fills
* in the page information, and replaces the direct section physical translation
in the page information, and replaces the direct section physical translation
* with the address of the pmd. Flushes the caches/tlbs.
*/
void remap_as_pages(void *vstart, void *vend)

View File

@@ -12,7 +12,7 @@
#define KMALLOC_POOLS_MAX 5
struct kmalloc_pool_head {
struct list_head cache_list;
struct link cache_list;
int occupied;
int total_caches;
int cache_size;
@@ -28,7 +28,7 @@ struct kmalloc_mempool km_pool;
void init_kmalloc()
{
for (int i = 0; i < KMALLOC_POOLS_MAX; i++) {
INIT_LIST_HEAD(&km_pool.pool_head[i].cache_list);
link_init(&km_pool.pool_head[i].cache_list);
km_pool.pool_head[i].occupied = 0;
km_pool.pool_head[i].total_caches = 0;
km_pool.pool_head[i].cache_size = 0;
@@ -58,7 +58,7 @@ void *__kmalloc(int size)
* Found the pool, now see if any
* cache has available slots
*/
list_for_each_entry(cache, &km_pool.pool_head[i].cache_list,
list_foreach_struct(cache, &km_pool.pool_head[i].cache_list,
list) {
if (cache->free)
return mem_cache_alloc(cache);
@@ -89,7 +89,7 @@ void *__kmalloc(int size)
BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
size, 0)));
// printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
list_add(&cache->list, &km_pool.pool_head[index].cache_list);
list_insert(&cache->list, &km_pool.pool_head[index].cache_list);
km_pool.pool_head[index].occupied = 1;
km_pool.pool_head[index].total_caches++;
km_pool.pool_head[index].cache_size = size;
@@ -115,13 +115,13 @@ int __kfree(void *p)
struct mem_cache *cache, *tmp;
for (int i = 0; i < km_pool.total; i++)
list_for_each_entry_safe(cache, tmp,
list_foreach_removable_struct(cache, tmp,
&km_pool.pool_head[i].cache_list,
list) {
if (!mem_cache_free(cache, p)) {
if (mem_cache_is_empty(cache)) {
km_pool.pool_head[i].total_caches--;
list_del(&cache->list);
list_remove(&cache->list);
free_page(cache);
/*
* Total remains the same but slot

View File

@@ -28,15 +28,15 @@
/* Covers 3 main types of memory needed by the kernel. */
struct pgalloc {
struct list_head cache_list[3];
struct link cache_list[3];
};
static struct pgalloc pgalloc;
void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
{
INIT_LIST_HEAD(&cache->list);
link_init(&cache->list);
BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
list_add(&cache->list, &pgalloc.cache_list[cidx]);
list_insert(&cache->list, &pgalloc.cache_list[cidx]);
}
void print_kmem_grant_params(grant_kmem_usage_t *params)
@@ -108,7 +108,7 @@ void init_pgalloc(void)
int initial_grant = PGALLOC_INIT_GRANT;
for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
INIT_LIST_HEAD(&pgalloc.cache_list[i]);
link_init(&pgalloc.cache_list[i]);
/* Grant ourselves with an initial chunk of physical memory */
physmem.free_cur = page_align_up(physmem.free_cur);
@@ -122,14 +122,14 @@ void init_pgalloc(void)
void pgalloc_remove_cache(struct mem_cache *cache)
{
list_del_init(&cache->list);
list_remove_init(&cache->list);
}
static inline void *pgalloc_from_cache(int cidx)
{
struct mem_cache *cache, *n;
list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_total_empty(cache))
return mem_cache_zalloc(cache);
return 0;
@@ -139,7 +139,7 @@ int kfree_to_cache(int cidx, void *virtual)
{
struct mem_cache *cache, *n;
list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_free(cache, virtual) == 0)
return 0;
return -1;

View File

@@ -26,7 +26,7 @@
/* A basic runqueue */
struct runqueue {
struct spinlock lock; /* Lock */
struct list_head task_list; /* List of tasks in rq */
struct link task_list; /* List of tasks in rq */
unsigned int total; /* Total tasks */
};
@@ -101,7 +101,7 @@ void sched_init_runqueues(void)
{
for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
memset(&sched_rq[i], 0, sizeof(struct runqueue));
INIT_LIST_HEAD(&sched_rq[i].task_list);
link_init(&sched_rq[i].task_list);
spin_lock_init(&sched_rq[i].lock);
}
@@ -135,9 +135,9 @@ static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
sched_lock_runqueues();
if (front)
list_add(&task->rq_list, &rq->task_list);
list_insert(&task->rq_list, &rq->task_list);
else
list_add_tail(&task->rq_list, &rq->task_list);
list_insert_tail(&task->rq_list, &rq->task_list);
rq->total++;
task->rq = rq;
sched_unlock_runqueues();
@@ -156,7 +156,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
*/
rq = task->rq;
BUG_ON(list_empty(&task->rq_list));
list_del_init(&task->rq_list);
list_remove_init(&task->rq_list);
task->rq = 0;
rq->total--;
@@ -167,7 +167,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
void sched_init_task(struct ktcb *task, int prio)
{
INIT_LIST_HEAD(&task->rq_list);
link_init(&task->rq_list);
task->priority = prio;
task->ticks_left = 0;
task->state = TASK_INACTIVE;
@@ -345,12 +345,12 @@ void schedule()
/* Determine the next task to be run */
if (rq_runnable->total > 0) {
next = list_entry(rq_runnable->task_list.next,
next = link_to_struct(rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
if (rq_expired->total > 0) {
sched_rq_swap_runqueues();
next = list_entry(rq_runnable->task_list.next,
next = link_to_struct(rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
printk("Idle task.\n");

View File

@@ -16,7 +16,7 @@
#include <l4/lib/idpool.h>
struct address_space_list {
struct list_head list;
struct link list;
/* Lock for list add/removal */
struct spinlock list_lock;
@@ -34,7 +34,7 @@ void init_address_space_list(void)
mutex_init(&address_space_list.ref_lock);
spin_lock_init(&address_space_list.list_lock);
INIT_LIST_HEAD(&address_space_list.list);
link_init(&address_space_list.list);
}
void address_space_reference_lock()
@@ -58,7 +58,7 @@ struct address_space *address_space_find(l4id_t spid)
struct address_space *space;
spin_lock(&address_space_list.list_lock);
list_for_each_entry(space, &address_space_list.list, list) {
list_foreach_struct(space, &address_space_list.list, list) {
if (space->spid == spid) {
spin_unlock(&address_space_list.list_lock);
return space;
@@ -72,7 +72,7 @@ void address_space_add(struct address_space *space)
{
spin_lock(&address_space_list.list_lock);
BUG_ON(!list_empty(&space->list));
list_add(&space->list, &address_space_list.list);
list_insert(&space->list, &address_space_list.list);
BUG_ON(!++address_space_list.count);
spin_unlock(&address_space_list.list_lock);
}
@@ -82,7 +82,7 @@ void address_space_remove(struct address_space *space)
spin_lock(&address_space_list.list_lock);
BUG_ON(list_empty(&space->list));
BUG_ON(--address_space_list.count < 0);
list_del_init(&space->list);
list_remove_init(&space->list);
spin_unlock(&address_space_list.list_lock);
}
@@ -118,7 +118,7 @@ struct address_space *address_space_create(struct address_space *orig)
}
/* Initialize space structure */
INIT_LIST_HEAD(&space->list);
link_init(&space->list);
mutex_init(&space->lock);
space->pgd = pgd;

View File

@@ -21,7 +21,7 @@ struct id_pool *space_id_pool;
/* Hash table for all existing tasks */
struct ktcb_list {
struct list_head list;
struct link list;
struct spinlock list_lock;
int count;
};
@@ -32,12 +32,12 @@ void init_ktcb_list(void)
{
memset(&ktcb_list, 0, sizeof(ktcb_list));
spin_lock_init(&ktcb_list.list_lock);
INIT_LIST_HEAD(&ktcb_list.list);
link_init(&ktcb_list.list);
}
void tcb_init(struct ktcb *new)
{
INIT_LIST_HEAD(&new->task_list);
link_init(&new->task_list);
mutex_init(&new->thread_control_lock);
/* Initialise task's scheduling state and parameters. */
@@ -107,7 +107,7 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
struct ktcb *task;
spin_lock(&ktcb_list.list_lock);
list_for_each_entry(task, &ktcb_list.list, task_list) {
list_foreach_struct(task, &ktcb_list.list, task_list) {
if (task->space->spid == spid) {
spin_unlock(&ktcb_list.list_lock);
return task;
@@ -122,7 +122,7 @@ struct ktcb *tcb_find(l4id_t tid)
struct ktcb *task;
spin_lock(&ktcb_list.list_lock);
list_for_each_entry(task, &ktcb_list.list, task_list) {
list_foreach_struct(task, &ktcb_list.list, task_list) {
if (task->tid == tid) {
spin_unlock(&ktcb_list.list_lock);
return task;
@@ -137,7 +137,7 @@ void tcb_add(struct ktcb *new)
spin_lock(&ktcb_list.list_lock);
BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++ktcb_list.count);
list_add(&new->task_list, &ktcb_list.list);
list_insert(&new->task_list, &ktcb_list.list);
spin_unlock(&ktcb_list.list_lock);
}
@@ -146,7 +146,7 @@ void tcb_remove(struct ktcb *new)
spin_lock(&ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(--ktcb_list.count < 0);
list_del_init(&new->task_list);
list_remove_init(&new->task_list);
spin_unlock(&ktcb_list.list_lock);
}

View File

@@ -5,7 +5,7 @@
void print_page_area_list(struct page_area *p)
{
struct page_area *current_item = p;
struct list_head *begin = &p->list;
struct link *begin = &p->list;
if (!current_item) {
printf("%-20s\n", "Null list.");
return;
@@ -18,7 +18,7 @@ void print_page_area_list(struct page_area *p)
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Number of pages:", current_item->numpages);
list_for_each_entry (current_item, begin, list) {
list_foreach_struct (current_item, begin, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %d\n", "Index:", current_item->index);
printf("%-20s %d\n", "Used:", current_item->used);
@@ -38,7 +38,7 @@ void print_subpage_area(struct subpage_area *s)
void print_subpage_area_list(struct subpage_area *s)
{
struct subpage_area *current_item = s;
struct list_head *begin = &s->list;
struct link *begin = &s->list;
if (!current_item) {
printf("Null list.\n");
return;
@@ -52,7 +52,7 @@ void print_subpage_area_list(struct subpage_area *s)
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Head_of_pages:", current_item->head_of_pages);
list_for_each_entry (current_item, begin, list) {
list_foreach_struct (current_item, begin, list) {
print_subpage_area(current_item);
}
}

View File

@@ -40,7 +40,7 @@ void print_areas(struct page_area *ar)
return;
}
print_page_area(cur, areano++);
list_for_each_entry(cur, &ar->list, list) {
list_foreach_struct(cur, &ar->list, list) {
print_page_area(cur, areano++);
}
return;
@@ -64,7 +64,7 @@ void print_caches(struct mem_cache *c)
return;
}
print_cache(cur, caches++);
list_for_each_entry(cur, &c->list, list) {
list_foreach_struct(cur, &c->list, list) {
print_cache(cur, caches++);
}
return;

View File

@@ -159,7 +159,7 @@ struct mem_cache *mem_cache_init(void *start,
area_start = addr_aligned;
}
INIT_LIST_HEAD(&cache->list);
link_init(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;

View File

@@ -37,8 +37,8 @@ void sem_up(struct mutex *mutex)
/* Each producer wakes one consumer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
list_del_init(&wq->task_list);
list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
list_remove_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up consumer (%d)\n", current->tid,
@@ -48,8 +48,8 @@ void sem_up(struct mutex *mutex)
}
} else if (cnt > 0) {
DECLARE_WAITQUEUE(wq, current);
INIT_LIST_HEAD(&wq.task_list);
list_add_tail(&wq.task_list, &mutex->wq.task_list);
link_init(&wq.task_list);
list_insert_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_prepare_sleep();
printk("(%d) produced, now sleeping...\n", current->tid);
@@ -75,8 +75,8 @@ void sem_down(struct mutex *mutex)
/* Each consumer wakes one producer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
list_del_init(&wq->task_list);
list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
list_remove_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up producer (%d)\n", current->tid,
@@ -86,8 +86,8 @@ void sem_down(struct mutex *mutex)
}
} else if (cnt < 0) {
DECLARE_WAITQUEUE(wq, current);
INIT_LIST_HEAD(&wq.task_list);
list_add_tail(&wq.task_list, &mutex->wq.task_list);
link_init(&wq.task_list);
list_insert_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_prepare_sleep();
printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
@@ -122,7 +122,7 @@ int mutex_lock(struct mutex *mutex)
if (!__mutex_lock(&mutex->lock)) { /* Could not lock, sleep. */
CREATE_WAITQUEUE_ON_STACK(wq, current);
task_set_wqh(current, &mutex->wqh, &wq);
list_add_tail(&wq.task_list, &mutex->wqh.task_list);
list_insert_tail(&wq.task_list, &mutex->wqh.task_list);
mutex->wqh.sleepers++;
sched_prepare_sleep();
spin_unlock(&mutex->wqh.slock);
@@ -151,14 +151,14 @@ void mutex_unlock(struct mutex *mutex)
BUG_ON(current->nlocks < 0);
BUG_ON(mutex->wqh.sleepers < 0);
if (mutex->wqh.sleepers > 0) {
struct waitqueue *wq = list_entry(mutex->wqh.task_list.next,
struct waitqueue *wq = link_to_struct(mutex->wqh.task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
task_unset_wqh(sleeper);
BUG_ON(list_empty(&mutex->wqh.task_list));
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
mutex->wqh.sleepers--;
spin_unlock(&mutex->wqh.slock);

View File

@@ -62,7 +62,7 @@ int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
{
spin_lock(&wqh->slock);
wqh->sleepers++;
list_add_tail(&wq->task_list, &wqh->task_list);
list_insert_tail(&wq->task_list, &wqh->task_list);
task_set_wqh(current, wqh, wq);
sched_prepare_sleep();
//printk("(%d) waiting on wqh at: 0x%p\n",
@@ -78,7 +78,7 @@ int wait_on(struct waitqueue_head *wqh)
CREATE_WAITQUEUE_ON_STACK(wq, current);
spin_lock(&wqh->slock);
wqh->sleepers++;
list_add_tail(&wq.task_list, &wqh->task_list);
list_insert_tail(&wq.task_list, &wqh->task_list);
task_set_wqh(current, wqh, &wq);
sched_prepare_sleep();
//printk("(%d) waiting on wqh at: 0x%p\n",
@@ -101,13 +101,13 @@ void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
while (wqh->sleepers > 0) {
struct waitqueue *wq = list_entry(wqh->task_list.next,
struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
task_unset_wqh(sleeper);
BUG_ON(list_empty(&wqh->task_list));
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
wqh->sleepers--;
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
@@ -128,12 +128,12 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
if (wqh->sleepers > 0) {
struct waitqueue *wq = list_entry(wqh->task_list.next,
struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
BUG_ON(list_empty(&wqh->task_list));
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
wqh->sleepers--;
task_unset_wqh(sleeper);
if (flags & WAKEUP_INTERRUPT)
@@ -193,7 +193,7 @@ int wake_up_task(struct ktcb *task, unsigned int flags)
}
/* Now we can remove the task from its waitqueue */
list_del_init(&wq->task_list);
list_remove_init(&wq->task_list);
wqh->sleepers--;
task->waiting_on = 0;
task->wq = 0;

View File

@@ -85,10 +85,10 @@ struct dentry {
int refcnt;
char name[VFS_DNAME_MAX];
struct dentry *parent; /* Parent dentry */
struct list_head child; /* List of dentries with same parent */
struct list_head children; /* List of children dentries */
struct list_head vref; /* For vnode's dirent reference list */
struct list_head cache_list; /* Dentry cache reference */
struct link child; /* List of dentries with same parent */
struct link children; /* List of children dentries */
struct link vref; /* For vnode's dirent reference list */
struct link cache_list; /* Dentry cache reference */
struct vnode *vnode; /* The vnode associated with dentry */
struct dentry_ops ops;
};
@@ -120,8 +120,8 @@ struct vnode {
struct superblock *sb; /* Reference to superblock */
struct vnode_ops ops; /* Operations on this vnode */
struct file_ops fops; /* File-related operations on this vnode */
struct list_head dentries; /* Dirents that refer to this vnode */
struct list_head cache_list; /* For adding the vnode to vnode cache */
struct link dentries; /* Dirents that refer to this vnode */
struct link cache_list; /* For adding the vnode to vnode cache */
struct dirbuf dirbuf; /* Only directory buffers are kept */
u32 mode; /* Permissions and vnode type */
u32 owner; /* Owner */
@@ -149,15 +149,15 @@ struct file_system_type {
char name[VFS_FSNAME_MAX];
unsigned long magic;
struct fstype_ops ops;
struct list_head list; /* Member of list of all fs types */
struct list_head sblist; /* List of superblocks with this type */
struct link list; /* Member of list of all fs types */
struct link sblist; /* List of superblocks with this type */
};
struct superblock *get_superblock(void *buf);
struct superblock {
u64 fssize;
unsigned int blocksize;
struct list_head list;
struct link list;
struct file_system_type *fs;
struct superblock_ops *ops;
struct vnode *root;

View File

@@ -3,7 +3,7 @@
struct global_list {
int total;
struct list_head list;
struct link list;
};
extern struct global_list global_vm_files;

View File

@@ -66,8 +66,8 @@ struct memfs_superblock {
u64 fmaxblocks; /* Maximum number of blocks per file */
u64 fssize; /* Total size of filesystem */
unsigned long root_vnum; /* The root vnum of this superblock */
struct list_head inode_cache_list; /* Chain of alloc caches */
struct list_head block_cache_list; /* Chain of alloc caches */
struct link inode_cache_list; /* Chain of alloc caches */
struct link block_cache_list; /* Chain of alloc caches */
struct id_pool *ipool; /* Index pool for inodes */
struct id_pool *bpool; /* Index pool for blocks */
struct memfs_inode *inode[MEMFS_TOTAL_INODES]; /* Table of inodes */
@@ -88,7 +88,7 @@ extern struct file_ops memfs_file_operations;
int memfs_format_filesystem(void *buffer);
struct memfs_inode *memfs_create_inode(struct memfs_superblock *sb);
void memfs_register_fstype(struct list_head *);
void memfs_register_fstype(struct link *);
struct superblock *memfs_get_superblock(void *block);
int memfs_generate_superblock(void *block);

View File

@@ -21,12 +21,12 @@
#define VFS_STR_XATDIR "...."
struct pathdata {
struct list_head list;
struct link list;
struct vnode *vstart;
};
struct pathcomp {
struct list_head list;
struct link list;
const char *str;
};

View File

@@ -32,7 +32,7 @@ struct task_fs_data {
/* Thread control block, fs0 portion */
struct tcb {
l4id_t tid;
struct list_head list;
struct link list;
unsigned long shpage_address;
struct task_fd_head *files;
struct task_fs_data *fs_data;

View File

@@ -10,11 +10,11 @@
#include <task.h>
#include <path.h>
extern struct list_head vnode_cache;
extern struct list_head dentry_cache;
extern struct link vnode_cache;
extern struct link dentry_cache;
/*
* This is a temporary replacement for page cache support provided by mm0.
 * This is a temporary replacement for page cache support provided by mm0.
* Normally mm0 tracks all vnode pages, but this is used to track pages in
* directory vnodes, which are normally never mapped by tasks.
*/
@@ -36,10 +36,10 @@ static inline struct dentry *vfs_alloc_dentry(void)
{
struct dentry *d = kzalloc(sizeof(struct dentry));
INIT_LIST_HEAD(&d->child);
INIT_LIST_HEAD(&d->children);
INIT_LIST_HEAD(&d->vref);
INIT_LIST_HEAD(&d->cache_list);
link_init(&d->child);
link_init(&d->children);
link_init(&d->vref);
link_init(&d->cache_list);
return d;
}
@@ -53,8 +53,8 @@ static inline struct vnode *vfs_alloc_vnode(void)
{
struct vnode *v = kzalloc(sizeof(struct vnode));
INIT_LIST_HEAD(&v->dentries);
INIT_LIST_HEAD(&v->cache_list);
link_init(&v->dentries);
link_init(&v->cache_list);
return v;
}
@@ -62,14 +62,14 @@ static inline struct vnode *vfs_alloc_vnode(void)
static inline void vfs_free_vnode(struct vnode *v)
{
BUG(); /* Are the dentries freed ??? */
list_del(&v->cache_list);
list_remove(&v->cache_list);
kfree(v);
}
static inline struct superblock *vfs_alloc_superblock(void)
{
struct superblock *sb = kmalloc(sizeof(struct superblock));
INIT_LIST_HEAD(&sb->list);
link_init(&sb->list);
return sb;
}

View File

@@ -78,7 +78,7 @@ void handle_fs_requests(void)
switch(tag) {
case L4_IPC_TAG_SYNC:
printf("%s: Synced with waiting thread.\n", __TASKNAME__);
return; /* No reply for this tag */
return; /* No reply for this tag */
case L4_IPC_TAG_OPEN:
ret = sys_open(sender, (void *)mr[0], (int)mr[1], (unsigned int)mr[2]);
break;

View File

@@ -12,7 +12,7 @@ struct dentry *bootfs_dentry_lookup(struct dentry *d, char *dname)
{
struct dentry *this;
list_for_each_entry(this, child, &d->children) {
list_foreach_struct(this, child, &d->children) {
if (this->compare(this, dname))
return this;
}
@@ -65,16 +65,16 @@ void bootfs_populate(struct initdata *initdata, struct superblock *sb)
d->vnode = v;
d->parent = sb->root;
strncpy(d->name, img->name, VFS_DENTRY_NAME_MAX);
INIT_LIST_HEAD(&d->child);
INIT_LIST_HEAD(&d->children);
list_add(&d->child, &sb->root->children);
link_init(&d->child);
link_init(&d->children);
list_insert(&d->child, &sb->root->children);
/* Initialise vnode for image */
v->refcnt = 0;
v->id = img->phys_start;
v->size = img->phys_end - img->phys_start;
INIT_LIST_HEAD(&v->dirents);
list_add(&d->v_ref, &v->dirents);
link_init(&v->dirents);
list_insert(&d->v_ref, &v->dirents);
/* Initialise file struct for image */
f->refcnt = 0;
@@ -93,17 +93,17 @@ void bootfs_init_root(struct dentry *r)
/* Initialise dentry for rootdir */
r->refcnt = 0;
strcpy(r->name, "");
INIT_LIST_HEAD(&r->child);
INIT_LIST_HEAD(&r->children);
INIT_LIST_HEAD(&r->vref);
link_init(&r->child);
link_init(&r->children);
link_init(&r->vref);
r->parent = r;
/* Initialise vnode for rootdir */
v->id = 0;
v->refcnt = 0;
INIT_LIST_HEAD(&v->dirents);
INIT_LIST_HEAD(&v->state_list);
list_add(&r->vref, &v->dirents);
link_init(&v->dirents);
link_init(&v->state_list);
list_insert(&r->vref, &v->dirents);
v->size = 0;
}

View File

@@ -23,7 +23,7 @@ struct file_system_type sfs_type = {
};
/* Registers sfs as an available filesystem type */
void sfs_register_fstype(struct list_head *fslist)
void sfs_register_fstype(struct link *fslist)
{
list_add(&sfs_type.list, fslist);
list_insert(&sfs_type.list, fslist);
}

View File

@@ -108,6 +108,6 @@ struct sfs_dentry {
} __attribute__ ((__packed__));
void sfs_register_type(struct list_head *);
void sfs_register_type(struct link *);
#endif /* __C0FS_LAYOUT_H__ */

View File

@@ -14,21 +14,21 @@
#include <l4/api/errno.h>
#include <memfs/memfs.h>
struct list_head fs_type_list;
struct link fs_type_list;
struct superblock *vfs_probe_filesystems(void *block)
{
struct file_system_type *fstype;
struct superblock *sb;
list_for_each_entry(fstype, &fs_type_list, list) {
list_foreach_struct(fstype, &fs_type_list, list) {
/* Does the superblock match for this fs type? */
if ((sb = fstype->ops.get_superblock(block))) {
/*
* Add this to the list of superblocks this
* fs already has.
*/
list_add(&sb->list, &fstype->sblist);
list_insert(&sb->list, &fstype->sblist);
return sb;
}
}
@@ -43,7 +43,7 @@ struct superblock *vfs_probe_filesystems(void *block)
void vfs_register_filesystems(void)
{
/* Initialise fstype list */
INIT_LIST_HEAD(&fs_type_list);
link_init(&fs_type_list);
/* Call per-fs registration functions */
memfs_register_fstype(&fs_type_list);

View File

@@ -22,7 +22,7 @@ struct vnode *lookup_dentry_children(struct dentry *parentdir,
struct vnode *v;
const char *component = pathdata_next_component(pdata);
list_for_each_entry(childdir, &parentdir->children, child)
list_foreach_struct(childdir, &parentdir->children, child)
if (IS_ERR(v = childdir->vnode->ops.lookup(childdir->vnode,
pdata, component)))
/* Means not found, continue search */
@@ -47,7 +47,7 @@ struct vnode *generic_vnode_lookup(struct vnode *thisnode,
int err;
/* Does this path component match with any of this vnode's dentries? */
list_for_each_entry(d, &thisnode->dentries, vref) {
list_foreach_struct(d, &thisnode->dentries, vref) {
if (d->ops.compare(d, component)) {
/* Is this a directory? */
if (vfs_isdir(thisnode)) {

View File

@@ -29,13 +29,13 @@ int memfs_init_caches(struct memfs_superblock *sb)
free_block = (void *)sb + sizeof(*sb);
block_cache = mem_cache_init(free_block, sb->fssize - sizeof(*sb),
sb->blocksize, 1);
list_add(&block_cache->list, &sb->block_cache_list);
list_insert(&block_cache->list, &sb->block_cache_list);
/* Allocate a block and initialise it as first inode cache */
free_block = mem_cache_alloc(block_cache);
inode_cache = mem_cache_init(free_block, sb->blocksize,
sizeof(struct memfs_inode), 0);
list_add(&inode_cache->list, &sb->inode_cache_list);
list_insert(&inode_cache->list, &sb->inode_cache_list);
return 0;
}
@@ -62,8 +62,8 @@ int memfs_format_filesystem(void *buffer)
sb->bpool = id_pool_new_init(MEMFS_TOTAL_BLOCKS);
/* Initialise bitmap allocation lists for blocks and inodes */
INIT_LIST_HEAD(&sb->block_cache_list);
INIT_LIST_HEAD(&sb->inode_cache_list);
link_init(&sb->block_cache_list);
link_init(&sb->inode_cache_list);
memfs_init_caches(sb);
return 0;
@@ -74,7 +74,7 @@ void *memfs_alloc_block(struct memfs_superblock *sb)
{
struct mem_cache *cache;
list_for_each_entry(cache, &sb->block_cache_list, list) {
list_foreach_struct(cache, &sb->block_cache_list, list) {
if (cache->free)
return mem_cache_zalloc(cache);
else
@@ -91,7 +91,7 @@ int memfs_free_block(struct memfs_superblock *sb, void *block)
{
struct mem_cache *c, *tmp;
list_for_each_entry_safe(c, tmp, &sb->block_cache_list, list)
list_foreach_removable_struct(c, tmp, &sb->block_cache_list, list)
if (!mem_cache_free(c, block))
return 0;
else
@@ -151,11 +151,11 @@ int memfs_init_rootdir(struct superblock *sb)
d->vnode = v;
/* Associate dentry with its vnode */
list_add(&d->vref, &d->vnode->dentries);
list_insert(&d->vref, &d->vnode->dentries);
/* Add both vnode and dentry to their flat caches */
list_add(&d->cache_list, &dentry_cache);
list_add(&v->cache_list, &vnode_cache);
list_insert(&d->cache_list, &dentry_cache);
list_insert(&v->cache_list, &vnode_cache);
return 0;
}
@@ -204,12 +204,12 @@ struct superblock *memfs_get_superblock(void *block)
}
/* Registers sfs as an available filesystem type */
void memfs_register_fstype(struct list_head *fslist)
void memfs_register_fstype(struct link *fslist)
{
/* Initialise superblock list for this fstype */
INIT_LIST_HEAD(&memfs_fstype.sblist);
link_init(&memfs_fstype.sblist);
/* Add this fstype to list of available fstypes. */
list_add(&memfs_fstype.list, fslist);
list_insert(&memfs_fstype.list, fslist);
}

View File

@@ -21,7 +21,7 @@ struct memfs_inode *memfs_alloc_inode(struct memfs_superblock *sb)
void *free_block;
/* Ask existing inode caches for a new inode */
list_for_each_entry(cache, &sb->inode_cache_list, list) {
list_foreach_struct(cache, &sb->inode_cache_list, list) {
if (cache->free)
if (!(i = mem_cache_zalloc(cache)))
return PTR_ERR(-ENOSPC);
@@ -38,7 +38,7 @@ struct memfs_inode *memfs_alloc_inode(struct memfs_superblock *sb)
/* Initialise it as an inode cache */
cache = mem_cache_init(free_block, sb->blocksize,
sizeof(struct memfs_inode), 0);
list_add(&cache->list, &sb->inode_cache_list);
list_insert(&cache->list, &sb->inode_cache_list);
if (!(i = mem_cache_zalloc(cache)))
return PTR_ERR(-ENOSPC);
@@ -53,13 +53,13 @@ int memfs_free_inode(struct memfs_superblock *sb, struct memfs_inode *i)
{
struct mem_cache *c, *tmp;
list_for_each_entry_safe(c, tmp, &sb->inode_cache_list, list) {
list_foreach_removable_struct(c, tmp, &sb->inode_cache_list, list) {
/* Free it, if success */
if (!mem_cache_free(c, i)) {
/* If cache completely empty */
if (mem_cache_is_empty(c)) {
/* Free the block, too. */
list_del(&c->list);
list_remove(&c->list);
memfs_free_block(sb, c);
}
return 0;
@@ -213,7 +213,7 @@ int memfs_write_vnode(struct superblock *sb, struct vnode *v)
struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
unsigned int mode)
{
struct dentry *d, *parent = list_entry(v->dentries.next,
struct dentry *d, *parent = link_to_struct(v->dentries.next,
struct dentry, vref);
struct memfs_dentry *memfsd;
struct dentry *newd;
@@ -234,7 +234,7 @@ struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
return PTR_ERR(err);
/* Check there's no existing child with same name */
list_for_each_entry(d, &parent->children, child) {
list_foreach_struct(d, &parent->children, child) {
/* Does the name exist as a child? */
if(d->ops.compare(d, dirname))
return PTR_ERR(-EEXIST);
@@ -278,14 +278,14 @@ struct vnode *memfs_vnode_mknod(struct vnode *v, const char *dirname,
strncpy(newd->name, dirname, VFS_DNAME_MAX);
/* Associate dentry with its vnode */
list_add(&newd->vref, &newd->vnode->dentries);
list_insert(&newd->vref, &newd->vnode->dentries);
/* Associate dentry with its parent */
list_add(&newd->child, &parent->children);
list_insert(&newd->child, &parent->children);
/* Add both vnode and dentry to their flat caches */
list_add(&newd->cache_list, &dentry_cache);
list_add(&newv->cache_list, &vnode_cache);
list_insert(&newd->cache_list, &dentry_cache);
list_insert(&newv->cache_list, &vnode_cache);
return newv;
}
@@ -303,7 +303,7 @@ int memfs_vnode_readdir(struct vnode *v)
{
int err;
struct memfs_dentry *memfsd;
struct dentry *parent = list_entry(v->dentries.next,
struct dentry *parent = link_to_struct(v->dentries.next,
struct dentry, vref);
/*
@@ -327,7 +327,7 @@ int memfs_vnode_readdir(struct vnode *v)
/*
* Fail if vnode size is bigger than a page. Since this allocation
* method is to be replaced, we can live with this limitation for now.
 * method is to be replaced, we can live with this limitation for now.
*/
BUG_ON(v->size > PAGE_SIZE);
@@ -349,7 +349,7 @@ int memfs_vnode_readdir(struct vnode *v)
/* Initialise it */
newd->ops = generic_dentry_operations;
newd->parent = parent;
list_add(&newd->child, &parent->children);
list_insert(&newd->child, &parent->children);
/*
* Lookup the vnode for dentry by its vnode number. We call
@@ -367,7 +367,7 @@ int memfs_vnode_readdir(struct vnode *v)
}
/* Assing this dentry as a name of its vnode */
list_add(&newd->vref, &newd->vnode->dentries);
list_insert(&newd->vref, &newd->vnode->dentries);
/* Increase link count */
newv->links++;
@@ -376,8 +376,8 @@ int memfs_vnode_readdir(struct vnode *v)
memcpy(newd->name, memfsd[i].name, MEMFS_DNAME_MAX);
/* Add both vnode and dentry to their caches */
list_add(&newd->cache_list, &dentry_cache);
list_add(&newv->cache_list, &vnode_cache);
list_insert(&newd->cache_list, &dentry_cache);
list_insert(&newv->cache_list, &vnode_cache);
}
return 0;

View File

@@ -19,8 +19,8 @@ const char *pathdata_next_component(struct pathdata *pdata)
struct pathcomp *p, *n;
const char *pathstr;
list_for_each_entry_safe(p, n, &pdata->list, list) {
list_del(&p->list);
list_foreach_removable_struct(p, n, &pdata->list, list) {
list_remove(&p->list);
pathstr = p->str;
kfree(p);
return pathstr;
@@ -35,8 +35,8 @@ const char *pathdata_last_component(struct pathdata *pdata)
const char *pathstr;
if (!list_empty(&pdata->list)) {
p = list_entry(pdata->list.prev, struct pathcomp, list);
list_del(&p->list);
p = link_to_struct(pdata->list.prev, struct pathcomp, list);
list_remove(&p->list);
pathstr = p->str;
kfree(p);
return pathstr;
@@ -50,8 +50,8 @@ void pathdata_destroy(struct pathdata *p)
{
struct pathcomp *c, *n;
list_for_each_entry_safe(c, n, &p->list, list) {
list_del(&c->list);
list_foreach_removable_struct(c, n, &p->list, list) {
list_remove(&c->list);
kfree(c);
}
kfree(p);
@@ -62,7 +62,7 @@ void pathdata_print(struct pathdata *p)
struct pathcomp *comp;
printf("Extracted path is:\n");
list_for_each_entry(comp, &p->list, list)
list_foreach_struct(comp, &p->list, list)
printf("%s\n", comp->str);
}
@@ -78,7 +78,7 @@ struct pathdata *pathdata_parse(const char *pathname,
return PTR_ERR(-ENOMEM);
/* Initialise pathdata */
INIT_LIST_HEAD(&pdata->list);
link_init(&pdata->list);
strcpy(pathbuf, pathname);
/* First component is root if there's a root */
@@ -87,9 +87,9 @@ struct pathdata *pathdata_parse(const char *pathname,
kfree(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
comp->str = VFS_STR_ROOTDIR;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
if (task)
/* Lookup start vnode is root vnode */
@@ -105,15 +105,15 @@ struct pathdata *pathdata_parse(const char *pathname,
kfree(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
/* Get current dentry for this task */
curdir = list_entry(task->fs_data->curdir->dentries.next,
curdir = link_to_struct(task->fs_data->curdir->dentries.next,
struct dentry, vref);
/* Use its name in path component */
comp->str = curdir->name;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
/* Lookup start vnode is current dir vnode */
pdata->vstart = task->fs_data->curdir;
@@ -130,9 +130,9 @@ struct pathdata *pathdata_parse(const char *pathname,
pathdata_destroy(pdata);
return PTR_ERR(-ENOMEM);
}
INIT_LIST_HEAD(&comp->list);
link_init(&comp->list);
comp->str = str;
list_add_tail(&comp->list, &pdata->list);
list_insert_tail(&comp->list, &pdata->list);
}
/* Next component */

View File

@@ -55,7 +55,7 @@ int pager_sys_open(struct tcb *pager, l4id_t opener, int fd)
/*
* Write file information, they will
* be sent via the return reply.
 * be sent via the return reply.
*/
write_mr(L4SYS_ARG0, v->vnum);
write_mr(L4SYS_ARG1, v->size);
@@ -89,7 +89,7 @@ int pager_open_bypath(struct tcb *pager, char *pathname)
/*
* Write file information, they will
* be sent via the return reply.
 * be sent via the return reply.
*/
write_mr(L4SYS_ARG0, v->vnum);
write_mr(L4SYS_ARG1, v->size);
@@ -109,10 +109,10 @@ void print_vnode(struct vnode *v)
struct dentry *d, *c;
printf("Vnode names:\n");
list_for_each_entry(d, &v->dentries, vref) {
list_foreach_struct(d, &v->dentries, vref) {
printf("%s\n", d->name);
printf("Children dentries:\n");
list_for_each_entry(c, &d->children, child)
list_foreach_struct(c, &d->children, child)
printf("%s\n", c->name);
}
}
@@ -496,7 +496,7 @@ int sys_readdir(struct tcb *t, int fd, void *buf, int count)
if (!(v = vfs_lookup_byvnum(vfs_root.pivot->sb, vnum)))
return -EINVAL;
d = list_entry(v->dentries.next, struct dentry, vref);
d = link_to_struct(v->dentries.next, struct dentry, vref);
/* Ensure vnode is a directory */
if (!vfs_isdir(v))

View File

@@ -32,14 +32,14 @@ struct global_list global_tasks = {
void global_add_task(struct tcb *task)
{
BUG_ON(!list_empty(&task->list));
list_add_tail(&task->list, &global_tasks.list);
list_insert_tail(&task->list, &global_tasks.list);
global_tasks.total++;
}
void global_remove_task(struct tcb *task)
{
BUG_ON(list_empty(&task->list));
list_del_init(&task->list);
list_remove_init(&task->list);
BUG_ON(--global_tasks.total < 0);
}
@@ -47,7 +47,7 @@ struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &global_tasks.list, list)
list_foreach_struct(t, &global_tasks.list, list)
if (t->tid == tid)
return t;
return 0;
@@ -95,7 +95,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
task->tid = TASK_ID_INVALID;
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
link_init(&task->list);
return task;
}

View File

@@ -8,8 +8,8 @@
#include <task.h>
#include <path.h>
LIST_HEAD(vnode_cache);
LIST_HEAD(dentry_cache);
LINK_DECLARE(vnode_cache);
LINK_DECLARE(dentry_cache);
/*
* /
@@ -33,7 +33,7 @@ struct vnode *vfs_lookup_byvnum(struct superblock *sb, unsigned long vnum)
int err;
/* Check the vnode flat list by vnum */
list_for_each_entry(v, &vnode_cache, cache_list)
list_foreach_struct(v, &vnode_cache, cache_list)
if (v->vnum == vnum)
return v;
@@ -48,7 +48,7 @@ struct vnode *vfs_lookup_byvnum(struct superblock *sb, unsigned long vnum)
}
/* Add the vnode back to vnode flat list */
list_add(&v->cache_list, &vnode_cache);
list_insert(&v->cache_list, &vnode_cache);
return v;
}

View File

@@ -185,7 +185,7 @@ struct mem_cache *mem_cache_init(void *start,
area_start = addr_aligned;
}
INIT_LIST_HEAD(&cache->list);
link_init(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;

View File

@@ -17,7 +17,7 @@
* fixed-size memory cache) Keeps track of free/occupied items within its
* start/end boundaries. Does not grow/shrink but you can link-list it. */
struct mem_cache {
struct list_head list;
struct link list;
int total;
int free;
unsigned int start;

View File

@@ -26,7 +26,7 @@ static struct page_area *new_page_area(struct page_allocator *p)
struct mem_cache *cache;
struct page_area *new_area;
list_for_each_entry(cache, &p->pga_cache_list, list) {
list_foreach_struct(cache, &p->pga_cache_list, list) {
if ((new_area = mem_cache_alloc(cache)) != 0) {
new_area->cache = cache;
p->pga_free--;
@@ -45,7 +45,7 @@ get_free_page_area(int quantity, struct page_allocator *p)
if (quantity <= 0)
return 0;
list_for_each_entry(area, &p->page_area_list, list) {
list_foreach_struct(area, &p->page_area_list, list) {
/* Check for exact size match */
if (area->numpages == quantity && !area->used) {
@@ -60,8 +60,8 @@ get_free_page_area(int quantity, struct page_allocator *p)
new->pfn = area->pfn + area->numpages;
new->numpages = quantity;
new->used = 1;
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &area->list);
link_init(&new->list);
list_insert(&new->list, &area->list);
return new;
}
}
@@ -91,36 +91,36 @@ void init_page_allocator(unsigned long start, unsigned long end)
struct page_area *freemem, *area;
struct mem_cache *cache;
INIT_LIST_HEAD(&allocator.page_area_list);
INIT_LIST_HEAD(&allocator.pga_cache_list);
link_init(&allocator.page_area_list);
link_init(&allocator.pga_cache_list);
/* Initialise the first page area cache */
cache = mem_cache_init(l4_map_helper((void *)start, 1), PAGE_SIZE,
sizeof(struct page_area), 0);
list_add(&cache->list, &allocator.pga_cache_list);
list_insert(&cache->list, &allocator.pga_cache_list);
/* Initialise the first area that describes the page just allocated */
area = mem_cache_alloc(cache);
INIT_LIST_HEAD(&area->list);
link_init(&area->list);
area->pfn = __pfn(start);
area->used = 1;
area->numpages = 1;
area->cache = cache;
list_add(&area->list, &allocator.page_area_list);
list_insert(&area->list, &allocator.page_area_list);
/* Update freemem start address */
start += PAGE_SIZE;
/* Initialise first area that describes all of free physical memory */
freemem = mem_cache_alloc(cache);
INIT_LIST_HEAD(&freemem->list);
link_init(&freemem->list);
freemem->pfn = __pfn(start);
freemem->numpages = __pfn(end) - freemem->pfn;
freemem->cache = cache;
freemem->used = 0;
/* Add it as the first unused page area */
list_add(&freemem->list, &allocator.page_area_list);
list_insert(&freemem->list, &allocator.page_area_list);
/* Initialise free page area counter */
allocator.pga_free = mem_cache_total_empty(cache);
@@ -163,7 +163,7 @@ int check_page_areas(struct page_allocator *p)
* Add the new cache to available
* list of free page area caches
*/
list_add(&newcache->list, &p->pga_cache_list);
list_insert(&newcache->list, &p->pga_cache_list);
/* Unlock here */
}
return 0;
@@ -202,13 +202,13 @@ struct page_area *merge_free_areas(struct page_area *before,
BUG_ON(before == after);
before->numpages += after->numpages;
list_del(&after->list);
list_remove(&after->list);
c = after->cache;
mem_cache_free(c, after);
/* Recursively free the cache page */
if (mem_cache_is_empty(c)) {
list_del(&c->list);
list_remove(&c->list);
BUG_ON(free_page(l4_unmap_helper(c, 1)) < 0)
}
return before;
@@ -219,7 +219,7 @@ static int find_and_free_page_area(void *addr, struct page_allocator *p)
struct page_area *area, *prev, *next;
/* First find the page area to be freed. */
list_for_each_entry(area, &p->page_area_list, list)
list_foreach_struct(area, &p->page_area_list, list)
if (__pfn_to_addr(area->pfn) == (unsigned long)addr &&
area->used) { /* Found it */
area->used = 0;
@@ -230,12 +230,12 @@ static int find_and_free_page_area(void *addr, struct page_allocator *p)
found:
/* Now merge with adjacent areas, if possible */
if (area->list.prev != &p->page_area_list) {
prev = list_entry(area->list.prev, struct page_area, list);
prev = link_to_struct(area->list.prev, struct page_area, list);
if (!prev->used)
area = merge_free_areas(prev, area);
}
if (area->list.next != &p->page_area_list) {
next = list_entry(area->list.next, struct page_area, list);
next = link_to_struct(area->list.next, struct page_area, list);
if (!next->used)
area = merge_free_areas(area, next);
}

View File

@@ -6,7 +6,7 @@
/* List member to keep track of free and unused physical pages.
* Has PAGE_SIZE granularity */
struct page_area {
struct list_head list;
struct link list;
unsigned int used; /* Used or free */
unsigned int pfn; /* Base pfn */
unsigned int numpages; /* Number of pages this region covers */
@@ -15,8 +15,8 @@ struct page_area {
};
struct page_allocator {
struct list_head page_area_list;
struct list_head pga_cache_list;
struct link page_area_list;
struct link pga_cache_list;
int pga_free;
};

View File

@@ -5,7 +5,7 @@ void print_page_area_list(struct page_allocator *p)
{
struct page_area *area;
list_for_each_entry (area, &p->page_area_list, list) {
list_foreach_struct (area, &p->page_area_list, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %u\n", "Pfn:", area->pfn);
printf("%-20s %d\n", "Used:", area->used);
@@ -23,11 +23,11 @@ void print_km_area(struct km_area *s)
}
void print_km_area_list(struct list_head *km_areas)
void print_km_area_list(struct link *km_areas)
{
struct km_area *area;
list_for_each_entry (area, km_areas, list)
list_foreach_struct (area, km_areas, list)
print_km_area(area);
}

View File

@@ -12,6 +12,6 @@
#endif
void print_page_area_list(struct page_allocator *p);
void print_km_area_list(struct list_head *s);
void print_km_area_list(struct link *s);
void print_km_area(struct km_area *s);
#endif /* DEBUG_H */

View File

@@ -29,13 +29,13 @@ void print_page_area(struct page_area *a, int areano)
return;
}
void print_areas(struct list_head *area_head)
void print_areas(struct link *area_head)
{
struct page_area *cur;
int areano = 1;
printf("Page areas:\n-------------\n");
list_for_each_entry(cur, area_head, list)
list_foreach_struct(cur, area_head, list)
print_page_area(cur, areano++);
}
@@ -47,12 +47,12 @@ void print_cache(struct mem_cache *c, int cacheno)
printf("Start: 0x%x\n", c->start);
}
void print_caches(struct list_head *cache_head)
void print_caches(struct link *cache_head)
{
int caches = 1;
struct mem_cache *cur;
list_for_each_entry(cur, cache_head, list)
list_foreach_struct(cur, cache_head, list)
print_cache(cur, caches++);
}

View File

@@ -6,8 +6,8 @@
void test_allocpage(int num_allocs, int alloc_max, FILE *init, FILE *exit);
void print_page_area(struct page_area *a, int no);
void print_caches(struct list_head *cache_head);
void print_caches(struct link *cache_head);
void print_cache(struct mem_cache *c, int cacheno);
void print_areas(struct list_head *area_head);
void print_areas(struct link *area_head);
void print_page_area(struct page_area *ar, int areano);
#endif

View File

@@ -17,7 +17,7 @@
#include "debug.h"
#include "tests.h"
extern struct list_head km_area_start;
extern struct link km_area_start;
void print_kmalloc_state(void)
{

View File

@@ -33,6 +33,6 @@ struct vfs_file_data {
struct vm_file *vfs_file_create(void);
extern struct list_head vm_file_list;
extern struct link vm_file_list;
#endif /* __MM0_FILE_H__ */

View File

@@ -3,7 +3,7 @@
struct global_list {
int total;
struct list_head list;
struct link list;
};
extern struct global_list global_vm_files;

View File

@@ -21,7 +21,7 @@ struct initdata {
struct page_bitmap page_map;
unsigned long pager_utcb_virt;
unsigned long pager_utcb_phys;
struct list_head boot_file_list;
struct link boot_file_list;
};
extern struct initdata initdata;

View File

@@ -25,7 +25,7 @@ struct shm_descriptor {
struct shm_descriptor {
int key; /* IPC key supplied by user task */
l4id_t shmid; /* SHM area id, allocated by mm0 */
struct list_head list; /* SHM list, used by mm0 */
struct link list; /* SHM list, used by mm0 */
struct vm_file *owner;
void *shm_addr; /* The virtual address for segment. */
unsigned long size; /* Size of the area in pages */

View File

@@ -51,18 +51,18 @@ struct task_fd_head {
};
struct task_vma_head {
struct list_head list;
struct link list;
int tcb_refs;
};
struct utcb_desc {
struct list_head list;
struct link list;
unsigned long utcb_base;
struct id_pool *slots;
};
struct utcb_head {
struct list_head list;
struct link list;
int tcb_refs;
};
@@ -70,11 +70,11 @@ struct utcb_head {
/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */
struct list_head list;
struct link list;
/* Fields for parent-child relations */
struct list_head child_ref; /* Child ref in parent's list */
struct list_head children; /* List of children */
struct link child_ref; /* Child ref in parent's list */
struct link children; /* List of children */
struct tcb *parent; /* Parent task */
/* Task creation flags */
@@ -131,7 +131,7 @@ struct tcb {
};
struct tcb_head {
struct list_head list;
struct link list;
int total; /* Total threads */
};

View File

@@ -57,7 +57,7 @@ enum VM_FILE_TYPE {
struct page {
int refcnt; /* Refcount */
struct spinlock lock; /* Page lock. */
struct list_head list; /* For list of a vm_object's in-memory pages */
struct link list; /* For list of a vm_object's in-memory pages */
struct vm_object *owner;/* The vm_object the page belongs to */
unsigned long virtual; /* If refs >1, first mapper's virtual address */
unsigned int flags; /* Flags associated with the page. */
@@ -115,20 +115,20 @@ struct vm_object {
int npages; /* Number of pages in memory */
int nlinks; /* Number of mapper links that refer */
int shadows; /* Number of shadows that refer */
struct list_head shref; /* Shadow reference from original object */
struct list_head shdw_list; /* List of vm objects that shadows this one */
struct list_head link_list; /* List of links that refer to this object */
struct link shref; /* Shadow reference from original object */
struct link shdw_list; /* List of vm objects that shadows this one */
struct link link_list; /* List of links that refer to this object */
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int flags; /* Defines the type and flags of the object */
struct list_head list; /* List of all vm objects in memory */
struct link list; /* List of all vm objects in memory */
struct vm_pager *pager; /* The pager for this object */
struct list_head page_cache;/* List of in-memory pages */
struct link page_cache;/* List of in-memory pages */
};
/* In memory representation of either a vfs file, a device. */
struct vm_file {
int openers;
struct list_head list;
struct link list;
unsigned long length;
unsigned int type;
struct vm_object vm_obj;
@@ -138,22 +138,22 @@ struct vm_file {
/* To create per-vma vm_object lists */
struct vm_obj_link {
struct list_head list;
struct list_head linkref;
struct link list;
struct link linkref;
struct vm_object *obj;
};
static inline void vm_link_object(struct vm_obj_link *link, struct vm_object *obj)
{
link->obj = obj;
list_add(&link->linkref, &obj->link_list);
list_insert(&link->linkref, &obj->link_list);
obj->nlinks++;
}
static inline struct vm_object *vm_unlink_object(struct vm_obj_link *link)
{
/* Delete link from object's link list */
list_del(&link->linkref);
list_remove(&link->linkref);
/* Reduce object's mapper link count */
link->obj->nlinks--;
@@ -175,8 +175,8 @@ static inline struct vm_object *vm_unlink_object(struct vm_obj_link *link)
* object's copy of pages supersede the ones lower in the stack.
*/
struct vm_area {
struct list_head list; /* Per-task vma list */
struct list_head vm_obj_list; /* Head for vm_object list. */
struct link list; /* Per-task vma list */
struct link vm_obj_list; /* Head for vm_object list. */
unsigned long pfn_start; /* Region start virtual pfn */
unsigned long pfn_end; /* Region end virtual pfn, exclusive */
unsigned long flags; /* Protection flags. */
@@ -189,12 +189,12 @@ struct vm_area {
* rather than searching the address. E.g. munmap/msync
*/
static inline struct vm_area *find_vma(unsigned long addr,
struct list_head *vm_area_list)
struct link *vm_area_list)
{
struct vm_area *vma;
unsigned long pfn = __pfn(addr);
list_for_each_entry(vma, vm_area_list, list)
list_foreach_struct(vma, vm_area_list, list)
if ((pfn >= vma->pfn_start) && (pfn < vma->pfn_end))
return vma;
return 0;
@@ -213,12 +213,12 @@ extern struct vm_pager devzero_pager;
extern struct vm_pager swap_pager;
/* vm object and vm file lists */
extern struct list_head vm_object_list;
extern struct link vm_object_list;
/* vm object link related functions */
struct vm_obj_link *vm_objlink_create(void);
struct vm_obj_link *vma_next_link(struct list_head *link,
struct list_head *head);
struct vm_obj_link *vma_next_link(struct link *link,
struct link *head);
/* vm file and object initialisation */
struct vm_object *vm_object_create(void);
@@ -229,8 +229,8 @@ void vm_file_put(struct vm_file *f);
/* Printing objects, files */
void vm_object_print(struct vm_object *vmo);
void vm_print_objects(struct list_head *vmo_list);
void vm_print_files(struct list_head *file_list);
void vm_print_objects(struct link *vmo_list);
void vm_print_files(struct link *file_list);
/* Used for pre-faulting a page from mm0 */
int prefault_page(struct tcb *task, unsigned long address,
@@ -248,7 +248,7 @@ int validate_task_range(struct tcb *t, unsigned long start,
/* Changes all shadows and their ptes to read-only */
int vm_freeze_shadows(struct tcb *task);
int task_insert_vma(struct vm_area *vma, struct list_head *vma_list);
int task_insert_vma(struct vm_area *vma, struct link *vma_list);
/* Main page fault entry point */
int page_fault_handler(struct tcb *faulty_task, fault_kdata_t *fkdata);

View File

@@ -25,7 +25,7 @@
#include <boot.h>
/* Receives all registers and replies back */
/* Receives all registers and origies back */
int ipc_test_full_sync(l4id_t senderid)
{
for (int i = MR_UNUSED_START; i < MR_TOTAL + MR_REST; i++) {
@@ -35,7 +35,7 @@ int ipc_test_full_sync(l4id_t senderid)
write_mr(i, 0);
}
/* Send a full reply */
/* Send a full origy */
l4_send_full(senderid, 0);
return 0;
}
@@ -158,7 +158,7 @@ void handle_requests(void)
ret = sys_execve(sender, (char *)mr[0],
(char **)mr[1], (char **)mr[2]);
if (ret < 0)
break; /* We reply for errors */
break; /* We origy for errors */
else
return; /* else we're done */
}

View File

@@ -10,7 +10,7 @@
* of how mmaped devices would be mapped with a pager.
*/
struct mmap_device {
struct list_head page_list; /* Dyn-allocated page list */
struct link page_list; /* Dyn-allocated page list */
unsigned long pfn_start; /* Physical pfn start */
unsigned long pfn_end; /* Physical pfn end */
};
@@ -27,17 +27,17 @@ struct page *memdev_page_in(struct vm_object *vm_obj,
return PTR_ERR(-1);
/* Simply return the page if found */
list_for_each_entry(page, &memdev->page_list, list)
list_foreach_struct(page, &memdev->page_list, list)
if (page->offset == pfn_offset)
return page;
/* Otherwise allocate one of our own for that offset and return it */
page = kzalloc(sizeof(struct page));
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
spin_lock_init(&page->lock);
page->offset = pfn_offset;
page->owner = vm_obj;
list_add(&page->list, &memdev->page_list);
list_insert(&page->list, &memdev->page_list);
return page;
}

View File

@@ -75,7 +75,7 @@ int do_execve(struct tcb *sender, char *filename, struct args_struct *args,
BUG_ON(!(tgleader = find_task(sender->tgid)));
/* Destroy all children threads. */
list_for_each_entry(thread, &tgleader->children, child_ref)
list_foreach_struct(thread, &tgleader->children, child_ref)
do_exit(thread, 0);
} else {
/* Otherwise group leader is same as sender */

View File

@@ -96,7 +96,7 @@ int execve_recycle_task(struct tcb *new, struct tcb *orig)
/* Copy parent relationship */
BUG_ON(new->parent);
new->parent = orig->parent;
list_add(&new->child_ref, &orig->parent->children);
list_insert(&new->child_ref, &orig->parent->children);
/* Flush all IO on task's files and close fds */
task_close_files(orig);

View File

@@ -56,14 +56,14 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
* Given a reference to link = vma, head = vma, returns link1.
* Given a reference to link = link3, head = vma, returns 0.
*/
struct vm_obj_link *vma_next_link(struct list_head *link,
struct list_head *head)
struct vm_obj_link *vma_next_link(struct link *link,
struct link *head)
{
BUG_ON(list_empty(link));
if (link->next == head)
return 0;
else
return list_entry(link->next, struct vm_obj_link, list);
return link_to_struct(link->next, struct vm_obj_link, list);
}
/* Unlinks orig_link from its vma and deletes it but keeps the object. */
@@ -72,7 +72,7 @@ struct vm_object *vma_drop_link(struct vm_obj_link *link)
struct vm_object *dropped;
/* Remove object link from vma's list */
list_del(&link->list);
list_remove(&link->list);
/* Unlink the link from object */
dropped = vm_unlink_object(link);
@@ -104,7 +104,7 @@ int vm_object_is_subset(struct vm_object *shadow,
* Do a page by page comparison. Every lesser page
* must be in copier for overlap.
*/
list_for_each_entry(pl, &original->page_cache, list)
list_foreach_struct(pl, &original->page_cache, list)
if (!(pc = find_page(shadow, pl->offset)))
return 0;
/*
@@ -160,14 +160,14 @@ int vma_merge_object(struct vm_object *redundant)
BUG_ON(redundant->shadows != 1);
/* Get the last shadower object in front */
front = list_entry(redundant->shdw_list.next,
front = link_to_struct(redundant->shdw_list.next,
struct vm_object, shref);
/* Move all non-intersecting pages to front shadow. */
list_for_each_entry_safe(p1, n, &redundant->page_cache, list) {
list_foreach_removable_struct(p1, n, &redundant->page_cache, list) {
/* Page doesn't exist in front, move it there */
if (!(p2 = find_page(front, p1->offset))) {
list_del_init(&p1->list);
list_remove_init(&p1->list);
spin_lock(&p1->lock);
p1->owner = front;
spin_unlock(&p1->lock);
@@ -179,20 +179,20 @@ int vma_merge_object(struct vm_object *redundant)
/* Sort out shadow relationships after the merge: */
/* Front won't be a shadow of the redundant shadow anymore */
list_del_init(&front->shref);
list_remove_init(&front->shref);
/* Check that there really was one shadower of redundant left */
BUG_ON(!list_empty(&redundant->shdw_list));
/* Redundant won't be a shadow of its next object */
list_del_init(&redundant->shref);
list_remove_init(&redundant->shref);
/* Front is now a shadow of redundant's next object */
list_add(&front->shref, &redundant->orig_obj->shdw_list);
list_insert(&front->shref, &redundant->orig_obj->shdw_list);
front->orig_obj = redundant->orig_obj;
/* Find last link for the object */
last_link = list_entry(redundant->link_list.next,
last_link = link_to_struct(redundant->link_list.next,
struct vm_obj_link, linkref);
/* Drop the last link to the object */
@@ -213,8 +213,8 @@ struct vm_obj_link *vm_objlink_create(void)
if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&vmo_link->list);
INIT_LIST_HEAD(&vmo_link->linkref);
link_init(&vmo_link->list);
link_init(&vmo_link->linkref);
return vmo_link;
}
@@ -274,7 +274,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
/* Get the first object on the vma */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
do {
/* Create a new link */
@@ -284,7 +284,7 @@ int vma_copy_links(struct vm_area *new_vma, struct vm_area *vma)
vm_link_object(new_link, vmo_link->obj);
/* Add the new link to vma in object order */
list_add_tail(&new_link->list, &new_vma->vm_obj_list);
list_insert_tail(&new_link->list, &new_vma->vm_obj_list);
/* Continue traversing links, doing the same copying */
} while((vmo_link = vma_next_link(&vmo_link->list,
@@ -361,10 +361,10 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Get previous and next links, if they exist */
prev = (link->list.prev == &vma->vm_obj_list) ? 0 :
list_entry(link->list.prev, struct vm_obj_link, list);
link_to_struct(link->list.prev, struct vm_obj_link, list);
next = (link->list.next == &vma->vm_obj_list) ? 0 :
list_entry(link->list.next, struct vm_obj_link, list);
link_to_struct(link->list.next, struct vm_obj_link, list);
/* Drop the link */
obj = vma_drop_link(link);
@@ -378,7 +378,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* Remove prev from current object's shadow list */
BUG_ON(list_empty(&prev->obj->shref));
list_del_init(&prev->obj->shref);
list_remove_init(&prev->obj->shref);
/*
* We don't allow dropping non-shadow objects yet,
@@ -387,7 +387,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(!next);
/* prev is now shadow of next */
list_add(&prev->obj->shref,
list_insert(&prev->obj->shref,
&next->obj->shdw_list);
prev->obj->orig_obj = next->obj;
@@ -397,7 +397,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
*/
if (obj->nlinks == 0) {
BUG_ON(obj->orig_obj != next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
} else {
/*
* Dropped object still has referrers, which
@@ -421,7 +421,7 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
BUG_ON(obj->orig_obj != next->obj);
BUG_ON(--next->obj->shadows < 0);
// vm_object_print(next->obj);
list_del_init(&obj->shref);
list_remove_init(&obj->shref);
}
}
}
@@ -475,7 +475,7 @@ int vma_drop_merge_delete_all(struct vm_area *vma)
BUG_ON(list_empty(&vma->vm_obj_list));
/* Traverse and get rid of all links */
list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list)
list_foreach_removable_struct(vmo_link, n, &vma->vm_obj_list, list)
vma_drop_merge_delete(vma, vmo_link);
return 0;
@@ -541,10 +541,10 @@ struct page *copy_on_write(struct fault_data *fault)
* v v
* shadow original
*/
list_add(&shadow_link->list, &vma->vm_obj_list);
list_insert(&shadow_link->list, &vma->vm_obj_list);
/* Add object to original's shadower list */
list_add(&shadow->shref, &shadow->orig_obj->shdw_list);
list_insert(&shadow->shref, &shadow->orig_obj->shdw_list);
/* Add to global object list */
global_add_vm_object(shadow);
@@ -758,7 +758,7 @@ int vm_freeze_shadows(struct tcb *task)
struct vm_object *vmo;
struct page *p;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
/* Shared vmas don't have shadows */
if (vma->flags & VMA_SHARED)
@@ -766,7 +766,7 @@ int vm_freeze_shadows(struct tcb *task)
/* Get the first object */
BUG_ON(list_empty(&vma->vm_obj_list));
vmo_link = list_entry(vma->vm_obj_list.next,
vmo_link = link_to_struct(vma->vm_obj_list.next,
struct vm_obj_link, list);
vmo = vmo_link->obj;
@@ -789,7 +789,7 @@ int vm_freeze_shadows(struct tcb *task)
* Make all pages on it read-only
* in the page tables.
*/
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
/* Find virtual address of each page */
virtual = vma_page_to_virtual(vma, p);

View File

@@ -190,7 +190,7 @@ struct vm_file *do_open2(struct tcb *task, int fd, unsigned long vnum, unsigned
}
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -240,7 +240,7 @@ int do_open(struct tcb *task, int fd, unsigned long vnum, unsigned long length)
task->files->fd[fd].cursor = 0;
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &global_vm_files.list, list) {
list_foreach_struct(vmfile, &global_vm_files.list, list) {
/* Check whether it is a vfs file and if so vnums match. */
if ((vmfile->type & VM_FILE_VFS) &&
@@ -301,22 +301,22 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* Add if list is empty */
if (list_empty(&vmo->page_cache)) {
list_add_tail(&this->list, &vmo->page_cache);
list_insert_tail(&this->list, &vmo->page_cache);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, &vmo->page_cache, list) {
after = list_entry(before->list.next, struct page, list);
list_foreach_struct(before, &vmo->page_cache, list) {
after = link_to_struct(before->list.next, struct page, list);
/* If there's only one in list */
if (before->list.next == &vmo->page_cache) {
/* Add as next if greater */
if (this->offset > before->offset)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->offset < before->offset)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
return 0;
@@ -325,7 +325,7 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
/* If this page is in-between two other, insert it there */
if (before->offset < this->offset &&
after->offset > this->offset) {
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
BUG_ON(this->offset == before->offset);
@@ -603,7 +603,7 @@ int write_cache_pages_orig(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -627,7 +627,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;
@@ -666,7 +666,7 @@ int write_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
int copysize, left;
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list) {
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list) {
/* First page */
if (head->offset == pfn_start) {
left = count;
@@ -726,7 +726,7 @@ int read_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
unsigned long copy_offset; /* Current copy offset on the buffer */
/* Find the head of consecutive pages */
list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
list_foreach_struct(head, &vmfile->vm_obj.page_cache, list)
if (head->offset == pfn_start)
goto copy;
@@ -745,7 +745,7 @@ copy:
last_pgoff = head->offset;
/* Map the rest, copy and unmap. */
list_for_each_entry(this, &head->list, list) {
list_foreach_struct(this, &head->list, list) {
if (left == 0 || this->offset == pfn_end)
break;

View File

@@ -21,7 +21,7 @@
#include <utcb.h>
/* A separate list than the generic file list that keeps just the boot files */
LIST_HEAD(boot_file_list);
LINK_DECLARE(boot_file_list);
/*
* A specialised function for setting up the task environment of mm0.
@@ -53,7 +53,7 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
return err;
/* Set pager as child and parent of itself */
list_add(&task->child_ref, &task->children);
list_insert(&task->child_ref, &task->children);
task->parent = task;
/*
@@ -78,9 +78,9 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
struct vm_file *initdata_next_bootfile(struct initdata *initdata)
{
struct vm_file *file, *n;
list_for_each_entry_safe(file, n, &initdata->boot_file_list,
list_foreach_removable_struct(file, n, &initdata->boot_file_list,
list) {
list_del_init(&file->list);
list_remove_init(&file->list);
return file;
}
return 0;
@@ -96,10 +96,10 @@ int start_boot_tasks(struct initdata *initdata)
struct tcb *fs0_task;
struct svc_image *img;
struct task_ids ids;
struct list_head other_files;
struct link other_files;
int total = 0;
INIT_LIST_HEAD(&other_files);
link_init(&other_files);
/* Separate out special server tasks and regular files */
do {
@@ -113,7 +113,7 @@ int start_boot_tasks(struct initdata *initdata)
else if (!strcmp(img->name, __VFSNAME__))
fs0_file = file;
else
list_add(&file->list, &other_files);
list_insert(&file->list, &other_files);
} else
break;
} while (1);
@@ -138,12 +138,12 @@ int start_boot_tasks(struct initdata *initdata)
total++;
/* Initialise other tasks */
list_for_each_entry_safe(file, n, &other_files, list) {
list_foreach_removable_struct(file, n, &other_files, list) {
// printf("%s: Initialising new boot task.\n", __TASKNAME__);
ids.tid = TASK_ID_INVALID;
ids.spid = TASK_ID_INVALID;
ids.tgid = TASK_ID_INVALID;
list_del_init(&file->list);
list_remove_init(&file->list);
BUG_ON(IS_ERR(boottask_exec(file, USER_AREA_START, USER_AREA_END, &ids)));
total++;
}

View File

@@ -79,7 +79,7 @@ void init_physmem(struct initdata *initdata, struct membank *membank)
/* Initialise the page array */
for (int i = 0; i < npages; i++) {
INIT_LIST_HEAD(&membank[0].page_array[i].list);
link_init(&membank[0].page_array[i].list);
/* Set use counts for pages the kernel has already used up */
if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))

View File

@@ -28,8 +28,8 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
if (!(vma = kzalloc(sizeof(struct vm_area))))
return 0;
INIT_LIST_HEAD(&vma->list);
INIT_LIST_HEAD(&vma->vm_obj_list);
link_init(&vma->list);
link_init(&vma->vm_obj_list);
vma->pfn_start = pfn_start;
vma->pfn_end = pfn_start + npages;
@@ -45,19 +45,19 @@ struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
* The new vma is assumed to have been correctly set up not to intersect
* with any other existing vma.
*/
int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
int task_insert_vma(struct vm_area *this, struct link *vma_list)
{
struct vm_area *before, *after;
/* Add if list is empty */
if (list_empty(vma_list)) {
list_add_tail(&this->list, vma_list);
list_insert_tail(&this->list, vma_list);
return 0;
}
/* Else find the right interval */
list_for_each_entry(before, vma_list, list) {
after = list_entry(before->list.next, struct vm_area, list);
list_foreach_struct(before, vma_list, list) {
after = link_to_struct(before->list.next, struct vm_area, list);
/* If there's only one in list */
if (before->list.next == vma_list) {
@@ -69,10 +69,10 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
/* Add as next if greater */
if (this->pfn_start > before->pfn_start)
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
/* Add as previous if smaller */
else if (this->pfn_start < before->pfn_start)
list_add_tail(&this->list, &before->list);
list_insert_tail(&this->list, &before->list);
else
BUG();
@@ -90,7 +90,7 @@ int task_insert_vma(struct vm_area *this, struct list_head *vma_list)
BUG_ON(set_intersection(this->pfn_start, this->pfn_end,
after->pfn_start,
after->pfn_end));
list_add(&this->list, &before->list);
list_insert(&this->list, &before->list);
return 0;
}
@@ -122,7 +122,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
return task->start;
/* First vma to check our range against */
vma = list_entry(task->vm_area_head->list.next, struct vm_area, list);
vma = link_to_struct(task->vm_area_head->list.next, struct vm_area, list);
/* Start searching from task's end of data to start of stack */
while (pfn_end <= __pfn(task->end)) {
@@ -147,7 +147,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
}
/* Otherwise get next vma entry */
vma = list_entry(vma->list.next,
vma = link_to_struct(vma->list.next,
struct vm_area, list);
continue;
}
@@ -282,7 +282,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
vm_link_object(vmo_link, &mapfile->vm_obj);
/* Add link to vma list */
list_add_tail(&vmo_link->list, &new->vm_obj_list);
list_insert_tail(&vmo_link->list, &new->vm_obj_list);
/*
* If the file is a shm file, also map devzero behind it. i.e.
@@ -304,7 +304,7 @@ void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
return PTR_ERR(-ENOMEM);
}
vm_link_object(vmo_link2, &dzero->vm_obj);
list_add_tail(&vmo_link2->list, &new->vm_obj_list);
list_insert_tail(&vmo_link2->list, &new->vm_obj_list);
}
/* Finished initialising the vma, add it to task */

View File

@@ -43,7 +43,7 @@ int vma_split(struct vm_area *vma, struct tcb *task,
vma_copy_links(new, vma);
/* Add new one next to original vma */
list_add_tail(&new->list, &vma->list);
list_insert_tail(&new->list, &vma->list);
/* Unmap the removed portion */
BUG_ON(l4_unmap((void *)__pfn_to_addr(unmap_start),
@@ -102,7 +102,7 @@ int vma_destroy_single(struct tcb *task, struct vm_area *vma)
vma->pfn_end - vma->pfn_start, task->tid);
/* Unlink and delete vma */
list_del(&vma->list);
list_remove(&vma->list);
kfree(vma);
return 0;
@@ -149,7 +149,7 @@ int vma_flush_pages(struct vm_area *vma)
* could only be a single VM_SHARED file-backed object in the chain.
*/
BUG_ON(list_empty(&vma->list));
vmo_link = list_entry(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo_link = link_to_struct(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo = vmo_link->obj;
/* Only dirty objects would need flushing */
@@ -187,7 +187,7 @@ int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
struct vm_area *vma, *n;
int err;
list_for_each_entry_safe(vma, n, &task->vm_area_head->list, list) {
list_foreach_removable_struct(vma, n, &task->vm_area_head->list, list) {
/* Check for intersection */
if (set_intersection(munmap_start, munmap_end,
vma->pfn_start, vma->pfn_end)) {

View File

@@ -21,7 +21,7 @@ struct page *page_init(struct page *page)
memset(page, 0, sizeof(*page));
page->refcnt = -1;
spin_lock_init(&page->lock);
INIT_LIST_HEAD(&page->list);
link_init(&page->list);
return page;
@@ -30,7 +30,7 @@ struct page *find_page(struct vm_object *obj, unsigned long pfn)
{
struct page *p;
list_for_each_entry(p, &obj->page_cache, list)
list_foreach_struct(p, &obj->page_cache, list)
if (p->offset == pfn)
return p;
@@ -46,8 +46,8 @@ int default_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del_init(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove_init(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -219,8 +219,8 @@ int bootfile_release_pages(struct vm_object *vm_obj)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
list_del(&p->list);
list_foreach_removable_struct(p, n, &vm_obj->page_cache, list) {
list_remove(&p->list);
BUG_ON(p->refcnt);
/* Reinitialise the page */
@@ -295,7 +295,7 @@ int init_boot_files(struct initdata *initdata)
struct vm_file *boot_file;
struct svc_image *img;
INIT_LIST_HEAD(&initdata->boot_file_list);
link_init(&initdata->boot_file_list);
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
@@ -311,7 +311,7 @@ int init_boot_files(struct initdata *initdata)
boot_file->vm_obj.pager = &bootfile_pager;
/* Add the file to initdata's bootfile list */
list_add_tail(&boot_file->list, &initdata->boot_file_list);
list_insert_tail(&boot_file->list, &initdata->boot_file_list);
}
return 0;
@@ -345,7 +345,7 @@ struct vm_file *get_devzero(void)
{
struct vm_file *f;
list_for_each_entry(f, &global_vm_files.list, list)
list_foreach_struct(f, &global_vm_files.list, list)
if (f->type == VM_FILE_DEVZERO)
return f;
return 0;

View File

@@ -130,7 +130,7 @@ void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list) {
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list) {
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shmid == shmid)
return do_shmat(shm_file, shmaddr,
@@ -156,7 +156,7 @@ int sys_shmdt(struct tcb *task, const void *shmaddr)
{
struct vm_file *shm_file, *n;
list_for_each_entry_safe(shm_file, n, &global_vm_files.list, list)
list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list)
if (shm_file->type == VM_FILE_SHM &&
shm_file_to_desc(shm_file)->shm_addr == shmaddr)
return do_shmdt(task, shm_file);
@@ -235,7 +235,7 @@ void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr)
struct vm_file *shm_file;
struct shm_descriptor *shm_desc;
list_for_each_entry(shm_file, &global_vm_files.list, list) {
list_foreach_struct(shm_file, &global_vm_files.list, list) {
if(shm_file->type == VM_FILE_SHM) {
shm_desc = shm_file_to_desc(shm_file);
/* Found the key, shmat that area */
@@ -274,7 +274,7 @@ int sys_shmget(key_t key, int size, int shmflg)
return shm_file_to_desc(shm)->shmid;
}
list_for_each_entry(shm, &global_vm_files.list, list) {
list_foreach_struct(shm, &global_vm_files.list, list) {
if (shm->type != VM_FILE_SHM)
continue;

View File

@@ -45,7 +45,7 @@ void print_tasks(void)
{
struct tcb *task;
printf("Tasks:\n========\n");
list_for_each_entry(task, &global_tasks.list, list) {
list_foreach_struct(task, &global_tasks.list, list) {
printf("Task tid: %d, spid: %d\n", task->tid, task->spid);
}
}
@@ -53,14 +53,14 @@ void print_tasks(void)
void global_add_task(struct tcb *task)
{
BUG_ON(!list_empty(&task->list));
list_add_tail(&task->list, &global_tasks.list);
list_insert_tail(&task->list, &global_tasks.list);
global_tasks.total++;
}
void global_remove_task(struct tcb *task)
{
BUG_ON(list_empty(&task->list));
list_del_init(&task->list);
list_remove_init(&task->list);
BUG_ON(--global_tasks.total < 0);
}
@@ -68,7 +68,7 @@ struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &global_tasks.list, list)
list_foreach_struct(t, &global_tasks.list, list)
if (t->tid == tid)
return t;
return 0;
@@ -89,7 +89,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->vm_area_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->vm_area_head->list);
link_init(&task->vm_area_head->list);
/* Also allocate a utcb head for new address space */
if (!(task->utcb_head =
@@ -99,7 +99,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
return PTR_ERR(-ENOMEM);
}
task->utcb_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->utcb_head->list);
link_init(&task->utcb_head->list);
}
/* Allocate file structures if not shared */
@@ -120,9 +120,9 @@ struct tcb *tcb_alloc_init(unsigned int flags)
task->tgid = TASK_ID_INVALID;
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->child_ref);
INIT_LIST_HEAD(&task->children);
link_init(&task->list);
link_init(&task->child_ref);
link_init(&task->children);
return task;
}
@@ -180,15 +180,15 @@ int tcb_destroy(struct tcb *task)
* All children of the current task becomes children
* of the parent of this task.
*/
list_for_each_entry_safe(child, n, &task->children,
list_foreach_removable_struct(child, n, &task->children,
child_ref) {
list_del_init(&child->child_ref);
list_add_tail(&child->child_ref,
list_remove_init(&child->child_ref);
list_insert_tail(&child->child_ref,
&task->parent->children);
child->parent = task->parent;
}
/* The task is not a child of its parent */
list_del_init(&task->child_ref);
list_remove_init(&task->child_ref);
/* Now task deletion make sure task is in no list */
BUG_ON(!list_empty(&task->list));
@@ -209,7 +209,7 @@ int task_copy_vmas(struct tcb *to, struct tcb *from)
{
struct vm_area *vma, *new_vma;
list_for_each_entry(vma, &from->vm_area_head->list, list) {
list_foreach_struct(vma, &from->vm_area_head->list, list) {
/* Create a new vma */
new_vma = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
@@ -233,12 +233,12 @@ int task_release_vmas(struct task_vma_head *vma_head)
{
struct vm_area *vma, *n;
list_for_each_entry_safe(vma, n, &vma_head->list, list) {
list_foreach_removable_struct(vma, n, &vma_head->list, list) {
/* Release all links */
vma_drop_merge_delete_all(vma);
/* Delete the vma from task's vma list */
list_del(&vma->list);
list_remove(&vma->list);
/* Free the vma */
kfree(vma);
@@ -358,11 +358,11 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
* On these conditions child shares
* the parent of the caller
*/
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->parent->children);
task->parent = parent->parent;
} else {
list_add_tail(&task->child_ref,
list_insert_tail(&task->child_ref,
&parent->children);
task->parent = parent;
}
@@ -370,7 +370,7 @@ struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
struct tcb *pager = find_task(PAGER_TID);
/* All parentless tasks are children of the pager */
list_add_tail(&task->child_ref, &pager->children);
list_insert_tail(&task->child_ref, &pager->children);
task->parent = pager;
}
@@ -674,7 +674,7 @@ int vfs_send_task_data(struct tcb *vfs)
tdata_head->total = global_tasks.total;
/* Write per-task data for all tasks */
list_for_each_entry(t, &global_tasks.list, list) {
list_foreach_struct(t, &global_tasks.list, list) {
tdata_head->tdata[li].tid = t->tid;
tdata_head->tdata[li].shpage_address = (unsigned long)t->shared_page;
li++;
@@ -697,7 +697,7 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
struct vm_area *vma;
list_for_each_entry(vma, &task->vm_area_head->list, list) {
list_foreach_struct(vma, &task->vm_area_head->list, list) {
for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
VM_READ | VM_WRITE) < 0);

View File

@@ -30,7 +30,7 @@ int vm_object_test_link_count(struct vm_object *vmo)
int links = 0;
struct vm_obj_link *l;
list_for_each_entry(l, &vmo->link_list, linkref)
list_foreach_struct(l, &vmo->link_list, linkref)
links++;
BUG_ON(links != vmo->nlinks);
@@ -42,7 +42,7 @@ int vm_object_test_shadow_count(struct vm_object *vmo)
struct vm_object *sh;
int shadows = 0;
list_for_each_entry(sh, &vmo->shdw_list, shref)
list_foreach_struct(sh, &vmo->shdw_list, shref)
shadows++;
BUG_ON(shadows != vmo->shadows);
@@ -64,7 +64,7 @@ int mm0_test_global_vm_integrity(void)
memset(&vmstat, 0, sizeof(vmstat));
/* Count all shadow and file objects */
list_for_each_entry(vmo, &global_vm_objects.list, list) {
list_foreach_struct(vmo, &global_vm_objects.list, list) {
vmstat.shadows_referred += vmo->shadows;
if (vmo->flags & VM_OBJ_SHADOW)
vmstat.shadow_objects++;
@@ -76,7 +76,7 @@ int mm0_test_global_vm_integrity(void)
}
/* Count all registered vmfiles */
list_for_each_entry(f, &global_vm_files.list, list) {
list_foreach_struct(f, &global_vm_files.list, list) {
vmstat.vm_files++;
if (f->type == VM_FILE_SHM)
vmstat.shm_files++;
@@ -116,7 +116,7 @@ int mm0_test_global_vm_integrity(void)
BUG_ON(vmstat.shadow_objects != vmstat.shadows_referred);
/* Count all tasks */
list_for_each_entry(task, &global_tasks.list, list)
list_foreach_struct(task, &global_tasks.list, list)
vmstat.tasks++;
if (vmstat.tasks != global_tasks.total) {

View File

@@ -67,7 +67,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
if (!(d = kzalloc(sizeof(*d))))
return 0;
INIT_LIST_HEAD(&d->list);
link_init(&d->list);
/* We currently assume UTCB is smaller than PAGE_SIZE */
BUG_ON(UTCB_SIZE > PAGE_SIZE);
@@ -80,7 +80,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
d->utcb_base = (unsigned long)utcb_new_address(1);
/* Add descriptor to tcb's chain */
list_add(&d->list, &task->utcb_head->list);
list_insert(&d->list, &task->utcb_head->list);
/* Obtain and return first slot */
return utcb_new_slot(d);
@@ -89,7 +89,7 @@ unsigned long task_new_utcb_desc(struct tcb *task)
int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
{
/* Unlink desc from its list */
list_del_init(&d->list);
list_remove_init(&d->list);
/* Unmap the descriptor region */
do_munmap(task, d->utcb_base, 1);
@@ -104,7 +104,7 @@ int task_delete_utcb_desc(struct tcb *task, struct utcb_desc *d)
}
/*
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
 * Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* address space. A new utcb is allocated and mmap'ed for the child task
* running in the newly created address space.
*
@@ -126,7 +126,7 @@ int task_setup_utcb(struct tcb *task)
BUG_ON(task->utcb_address);
/* Search for an empty utcb slot already allocated to this space */
list_for_each_entry(udesc, &task->utcb_head->list, list)
list_foreach_struct(udesc, &task->utcb_head->list, list)
if ((slot = utcb_new_slot(udesc)))
goto out;
@@ -163,7 +163,7 @@ int task_destroy_utcb(struct tcb *task)
// printf("UTCB: Destroying 0x%x\n", task->utcb_address);
/* Find the utcb descriptor slot first */
list_for_each_entry(udesc, &task->utcb_head->list, list) {
list_foreach_struct(udesc, &task->utcb_head->list, list) {
/* FIXME: Use variable alignment than a page */
/* Detect matching slot */
if (page_align(task->utcb_address) == udesc->utcb_base) {

View File

@@ -26,21 +26,21 @@ struct global_list global_vm_objects = {
void global_add_vm_object(struct vm_object *obj)
{
BUG_ON(!list_empty(&obj->list));
list_add(&obj->list, &global_vm_objects.list);
list_insert(&obj->list, &global_vm_objects.list);
global_vm_objects.total++;
}
void global_remove_vm_object(struct vm_object *obj)
{
BUG_ON(list_empty(&obj->list));
list_del_init(&obj->list);
list_remove_init(&obj->list);
BUG_ON(--global_vm_objects.total < 0);
}
void global_add_vm_file(struct vm_file *f)
{
BUG_ON(!list_empty(&f->list));
list_add(&f->list, &global_vm_files.list);
list_insert(&f->list, &global_vm_files.list);
global_vm_files.total++;
global_add_vm_object(&f->vm_obj);
@@ -49,7 +49,7 @@ void global_add_vm_file(struct vm_file *f)
void global_remove_vm_file(struct vm_file *f)
{
BUG_ON(list_empty(&f->list));
list_del_init(&f->list);
list_remove_init(&f->list);
BUG_ON(--global_vm_files.total < 0);
global_remove_vm_object(&f->vm_obj);
@@ -62,7 +62,7 @@ void print_cache_pages(struct vm_object *vmo)
if (!list_empty(&vmo->page_cache))
printf("Pages:\n======\n");
list_for_each_entry(p, &vmo->page_cache, list) {
list_foreach_struct(p, &vmo->page_cache, list) {
dprintf("Page offset: 0x%x, virtual: 0x%x, refcnt: %d\n", p->offset,
p->virtual, p->refcnt);
}
@@ -97,29 +97,29 @@ void vm_object_print(struct vm_object *vmo)
// printf("\n");
}
void vm_print_files(struct list_head *files)
void vm_print_files(struct link *files)
{
struct vm_file *f;
list_for_each_entry(f, files, list)
list_foreach_struct(f, files, list)
vm_object_print(&f->vm_obj);
}
void vm_print_objects(struct list_head *objects)
void vm_print_objects(struct link *objects)
{
struct vm_object *vmo;
list_for_each_entry(vmo, objects, list)
list_foreach_struct(vmo, objects, list)
vm_object_print(vmo);
}
struct vm_object *vm_object_init(struct vm_object *obj)
{
INIT_LIST_HEAD(&obj->list);
INIT_LIST_HEAD(&obj->shref);
INIT_LIST_HEAD(&obj->shdw_list);
INIT_LIST_HEAD(&obj->page_cache);
INIT_LIST_HEAD(&obj->link_list);
link_init(&obj->list);
link_init(&obj->shref);
link_init(&obj->shdw_list);
link_init(&obj->page_cache);
link_init(&obj->link_list);
return obj;
}
@@ -142,7 +142,7 @@ struct vm_file *vm_file_create(void)
if (!(f = kzalloc(sizeof(*f))))
return PTR_ERR(-ENOMEM);
INIT_LIST_HEAD(&f->list);
link_init(&f->list);
vm_object_init(&f->vm_obj);
f->vm_obj.flags = VM_OBJ_FILE;

View File

@@ -1,6 +0,0 @@
for filename in `find ./ -name '*.[ch]'`
do
sed -i -e "s/ORIGINAL/REPLACEMENT/g" $filename
done