Removed Linux linked list dependency.

Bahadir Balban
2009-06-02 13:19:17 +03:00
parent 4757f46f71
commit 276b4643c6
69 changed files with 455 additions and 885 deletions
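The change is a mechanical rename away from the Linux list API to an in-tree struct link API: struct list_head becomes struct link, INIT_LIST_HEAD() becomes link_init(), list_add()/list_add_tail() become list_insert()/list_insert_tail(), list_del()/list_del_init() become list_remove()/list_remove_init(), list_entry() becomes link_to_struct(), and list_for_each_entry()/list_for_each_entry_safe() become list_foreach_struct()/list_foreach_removable_struct(). The diff shows only the call sites; the sketch below is a rough guess at what the new primitives look like, assuming they keep the semantics of the circular doubly linked list they replace. The field names and inline bodies are assumptions for illustration, not the actual contents of the new list header.

/*
 * Minimal sketch of the presumed replacement API. Inferred from the
 * 1:1 renames in this commit; field names and definitions are
 * assumptions, not the project's real lib/list.h.
 */
struct link {
	struct link *next;	/* assumed field names */
	struct link *prev;
};

/* Presumably mirrors INIT_LIST_HEAD(): node points back to itself */
static inline void link_init(struct link *l)
{
	l->next = l;
	l->prev = l;
}

static inline int list_empty(struct link *l)
{
	return l->next == l;
}

/* Presumably mirrors list_add(): insert right after the head */
static inline void list_insert(struct link *new, struct link *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Presumably mirrors list_add_tail(): insert just before the head */
static inline void list_insert_tail(struct link *new, struct link *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Presumably mirror list_del() and list_del_init() */
static inline void list_remove(struct link *l)
{
	l->prev->next = l->next;
	l->next->prev = l->prev;
}

static inline void list_remove_init(struct link *l)
{
	list_remove(l);
	link_init(l);
}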

View File

@@ -12,7 +12,7 @@
 #define KMALLOC_POOLS_MAX 5
 struct kmalloc_pool_head {
-	struct list_head cache_list;
+	struct link cache_list;
 	int occupied;
 	int total_caches;
 	int cache_size;
@@ -28,7 +28,7 @@ struct kmalloc_mempool km_pool;
 void init_kmalloc()
 {
 	for (int i = 0; i < KMALLOC_POOLS_MAX; i++) {
-		INIT_LIST_HEAD(&km_pool.pool_head[i].cache_list);
+		link_init(&km_pool.pool_head[i].cache_list);
 		km_pool.pool_head[i].occupied = 0;
 		km_pool.pool_head[i].total_caches = 0;
 		km_pool.pool_head[i].cache_size = 0;
@@ -58,7 +58,7 @@ void *__kmalloc(int size)
 		 * Found the pool, now see if any
 		 * cache has available slots
 		 */
-		list_for_each_entry(cache, &km_pool.pool_head[i].cache_list,
+		list_foreach_struct(cache, &km_pool.pool_head[i].cache_list,
				    list) {
			if (cache->free)
				return mem_cache_alloc(cache);
@@ -89,7 +89,7 @@ void *__kmalloc(int size)
 	BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
					size, 0)));
 	// printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
-	list_add(&cache->list, &km_pool.pool_head[index].cache_list);
+	list_insert(&cache->list, &km_pool.pool_head[index].cache_list);
 	km_pool.pool_head[index].occupied = 1;
 	km_pool.pool_head[index].total_caches++;
 	km_pool.pool_head[index].cache_size = size;
@@ -115,13 +115,13 @@ int __kfree(void *p)
 	struct mem_cache *cache, *tmp;
 	for (int i = 0; i < km_pool.total; i++)
-		list_for_each_entry_safe(cache, tmp,
+		list_foreach_removable_struct(cache, tmp,
				&km_pool.pool_head[i].cache_list,
				list) {
			if (!mem_cache_free(cache, p)) {
				if (mem_cache_is_empty(cache)) {
					km_pool.pool_head[i].total_caches--;
-					list_del(&cache->list);
+					list_remove(&cache->list);
					free_page(cache);
					/*
					 * Total remains the same but slot
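The __kmalloc()/__kfree() hunks above also rely on the renamed traversal helpers. Assuming they keep the container_of-style semantics of the Linux macros they replace, they could plausibly look like the following; these definitions are illustrative guesses, not the project's actual macros.

#include <stddef.h>	/* offsetof */

/* Illustrative only -- assumed to match the macros being replaced */
#define link_to_struct(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk every entry; the list must not be modified during the walk */
#define list_foreach_struct(pos, head, member)				\
	for (pos = link_to_struct((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = link_to_struct(pos->member.next, typeof(*pos), member))

/* Removable variant: tmp caches the next entry so pos may be unlinked/freed */
#define list_foreach_removable_struct(pos, tmp, head, member)		    \
	for (pos = link_to_struct((head)->next, typeof(*pos), member),	    \
	     tmp = link_to_struct(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);					    \
	     pos = tmp,							    \
	     tmp = link_to_struct(tmp->member.next, typeof(*tmp), member))

As with the original safe iterator, caching the next node is what lets __kfree() above call list_remove() and free_page() on the current cache without breaking the walk.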

View File

@@ -28,15 +28,15 @@
 /* Covers 3 main types of memory needed by the kernel. */
 struct pgalloc {
-	struct list_head cache_list[3];
+	struct link cache_list[3];
 };
 static struct pgalloc pgalloc;
 void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
 {
-	INIT_LIST_HEAD(&cache->list);
+	link_init(&cache->list);
 	BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
-	list_add(&cache->list, &pgalloc.cache_list[cidx]);
+	list_insert(&cache->list, &pgalloc.cache_list[cidx]);
 }
 void print_kmem_grant_params(grant_kmem_usage_t *params)
@@ -108,7 +108,7 @@ void init_pgalloc(void)
 	int initial_grant = PGALLOC_INIT_GRANT;
 	for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
-		INIT_LIST_HEAD(&pgalloc.cache_list[i]);
+		link_init(&pgalloc.cache_list[i]);
 	/* Grant ourselves with an initial chunk of physical memory */
 	physmem.free_cur = page_align_up(physmem.free_cur);
@@ -122,14 +122,14 @@ void init_pgalloc(void)
 void pgalloc_remove_cache(struct mem_cache *cache)
 {
-	list_del_init(&cache->list);
+	list_remove_init(&cache->list);
 }
 static inline void *pgalloc_from_cache(int cidx)
 {
 	struct mem_cache *cache, *n;
-	list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
+	list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
 		if (mem_cache_total_empty(cache))
			return mem_cache_zalloc(cache);
 	return 0;
@@ -139,7 +139,7 @@ int kfree_to_cache(int cidx, void *virtual)
 {
 	struct mem_cache *cache, *n;
-	list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
+	list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
 		if (mem_cache_free(cache, virtual) == 0)
			return 0;
 	return -1;

View File

@@ -26,7 +26,7 @@
 /* A basic runqueue */
 struct runqueue {
 	struct spinlock lock;		/* Lock */
-	struct list_head task_list;	/* List of tasks in rq */
+	struct link task_list;		/* List of tasks in rq */
 	unsigned int total;		/* Total tasks */
 };
@@ -101,7 +101,7 @@ void sched_init_runqueues(void)
 {
 	for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
 		memset(&sched_rq[i], 0, sizeof(struct runqueue));
-		INIT_LIST_HEAD(&sched_rq[i].task_list);
+		link_init(&sched_rq[i].task_list);
 		spin_lock_init(&sched_rq[i].lock);
 	}
@@ -135,9 +135,9 @@ static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
 	sched_lock_runqueues();
 	if (front)
-		list_add(&task->rq_list, &rq->task_list);
+		list_insert(&task->rq_list, &rq->task_list);
 	else
-		list_add_tail(&task->rq_list, &rq->task_list);
+		list_insert_tail(&task->rq_list, &rq->task_list);
 	rq->total++;
 	task->rq = rq;
 	sched_unlock_runqueues();
@@ -156,7 +156,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
 	 */
 	rq = task->rq;
 	BUG_ON(list_empty(&task->rq_list));
-	list_del_init(&task->rq_list);
+	list_remove_init(&task->rq_list);
 	task->rq = 0;
 	rq->total--;
@@ -167,7 +167,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
 void sched_init_task(struct ktcb *task, int prio)
 {
-	INIT_LIST_HEAD(&task->rq_list);
+	link_init(&task->rq_list);
 	task->priority = prio;
 	task->ticks_left = 0;
 	task->state = TASK_INACTIVE;
@@ -345,12 +345,12 @@ void schedule()
 	/* Determine the next task to be run */
 	if (rq_runnable->total > 0) {
-		next = list_entry(rq_runnable->task_list.next,
+		next = link_to_struct(rq_runnable->task_list.next,
				  struct ktcb, rq_list);
 	} else {
 		if (rq_expired->total > 0) {
			sched_rq_swap_runqueues();
-			next = list_entry(rq_runnable->task_list.next,
+			next = link_to_struct(rq_runnable->task_list.next,
					  struct ktcb, rq_list);
		} else {
			printk("Idle task.\n");

View File

@@ -16,7 +16,7 @@
 #include <l4/lib/idpool.h>
 struct address_space_list {
-	struct list_head list;
+	struct link list;
 	/* Lock for list add/removal */
 	struct spinlock list_lock;
@@ -34,7 +34,7 @@ void init_address_space_list(void)
 	mutex_init(&address_space_list.ref_lock);
 	spin_lock_init(&address_space_list.list_lock);
-	INIT_LIST_HEAD(&address_space_list.list);
+	link_init(&address_space_list.list);
 }
 void address_space_reference_lock()
@@ -58,7 +58,7 @@ struct address_space *address_space_find(l4id_t spid)
 	struct address_space *space;
 	spin_lock(&address_space_list.list_lock);
-	list_for_each_entry(space, &address_space_list.list, list) {
+	list_foreach_struct(space, &address_space_list.list, list) {
 		if (space->spid == spid) {
			spin_unlock(&address_space_list.list_lock);
			return space;
@@ -72,7 +72,7 @@ void address_space_add(struct address_space *space)
 {
 	spin_lock(&address_space_list.list_lock);
 	BUG_ON(!list_empty(&space->list));
-	list_add(&space->list, &address_space_list.list);
+	list_insert(&space->list, &address_space_list.list);
 	BUG_ON(!++address_space_list.count);
 	spin_unlock(&address_space_list.list_lock);
 }
@@ -82,7 +82,7 @@ void address_space_remove(struct address_space *space)
 	spin_lock(&address_space_list.list_lock);
 	BUG_ON(list_empty(&space->list));
 	BUG_ON(--address_space_list.count < 0);
-	list_del_init(&space->list);
+	list_remove_init(&space->list);
 	spin_unlock(&address_space_list.list_lock);
 }
@@ -118,7 +118,7 @@ struct address_space *address_space_create(struct address_space *orig)
 	}
 	/* Initialize space structure */
-	INIT_LIST_HEAD(&space->list);
+	link_init(&space->list);
 	mutex_init(&space->lock);
 	space->pgd = pgd;

View File

@@ -21,7 +21,7 @@ struct id_pool *space_id_pool;
 /* Hash table for all existing tasks */
 struct ktcb_list {
-	struct list_head list;
+	struct link list;
 	struct spinlock list_lock;
 	int count;
 };
@@ -32,12 +32,12 @@ void init_ktcb_list(void)
 {
 	memset(&ktcb_list, 0, sizeof(ktcb_list));
 	spin_lock_init(&ktcb_list.list_lock);
-	INIT_LIST_HEAD(&ktcb_list.list);
+	link_init(&ktcb_list.list);
 }
 void tcb_init(struct ktcb *new)
 {
-	INIT_LIST_HEAD(&new->task_list);
+	link_init(&new->task_list);
 	mutex_init(&new->thread_control_lock);
 	/* Initialise task's scheduling state and parameters. */
@@ -107,7 +107,7 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
 	struct ktcb *task;
 	spin_lock(&ktcb_list.list_lock);
-	list_for_each_entry(task, &ktcb_list.list, task_list) {
+	list_foreach_struct(task, &ktcb_list.list, task_list) {
 		if (task->space->spid == spid) {
			spin_unlock(&ktcb_list.list_lock);
			return task;
@@ -122,7 +122,7 @@ struct ktcb *tcb_find(l4id_t tid)
 	struct ktcb *task;
 	spin_lock(&ktcb_list.list_lock);
-	list_for_each_entry(task, &ktcb_list.list, task_list) {
+	list_foreach_struct(task, &ktcb_list.list, task_list) {
 		if (task->tid == tid) {
			spin_unlock(&ktcb_list.list_lock);
			return task;
@@ -137,7 +137,7 @@ void tcb_add(struct ktcb *new)
 	spin_lock(&ktcb_list.list_lock);
 	BUG_ON(!list_empty(&new->task_list));
 	BUG_ON(!++ktcb_list.count);
-	list_add(&new->task_list, &ktcb_list.list);
+	list_insert(&new->task_list, &ktcb_list.list);
 	spin_unlock(&ktcb_list.list_lock);
 }
@@ -146,7 +146,7 @@ void tcb_remove(struct ktcb *new)
 	spin_lock(&ktcb_list.list_lock);
 	BUG_ON(list_empty(&new->task_list));
 	BUG_ON(--ktcb_list.count < 0);
-	list_del_init(&new->task_list);
+	list_remove_init(&new->task_list);
 	spin_unlock(&ktcb_list.list_lock);
 }
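All of the converted call sites in this commit follow the same insert/search/remove pattern with the new names. A short usage sketch with a hypothetical element type and list head, assuming the struct link semantics inferred earlier (the locking used by the real code is omitted):

/* Hypothetical types and function, for illustration only */
struct item {
	int id;
	struct link list;	/* linkage into the item list */
};

static struct link item_list;	/* list head */

void item_list_example(struct item *it)
{
	struct item *pos, *tmp;

	link_init(&item_list);			/* was INIT_LIST_HEAD() */

	/* Add, as in tcb_add()/address_space_add(): was list_add() */
	link_init(&it->list);
	list_insert(&it->list, &item_list);

	/* Search, as in tcb_find(): was list_for_each_entry() */
	list_foreach_struct(pos, &item_list, list)
		if (pos->id == it->id)
			break;

	/* Remove while iterating, as in __kfree(): was list_for_each_entry_safe() */
	list_foreach_removable_struct(pos, tmp, &item_list, list)
		if (pos->id == it->id)
			list_remove_init(&pos->list);	/* was list_del_init() */
}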