Removed linux linked list dependency.

Bahadir Balban
2009-06-02 13:19:17 +03:00
parent 4757f46f71
commit 276b4643c6
69 changed files with 455 additions and 885 deletions
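The replacement header itself is not part of the hunks below, which show only call sites. The visible mapping is: struct list_head → struct link, INIT_LIST_HEAD() → link_init(), list_add()/list_add_tail() → list_insert()/list_insert_tail(), list_del()/list_del_init() → list_remove()/list_remove_init(), list_entry() → link_to_struct(), and list_for_each_entry()/list_for_each_entry_safe() → list_foreach_struct()/list_foreach_removable_struct(). The following is a minimal sketch of what the new intrusive-list header could look like, reconstructed from those call sites alone; the field names, macro bodies, and exact signatures are assumptions modelled on the Linux list.h semantics being replaced.

/*
 * Sketch of the new list API; reconstructed, not copied from the tree.
 * Requires GNU C (typeof), as the old Linux list.h did.
 */
#include <stddef.h>

struct link {
        struct link *next;
        struct link *prev;
};

/* INIT_LIST_HEAD() -> link_init(): a link points at itself when unlinked */
static inline void link_init(struct link *l)
{
        l->next = l;
        l->prev = l;
}

static inline void __list_insert(struct link *l, struct link *prev,
                                 struct link *next)
{
        prev->next = l;
        next->prev = l;
        l->prev = prev;
        l->next = next;
}

/* list_add() -> list_insert(): push at the front */
static inline void list_insert(struct link *l, struct link *head)
{
        __list_insert(l, head, head->next);
}

/* list_add_tail() -> list_insert_tail(): append at the back */
static inline void list_insert_tail(struct link *l, struct link *head)
{
        __list_insert(l, head->prev, head);
}

/* list_del_init() -> list_remove_init(): unlink, then re-point at itself */
static inline void list_remove_init(struct link *l)
{
        l->prev->next = l->next;
        l->next->prev = l->prev;
        link_init(l);
}

/* list_empty() keeps its old name throughout the diff */
static inline int list_empty(struct link *l)
{
        return l->next == l;
}

/* list_entry() -> link_to_struct(): container_of in disguise */
#define link_to_struct(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* list_for_each_entry() -> list_foreach_struct() */
#define list_foreach_struct(pos, head, member)                          \
        for (pos = link_to_struct((head)->next, typeof(*pos), member); \
             &pos->member != (head);                                   \
             pos = link_to_struct(pos->member.next, typeof(*pos), member))

/* list_for_each_entry_safe() -> list_foreach_removable_struct():
 * caches the next node in 'n' so the body may remove 'pos' */
#define list_foreach_removable_struct(pos, n, head, member)              \
        for (pos = link_to_struct((head)->next, typeof(*pos), member),  \
             n = link_to_struct(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);                                     \
             pos = n, n = link_to_struct(pos->member.next, typeof(*pos), member))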

View File

@@ -232,7 +232,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
struct waitqueue *wq = receiver->wq;
/* Remove from waitqueue */
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
wqhr->sleepers--;
task_unset_wqh(receiver);
@@ -255,7 +255,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
/* The receiver is not ready and/or not expecting us */
CREATE_WAITQUEUE_ON_STACK(wq, current);
wqhs->sleepers++;
-list_add_tail(&wq.task_list, &wqhs->task_list);
+list_insert_tail(&wq.task_list, &wqhs->task_list);
task_set_wqh(current, wqhs, &wq);
sched_prepare_sleep();
spin_unlock(&wqhr->slock);
@@ -292,13 +292,13 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
BUG_ON(list_empty(&wqhs->task_list));
/* Look for a sender we want to receive from */
-list_for_each_entry_safe(wq, n, &wqhs->task_list, task_list) {
+list_foreach_removable_struct(wq, n, &wqhs->task_list, task_list) {
sleeper = wq->task;
/* Found a sender that we wanted to receive from */
if ((sleeper->tid == current->expected_sender) ||
(current->expected_sender == L4_ANYTHREAD)) {
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
wqhs->sleepers--;
task_unset_wqh(sleeper);
spin_unlock(&wqhr->slock);
@@ -320,7 +320,7 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
/* The sender is not ready */
CREATE_WAITQUEUE_ON_STACK(wq, current);
wqhr->sleepers++;
-list_add_tail(&wq.task_list, &wqhr->task_list);
+list_insert_tail(&wq.task_list, &wqhr->task_list);
task_set_wqh(current, wqhr, &wq);
sched_prepare_sleep();
// printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
@@ -341,7 +341,7 @@ int ipc_recv(l4id_t senderid, unsigned int flags)
* (1) User task (client) calls ipc_sendrecv();
* (2) System task (server) calls ipc_recv() with from == ANYTHREAD.
* (3) Rendezvous occurs. Both tasks exchange mrs and leave rendezvous.
-* (4,5) User task, immediately calls ipc_recv(), expecting a reply from server.
+* (4,5) User task, immediately calls ipc_recv(), expecting a origy from server.
* (4,5) System task handles the request in userspace.
* (6) System task calls ipc_send() sending the return result.
* (7) Rendezvous occurs. Both tasks exchange mrs and leave rendezvous.
@@ -355,7 +355,7 @@ int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags)
if ((ret = ipc_send(to, flags)) < 0)
return ret;
/*
-* Get reply. A client would block its server
+* Get origy. A client would block its server
* only very briefly between these calls.
*/
if ((ret = ipc_recv(from, flags)) < 0)
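One detail worth flagging in the ipc_recv() hunks above: the search loop unlinks the matching waitqueue while the walk is still in progress, which is only safe with the removable variant, since it caches the next node before the body runs. A condensed sketch of that loop, assuming the new iterator keeps the old list_for_each_entry_safe() semantics:

struct waitqueue *wq, *n;

/* 'n' always holds the next node, so removing 'wq' mid-walk is safe */
list_foreach_removable_struct(wq, n, &wqhs->task_list, task_list) {
        if ((wq->task->tid == current->expected_sender) ||
            (current->expected_sender == L4_ANYTHREAD)) {
                list_remove_init(&wq->task_list);  /* unlink this sleeper */
                wqhs->sleepers--;
                break;
        }
}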

View File

@@ -18,13 +18,13 @@
struct mutex_queue {
unsigned long physical;
-struct list_head list;
+struct link list;
struct waitqueue_head wqh_waiters;
struct waitqueue_head wqh_wakers;
};
struct mutex_queue_head {
-struct list_head list;
+struct link list;
/*
* Single lock for:
@@ -42,7 +42,7 @@ struct mutex_queue_head {
void init_mutex_queue_head(void)
{
memset(&mutex_queue_head, 0, sizeof (mutex_queue_head));
-INIT_LIST_HEAD(&mutex_queue_head.list);
+link_init(&mutex_queue_head.list);
mutex_init(&mutex_queue_head.mutex_control_mutex);
}
void mutex_queue_head_lock()
@@ -61,7 +61,7 @@ void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
/* This is the unique key that describes this mutex */
mq->physical = physical;
-INIT_LIST_HEAD(&mq->list);
+link_init(&mq->list);
waitqueue_head_init(&mq->wqh_wakers);
waitqueue_head_init(&mq->wqh_waiters);
}
@@ -70,13 +70,13 @@ void mutex_control_add(struct mutex_queue *mq)
{
BUG_ON(!list_empty(&mq->list));
-list_add(&mq->list, &mutex_queue_head.list);
+list_insert(&mq->list, &mutex_queue_head.list);
mutex_queue_head.count++;
}
void mutex_control_remove(struct mutex_queue *mq)
{
-list_del_init(&mq->list);
+list_remove_init(&mq->list);
mutex_queue_head.count--;
}
@@ -86,7 +86,7 @@ struct mutex_queue *mutex_control_find(unsigned long mutex_physical)
struct mutex_queue *mutex_queue;
/* Find the mutex queue with this key */
-list_for_each_entry(mutex_queue, &mutex_queue_head.list, list)
+list_foreach_struct(mutex_queue, &mutex_queue_head.list, list)
if (mutex_queue->physical == mutex_physical)
return mutex_queue;
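mutex_control_find() treats a mutex's physical address as the lookup key into the global queue list. A plausible caller follows a find-or-create pattern under the head lock; alloc_mutex_queue() and mutex_queue_head_unlock() are assumed names here, not taken from the diff:

struct mutex_queue *mq;

mutex_queue_head_lock();
if (!(mq = mutex_control_find(mutex_physical))) {
        mq = alloc_mutex_queue();               /* assumed allocator */
        mutex_queue_init(mq, mutex_physical);   /* sets the key, inits the link */
        mutex_control_add(mq);                  /* links into the global list */
}
mutex_queue_head_unlock();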

View File

@@ -13,7 +13,7 @@
* For lazy mm switching, a list of newly created mappings that are common to
* all tasks (e.g. any mapping done in the kernel) can be kept here so that when
* a new task is scheduled, the same mappings are copied to its page tables as
-* well. struct list_head new_mappings;
+* well. struct link new_mappings;
*/
int sys_map(syscall_context_t *regs)

View File

@@ -616,7 +616,7 @@ void relocate_page_tables(void)
* Useful for upgrading to page-grained control over a section mapping:
* Remaps a section mapping in pages. It allocates a pmd, (at all times because
* there can't really be an already existing pmd for a section mapping) fills
-in the page information, and replaces the direct section physical translation
+in the page information, and origaces the direct section physical translation
* with the address of the pmd. Flushes the caches/tlbs.
*/
void remap_as_pages(void *vstart, void *vend)

View File

@@ -12,7 +12,7 @@
#define KMALLOC_POOLS_MAX 5
struct kmalloc_pool_head {
-struct list_head cache_list;
+struct link cache_list;
int occupied;
int total_caches;
int cache_size;
@@ -28,7 +28,7 @@ struct kmalloc_mempool km_pool;
void init_kmalloc()
{
for (int i = 0; i < KMALLOC_POOLS_MAX; i++) {
-INIT_LIST_HEAD(&km_pool.pool_head[i].cache_list);
+link_init(&km_pool.pool_head[i].cache_list);
km_pool.pool_head[i].occupied = 0;
km_pool.pool_head[i].total_caches = 0;
km_pool.pool_head[i].cache_size = 0;
@@ -58,7 +58,7 @@ void *__kmalloc(int size)
* Found the pool, now see if any
* cache has available slots
*/
-list_for_each_entry(cache, &km_pool.pool_head[i].cache_list,
+list_foreach_struct(cache, &km_pool.pool_head[i].cache_list,
list) {
if (cache->free)
return mem_cache_alloc(cache);
@@ -89,7 +89,7 @@ void *__kmalloc(int size)
BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
size, 0)));
// printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
-list_add(&cache->list, &km_pool.pool_head[index].cache_list);
+list_insert(&cache->list, &km_pool.pool_head[index].cache_list);
km_pool.pool_head[index].occupied = 1;
km_pool.pool_head[index].total_caches++;
km_pool.pool_head[index].cache_size = size;
@@ -115,13 +115,13 @@ int __kfree(void *p)
struct mem_cache *cache, *tmp;
for (int i = 0; i < km_pool.total; i++)
-list_for_each_entry_safe(cache, tmp,
+list_foreach_removable_struct(cache, tmp,
&km_pool.pool_head[i].cache_list,
list) {
if (!mem_cache_free(cache, p)) {
if (mem_cache_is_empty(cache)) {
km_pool.pool_head[i].total_caches--;
-list_del(&cache->list);
+list_remove(&cache->list);
free_page(cache);
/*
* Total remains the same but slot

View File

@@ -28,15 +28,15 @@
/* Covers 3 main types of memory needed by the kernel. */
struct pgalloc {
-struct list_head cache_list[3];
+struct link cache_list[3];
};
static struct pgalloc pgalloc;
void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
{
-INIT_LIST_HEAD(&cache->list);
+link_init(&cache->list);
BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
-list_add(&cache->list, &pgalloc.cache_list[cidx]);
+list_insert(&cache->list, &pgalloc.cache_list[cidx]);
}
void print_kmem_grant_params(grant_kmem_usage_t *params)
@@ -108,7 +108,7 @@ void init_pgalloc(void)
int initial_grant = PGALLOC_INIT_GRANT;
for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
-INIT_LIST_HEAD(&pgalloc.cache_list[i]);
+link_init(&pgalloc.cache_list[i]);
/* Grant ourselves with an initial chunk of physical memory */
physmem.free_cur = page_align_up(physmem.free_cur);
@@ -122,14 +122,14 @@ void init_pgalloc(void)
void pgalloc_remove_cache(struct mem_cache *cache)
{
-list_del_init(&cache->list);
+list_remove_init(&cache->list);
}
static inline void *pgalloc_from_cache(int cidx)
{
struct mem_cache *cache, *n;
-list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
+list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_total_empty(cache))
return mem_cache_zalloc(cache);
return 0;
@@ -139,7 +139,7 @@ int kfree_to_cache(int cidx, void *virtual)
{
struct mem_cache *cache, *n;
-list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
+list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_free(cache, virtual) == 0)
return 0;
return -1;
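kmalloc and pgalloc share one shape: each size class (or cache index) keeps a list of mem_caches, the fast path scans the list for a cache with a free slot, and the slow path appends a fresh cache and retries. A condensed sketch of that shape; the struct mem_cache layout is abbreviated and grow_pool() is a hypothetical stand-in for the cache-creation path:

struct mem_cache {
        struct link list;       /* links into a pool's cache_list */
        int free;               /* free slots left in this cache */
        /* ... allocator bookkeeping elided ... */
};

void *alloc_from_pool(struct link *pool, int size)
{
        struct mem_cache *cache;

        /* Fast path: reuse any cache in this class with room left */
        list_foreach_struct(cache, pool, list)
                if (cache->free)
                        return mem_cache_alloc(cache);

        /* Slow path: create a new cache, link it in, allocate from it */
        if (!(cache = grow_pool(pool, size)))
                return 0;
        return mem_cache_alloc(cache);
}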

View File

@@ -26,7 +26,7 @@
/* A basic runqueue */
struct runqueue {
struct spinlock lock; /* Lock */
-struct list_head task_list; /* List of tasks in rq */
+struct link task_list; /* List of tasks in rq */
unsigned int total; /* Total tasks */
};
@@ -101,7 +101,7 @@ void sched_init_runqueues(void)
{
for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
memset(&sched_rq[i], 0, sizeof(struct runqueue));
-INIT_LIST_HEAD(&sched_rq[i].task_list);
+link_init(&sched_rq[i].task_list);
spin_lock_init(&sched_rq[i].lock);
}
@@ -135,9 +135,9 @@ static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
sched_lock_runqueues();
if (front)
-list_add(&task->rq_list, &rq->task_list);
+list_insert(&task->rq_list, &rq->task_list);
else
-list_add_tail(&task->rq_list, &rq->task_list);
+list_insert_tail(&task->rq_list, &rq->task_list);
rq->total++;
task->rq = rq;
sched_unlock_runqueues();
@@ -156,7 +156,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
*/
rq = task->rq;
BUG_ON(list_empty(&task->rq_list));
-list_del_init(&task->rq_list);
+list_remove_init(&task->rq_list);
task->rq = 0;
rq->total--;
@@ -167,7 +167,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
void sched_init_task(struct ktcb *task, int prio)
{
-INIT_LIST_HEAD(&task->rq_list);
+link_init(&task->rq_list);
task->priority = prio;
task->ticks_left = 0;
task->state = TASK_INACTIVE;
@@ -345,12 +345,12 @@ void schedule()
/* Determine the next task to be run */
if (rq_runnable->total > 0) {
-next = list_entry(rq_runnable->task_list.next,
+next = link_to_struct(rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
if (rq_expired->total > 0) {
sched_rq_swap_runqueues();
-next = list_entry(rq_runnable->task_list.next,
+next = link_to_struct(rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
printk("Idle task.\n");

View File

@@ -16,7 +16,7 @@
#include <l4/lib/idpool.h>
struct address_space_list {
-struct list_head list;
+struct link list;
/* Lock for list add/removal */
struct spinlock list_lock;
@@ -34,7 +34,7 @@ void init_address_space_list(void)
mutex_init(&address_space_list.ref_lock);
spin_lock_init(&address_space_list.list_lock);
-INIT_LIST_HEAD(&address_space_list.list);
+link_init(&address_space_list.list);
}
void address_space_reference_lock()
@@ -58,7 +58,7 @@ struct address_space *address_space_find(l4id_t spid)
struct address_space *space;
spin_lock(&address_space_list.list_lock);
-list_for_each_entry(space, &address_space_list.list, list) {
+list_foreach_struct(space, &address_space_list.list, list) {
if (space->spid == spid) {
spin_unlock(&address_space_list.list_lock);
return space;
@@ -72,7 +72,7 @@ void address_space_add(struct address_space *space)
{
spin_lock(&address_space_list.list_lock);
BUG_ON(!list_empty(&space->list));
-list_add(&space->list, &address_space_list.list);
+list_insert(&space->list, &address_space_list.list);
BUG_ON(!++address_space_list.count);
spin_unlock(&address_space_list.list_lock);
}
@@ -82,7 +82,7 @@ void address_space_remove(struct address_space *space)
spin_lock(&address_space_list.list_lock);
BUG_ON(list_empty(&space->list));
BUG_ON(--address_space_list.count < 0);
-list_del_init(&space->list);
+list_remove_init(&space->list);
spin_unlock(&address_space_list.list_lock);
}
@@ -118,7 +118,7 @@ struct address_space *address_space_create(struct address_space *orig)
}
/* Initialize space structure */
-INIT_LIST_HEAD(&space->list);
+link_init(&space->list);
mutex_init(&space->lock);
space->pgd = pgd;
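The lookup in address_space_find() above (tcb_find() in the next file is the same pattern) holds the list spinlock across the whole walk, so every exit path must drop it. The tail of the function is cut off in this view; the sketch below completes it under the assumption that a failed search unlocks and returns 0:

struct address_space *address_space_find(l4id_t spid)
{
        struct address_space *space;

        spin_lock(&address_space_list.list_lock);
        list_foreach_struct(space, &address_space_list.list, list) {
                if (space->spid == spid) {
                        /* Drop the lock on the found path, too */
                        spin_unlock(&address_space_list.list_lock);
                        return space;
                }
        }
        spin_unlock(&address_space_list.list_lock);
        return 0;
}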

View File

@@ -21,7 +21,7 @@ struct id_pool *space_id_pool;
/* Hash table for all existing tasks */
struct ktcb_list {
-struct list_head list;
+struct link list;
struct spinlock list_lock;
int count;
};
@@ -32,12 +32,12 @@ void init_ktcb_list(void)
{
memset(&ktcb_list, 0, sizeof(ktcb_list));
spin_lock_init(&ktcb_list.list_lock);
-INIT_LIST_HEAD(&ktcb_list.list);
+link_init(&ktcb_list.list);
}
void tcb_init(struct ktcb *new)
{
-INIT_LIST_HEAD(&new->task_list);
+link_init(&new->task_list);
mutex_init(&new->thread_control_lock);
/* Initialise task's scheduling state and parameters. */
@@ -107,7 +107,7 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
struct ktcb *task;
spin_lock(&ktcb_list.list_lock);
-list_for_each_entry(task, &ktcb_list.list, task_list) {
+list_foreach_struct(task, &ktcb_list.list, task_list) {
if (task->space->spid == spid) {
spin_unlock(&ktcb_list.list_lock);
return task;
@@ -122,7 +122,7 @@ struct ktcb *tcb_find(l4id_t tid)
struct ktcb *task;
spin_lock(&ktcb_list.list_lock);
-list_for_each_entry(task, &ktcb_list.list, task_list) {
+list_foreach_struct(task, &ktcb_list.list, task_list) {
if (task->tid == tid) {
spin_unlock(&ktcb_list.list_lock);
return task;
@@ -137,7 +137,7 @@ void tcb_add(struct ktcb *new)
spin_lock(&ktcb_list.list_lock);
BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++ktcb_list.count);
-list_add(&new->task_list, &ktcb_list.list);
+list_insert(&new->task_list, &ktcb_list.list);
spin_unlock(&ktcb_list.list_lock);
}
@@ -146,7 +146,7 @@ void tcb_remove(struct ktcb *new)
spin_lock(&ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(--ktcb_list.count < 0);
-list_del_init(&new->task_list);
+list_remove_init(&new->task_list);
spin_unlock(&ktcb_list.list_lock);
}

View File

@@ -5,7 +5,7 @@
void print_page_area_list(struct page_area *p)
{
struct page_area *current_item = p;
-struct list_head *begin = &p->list;
+struct link *begin = &p->list;
if (!current_item) {
printf("%-20s\n", "Null list.");
return;
@@ -18,7 +18,7 @@ void print_page_area_list(struct page_area *p)
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Number of pages:", current_item->numpages);
-list_for_each_entry (current_item, begin, list) {
+list_foreach_struct (current_item, begin, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %d\n", "Index:", current_item->index);
printf("%-20s %d\n", "Used:", current_item->used);
@@ -38,7 +38,7 @@ void print_subpage_area(struct subpage_area *s)
void print_subpage_area_list(struct subpage_area *s)
{
struct subpage_area *current_item = s;
-struct list_head *begin = &s->list;
+struct link *begin = &s->list;
if (!current_item) {
printf("Null list.\n");
return;
@@ -52,7 +52,7 @@ void print_subpage_area_list(struct subpage_area *s)
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Head_of_pages:", current_item->head_of_pages);
-list_for_each_entry (current_item, begin, list) {
+list_foreach_struct (current_item, begin, list) {
print_subpage_area(current_item);
}
}

View File

@@ -40,7 +40,7 @@ void print_areas(struct page_area *ar)
return;
}
print_page_area(cur, areano++);
-list_for_each_entry(cur, &ar->list, list) {
+list_foreach_struct(cur, &ar->list, list) {
print_page_area(cur, areano++);
}
return;
@@ -64,7 +64,7 @@ void print_caches(struct mem_cache *c)
return;
}
print_cache(cur, caches++);
-list_for_each_entry(cur, &c->list, list) {
+list_foreach_struct(cur, &c->list, list) {
print_cache(cur, caches++);
}
return;

View File

@@ -159,7 +159,7 @@ struct mem_cache *mem_cache_init(void *start,
area_start = addr_aligned;
}
-INIT_LIST_HEAD(&cache->list);
+link_init(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;

View File

@@ -37,8 +37,8 @@ void sem_up(struct mutex *mutex)
/* Each producer wakes one consumer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
-list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
-list_del_init(&wq->task_list);
+list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
+list_remove_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up consumer (%d)\n", current->tid,
@@ -48,8 +48,8 @@ void sem_up(struct mutex *mutex)
}
} else if (cnt > 0) {
DECLARE_WAITQUEUE(wq, current);
-INIT_LIST_HEAD(&wq.task_list);
-list_add_tail(&wq.task_list, &mutex->wq.task_list);
+link_init(&wq.task_list);
+list_insert_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_prepare_sleep();
printk("(%d) produced, now sleeping...\n", current->tid);
@@ -75,8 +75,8 @@ void sem_down(struct mutex *mutex)
/* Each consumer wakes one producer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
-list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
-list_del_init(&wq->task_list);
+list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
+list_remove_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up producer (%d)\n", current->tid,
@@ -86,8 +86,8 @@ void sem_down(struct mutex *mutex)
}
} else if (cnt < 0) {
DECLARE_WAITQUEUE(wq, current);
-INIT_LIST_HEAD(&wq.task_list);
-list_add_tail(&wq.task_list, &mutex->wq.task_list);
+link_init(&wq.task_list);
+list_insert_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_prepare_sleep();
printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
@@ -122,7 +122,7 @@ int mutex_lock(struct mutex *mutex)
if (!__mutex_lock(&mutex->lock)) { /* Could not lock, sleep. */
CREATE_WAITQUEUE_ON_STACK(wq, current);
task_set_wqh(current, &mutex->wqh, &wq);
-list_add_tail(&wq.task_list, &mutex->wqh.task_list);
+list_insert_tail(&wq.task_list, &mutex->wqh.task_list);
mutex->wqh.sleepers++;
sched_prepare_sleep();
spin_unlock(&mutex->wqh.slock);
@@ -151,14 +151,14 @@ void mutex_unlock(struct mutex *mutex)
BUG_ON(current->nlocks < 0);
BUG_ON(mutex->wqh.sleepers < 0);
if (mutex->wqh.sleepers > 0) {
-struct waitqueue *wq = list_entry(mutex->wqh.task_list.next,
+struct waitqueue *wq = link_to_struct(mutex->wqh.task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
task_unset_wqh(sleeper);
BUG_ON(list_empty(&mutex->wqh.task_list));
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
mutex->wqh.sleepers--;
spin_unlock(&mutex->wqh.slock);
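CREATE_WAITQUEUE_ON_STACK(), used in the mutex_lock() slow path above, builds a waitqueue whose storage lives in the sleeping task's kernel stack frame; that is safe because the task cannot return (and pop the frame) until a waker has unlinked it. Plausible definitions, assumed rather than copied from the real header:

struct waitqueue {
        struct link task_list;  /* links into a waitqueue_head */
        struct ktcb *task;      /* the task that is sleeping */
};

/* Declare and fully initialize a waitqueue in the current stack frame */
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk)                      \
        struct waitqueue wq = {                                 \
                .task_list = { &wq.task_list, &wq.task_list },  \
                .task = (tsk),                                  \
        }

The DECLARE_WAITQUEUE() variant in the semaphore code above evidently leaves the link uninitialized, which is why those hunks follow it with an explicit link_init().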

View File

@@ -62,7 +62,7 @@ int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
{
spin_lock(&wqh->slock);
wqh->sleepers++;
-list_add_tail(&wq->task_list, &wqh->task_list);
+list_insert_tail(&wq->task_list, &wqh->task_list);
task_set_wqh(current, wqh, wq);
sched_prepare_sleep();
//printk("(%d) waiting on wqh at: 0x%p\n",
@@ -78,7 +78,7 @@ int wait_on(struct waitqueue_head *wqh)
CREATE_WAITQUEUE_ON_STACK(wq, current);
spin_lock(&wqh->slock);
wqh->sleepers++;
-list_add_tail(&wq.task_list, &wqh->task_list);
+list_insert_tail(&wq.task_list, &wqh->task_list);
task_set_wqh(current, wqh, &wq);
sched_prepare_sleep();
//printk("(%d) waiting on wqh at: 0x%p\n",
@@ -101,13 +101,13 @@ void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
while (wqh->sleepers > 0) {
-struct waitqueue *wq = list_entry(wqh->task_list.next,
+struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
task_unset_wqh(sleeper);
BUG_ON(list_empty(&wqh->task_list));
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
wqh->sleepers--;
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
@@ -128,12 +128,12 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
if (wqh->sleepers > 0) {
-struct waitqueue *wq = list_entry(wqh->task_list.next,
+struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
BUG_ON(list_empty(&wqh->task_list));
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
wqh->sleepers--;
task_unset_wqh(sleeper);
if (flags & WAKEUP_INTERRUPT)
@@ -193,7 +193,7 @@ int wake_up_task(struct ktcb *task, unsigned int flags)
}
/* Now we can remove the task from its waitqueue */
-list_del_init(&wq->task_list);
+list_remove_init(&wq->task_list);
wqh->sleepers--;
task->waiting_on = 0;
task->wq = 0;
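Taken together, these hunks imply roughly the following waitqueue_head layout (reconstructed, so the field ordering and exact types are assumptions). Since sleepers enqueue with list_insert_tail() and wakers always dequeue task_list.next, wake-ups are strictly FIFO:

struct waitqueue_head {
        struct spinlock slock;  /* guards task_list and sleepers */
        int sleepers;           /* number of queued waiters */
        struct link task_list;  /* FIFO of struct waitqueue links */
};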