Mirror of https://github.com/drasko/codezero.git
Converted all wait/wakeup runqueue lock/unlock paths to irq versions.
Irqs can now touch runqueues and do asynchronous wakeups. This required making all of the wake-up, wait, and runqueue locking paths work with irqs disabled. All of this assumes that in an SMP setup we may have cross-cpu wakeups and runqueue manipulation. If we later decide to wake up threads only in the current container (and lock containers to cpus), we won't really need spinlocks or irq disabling anymore. The current setup may be marginally less responsive, but it is more flexible.
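For context, every converted call site below follows the same "save irq flags and lock, then unlock and restore flags" shape. The sketch below is only illustrative: the spinlock type, arch_irq_save() and arch_irq_restore() are stand-ins for the kernel's real primitives; only the spin_lock_irq()/spin_unlock_irq() call shape matches the code in the diff.

/* Illustrative sketch of the irq-safe locking discipline (stand-in types). */
#include <stdio.h>

typedef struct { volatile int locked; } spinlock_t;

/* Stand-ins: on ARM these would save/restore CPSR and execute cpsid/cpsie i */
static unsigned long arch_irq_save(void) { return 0; }
static void arch_irq_restore(unsigned long flags) { (void)flags; }

static void spin_lock_irq(spinlock_t *l, unsigned long *flags)
{
	*flags = arch_irq_save();		/* disable irqs first... */
	while (__sync_lock_test_and_set(&l->locked, 1))
		;				/* ...then spin for the lock */
}

static void spin_unlock_irq(spinlock_t *l, unsigned long flags)
{
	__sync_lock_release(&l->locked);	/* release the lock... */
	arch_irq_restore(flags);		/* ...then re-enable irqs */
}

int main(void)
{
	spinlock_t waitlock = { 0 };
	unsigned long irqflags;

	spin_lock_irq(&waitlock, &irqflags);
	printf("critical section: lock held, irqs off\n");
	spin_unlock_irq(&waitlock, irqflags);
	return 0;
}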
@@ -110,7 +110,8 @@ int timer_setup_devices(void)
 		/* Map timers to a virtual address region */
 		if (IS_ERR(l4_map((void *)__pfn_to_addr(timer_cap[i].start),
-				  (void *)timer[i].base, timer_cap[i].size, MAP_USR_IO_FLAGS,
+				  (void *)timer[i].base, timer_cap[i].size,
+				  MAP_USR_IO_FLAGS,
 				  self_tid()))) {
 			printf("%s: FATAL: Failed to map TIMER device "
 			       "%d to a virtual address\n",
conts/libl4/include/l4lib/arch-arm/irq.h (new file, 25 lines)
@@ -0,0 +1,25 @@
+#ifndef __L4LIB_ARCH_IRQ_H__
+#define __L4LIB_ARCH_IRQ_H__
+
+
+/*
+ * Destructive atomic-read.
+ *
+ * Write 0 to byte at @location as its contents are read back.
+ */
+static inline char l4_atomic_dest_readb(void *location)
+{
+	unsigned char zero = 0;
+	unsigned char val;
+	char *loc = location;
+
+	__asm__ __volatile__ (
+		"swpb %0, %1, [%2] \n"
+		: "=&r" (val) : "r" (zero), "r" (loc) : "memory"
+	);
+
+	return val;
+}
+
+
+#endif
conts/libl4/src/irq.c (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Functions for userspace irq handling.
+ *
+ * Copyright (C) 2009 B Labs Ltd.
+ */
+#include <l4lib/arch/irq.h>
+#include <l4lib/arch/syscalls.h>
+#include <l4/api/irq.h>
+
+/*
+ * Reads the irq notification slot. Destructive atomic read ensures that
+ * an irq may write to the slot in sync.
+ */
+int l4_irq_read_blocking(int slot, int irqnum)
+{
+	int irqval = l4_atomic_dest_readb(&l4_get_utcb()->notify[slot]);
+
+	if (!irqval)
+		return l4_irq_control(IRQ_CONTROL_WAIT, 0, irqnum);
+	else
+		return irqval;
+}
+
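A driver's irq thread would presumably call this in a loop. A minimal sketch follows; the slot and irq number are made up, and the handler body is purely illustrative of how l4_irq_read_blocking() from the new file above might be used.

/* Hypothetical caller of l4_irq_read_blocking(). KBD_IRQ and KBD_NOTIFY_SLOT
 * are invented values; a real driver would use whatever it registered via
 * IRQ_CONTROL_REGISTER. */
#define KBD_IRQ		52	/* hypothetical irq number */
#define KBD_NOTIFY_SLOT	0	/* hypothetical utcb notification slot */

void kbd_irq_thread(void)
{
	for (;;) {
		/* Returns the pending count, or blocks via IRQ_CONTROL_WAIT */
		int pending = l4_irq_read_blocking(KBD_NOTIFY_SLOT, KBD_IRQ);

		if (pending <= 0)
			continue;	/* woken without work, or error; retry */

		/* handle `pending` occurrences of the irq here */
	}
}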
@@ -7,6 +7,7 @@
 #define __GENERIC_IRQ_H__
 
 #include <l4/lib/string.h>
+#include <l4/lib/wait.h>
 #include INC_PLAT(irq.h)
 #include INC_ARCH(types.h)
 
@@ -45,6 +46,9 @@ struct irq_desc {
 	/* Notification slot for this irq */
 	int task_notify_slot;
 
+	/* Waitqueue head for this irq */
+	struct waitqueue_head wqh_irq;
+
 	/* NOTE: This could be a list for multiple handlers for shared irqs */
 	irq_handler_t handler;
 };
@@ -15,7 +15,6 @@ struct waitqueue {
 enum wakeup_flags {
 	WAKEUP_INTERRUPT = (1 << 0),	/* Set interrupt flag for task */
 	WAKEUP_SYNC = (1 << 1),		/* Wake it up synchronously */
-	WAKEUP_IRQ = (1 << 2)		/* Disable irqs on spinlocks */
 };
 
 #define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
@@ -51,18 +50,20 @@ void task_unset_wqh(struct ktcb *task);
 do {								\
 	ret = 0;						\
 	for (;;) {						\
-		spin_lock(&(wqh)->slock);			\
+		unsigned long irqsave;				\
+		spin_lock_irq(&(wqh)->slock, &irqsave);		\
 		if (condition) {				\
-			spin_unlock(&(wqh)->slock);		\
+			spin_unlock_irq(&(wqh)->slock, irqsave);\
 			break;					\
 		}						\
 		CREATE_WAITQUEUE_ON_STACK(wq, current);		\
 		task_set_wqh(current, wqh, &wq);		\
 		(wqh)->sleepers++;				\
-		list_insert_tail(&wq.task_list, &(wqh)->task_list);\
-		/* printk("(%d) waiting...\n", current->tid); */ \
+		list_insert_tail(&wq.task_list,			\
+				 &(wqh)->task_list);		\
+		/* printk("(%d) waiting...\n", current->tid); */\
 		sched_prepare_sleep();				\
-		spin_unlock(&(wqh)->slock);			\
+		spin_unlock_irq(&(wqh)->slock, irqsave);	\
 		schedule();					\
 		/* Did we wake up normally or get interrupted */\
 		if (current->flags & TASK_INTERRUPTED) {	\
@@ -73,6 +74,7 @@ do { \
 	}							\
 } while(0);
 
+
 void wake_up(struct waitqueue_head *wqh, unsigned int flags);
 int wake_up_task(struct ktcb *task, unsigned int flags);
 void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
@@ -16,8 +16,8 @@
 #define SIC_CHIP_OFFSET		32
 
 /* Maximum irqs on VIC and SIC */
-#define VIC_IRQS_MAX		PL190_IRQS_MAX
-#define SIC_IRQS_MAX		PL190_SIC_IRQS_MAX
+#define VIC_IRQS_MAX		32
+#define SIC_IRQS_MAX		32
 
 #define IRQS_MAX		VIC_IRQS_MAX + SIC_IRQS_MAX
 
@@ -48,6 +48,7 @@ int irq_thread_notify(struct irq_desc *desc)
 			 "task id=0x%x err=%d\n"
 			 "Destroying task.", __FUNCTION__,
 			 desc->task->tid, err);
+		/* FIXME: Racy for irqs. */
 		thread_destroy(desc->task);
 		/* FIXME: Deregister and disable irq as well */
 	}
@@ -60,7 +61,7 @@ int irq_thread_notify(struct irq_desc *desc)
 	utcb->notify[desc->task_notify_slot]++;
 
 	/* Async wake up any waiter irq threads */
-	wake_up(&desc->task->wqh_notify, WAKEUP_ASYNC | WAKEUP_IRQ);
+	wake_up(&desc->task->wqh_notify, WAKEUP_ASYNC);
 
 	return 0;
 }
@@ -91,9 +92,29 @@ int irq_control_register(struct ktcb *task, int slot, l4id_t irqnum)
 	return 0;
 }
 
-int irq_wait()
+/*
+ * Makes current task wait on the given irq
+ */
+int irq_wait(l4id_t irq_index)
 {
-	return 0;
+	struct irq_desc *desc = irq_desc_array + irq_index;
+	struct utcb *utcb = (struct utcb *)current->utcb_address;
+	int ret;
+
+	/* Index must be valid */
+	if (irq_index > IRQS_MAX || irq_index < 0)
+		return -EINVAL;
+
+	/* UTCB must be mapped */
+	if ((ret = tcb_check_and_lazy_map_utcb(current, 1)) < 0)
+		return ret;
+
+	/* Wait until the irq changes slot value */
+	WAIT_EVENT(&desc->wqh_irq,
+		   !utcb->notify[desc->task_notify_slot],
+		   ret);
+
+	return ret;
 }
 
@@ -101,26 +122,24 @@ int irq_wait()
  * Register/deregister device irqs. Optional synchronous and
  * asynchronous irq handling.
  */
-int sys_irq_control(unsigned int req, unsigned int flags, l4id_t irqno)
+int sys_irq_control(unsigned int req, unsigned int flags, l4id_t irqnum)
 {
 	/* Currently a task is allowed to register only for itself */
 	struct ktcb *task = current;
 	int err;
 
-	if ((err = cap_irq_check(task, req, flags, irqno)) < 0)
+	if ((err = cap_irq_check(task, req, flags, irqnum)) < 0)
 		return err;
 
 	switch (req) {
 	case IRQ_CONTROL_REGISTER:
-		if ((err = irq_control_register(task, flags, irqno)) < 0)
-			return err;
-		break;
+		return irq_control_register(task, flags, irqnum);
 	case IRQ_CONTROL_WAIT:
-		irq_wait();
-		break;
+		return irq_wait(irqnum);
 	default:
-		return -EINVAL;
+		return -EINVAL;
 	}
 
 	return 0;
 }
@@ -60,7 +60,7 @@ int thread_exit(struct ktcb *task)
 	return thread_signal(task, TASK_EXITING, TASK_DEAD);
 }
 
-static inline int TASK_IS_CHILD(struct ktcb *task)
+static inline int task_is_child(struct ktcb *task)
 {
 	return (((task) != current) &&
 		((task)->pagerid == current->tid));
@@ -91,7 +91,7 @@ int thread_destroy_children(void)
 	list_foreach_removable_struct(task, n,
 				      &curcont->ktcb_list.list,
 				      task_list) {
-		if (TASK_IS_CHILD(task)) {
+		if (task_is_child(task)) {
 			spin_unlock(&curcont->ktcb_list.list_lock);
 			thread_destroy_child(task);
 			spin_lock(&curcont->ktcb_list.list_lock);
@@ -136,7 +136,7 @@ int thread_destroy(struct ktcb *task, unsigned int exit_code)
 {
 	exit_code &= THREAD_EXIT_MASK;
 
-	if (TASK_IS_CHILD(task))
+	if (task_is_child(task))
 		return thread_destroy_child(task);
 	else if (task == current)
 		thread_destroy_self(exit_code);
@@ -34,6 +34,10 @@ int irq_register(struct ktcb *task, int notify_slot, l4id_t irq_index)
 	if (!this_desc->handler || !this_desc->chip)
 		return -ENOIRQ;
 
+	/* Index must be valid */
+	if (irq_index > IRQS_MAX || irq_index < 0)
+		return -ENOIRQ;
+
 	/* Setup the task and notify slot */
 	this_desc->task = task;
 	this_desc->task_notify_slot = notify_slot;
@@ -34,16 +34,16 @@ extern unsigned int current_irq_nest_count;
 static int voluntary_preempt = 0;
 
 
-void sched_lock_runqueues(void)
+void sched_lock_runqueues(unsigned long *irqflags)
 {
-	spin_lock(&scheduler.sched_rq[0].lock);
+	spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
 	spin_lock(&scheduler.sched_rq[1].lock);
 }
 
-void sched_unlock_runqueues(void)
+void sched_unlock_runqueues(unsigned long irqflags)
 {
-	spin_unlock(&scheduler.sched_rq[0].lock);
 	spin_unlock(&scheduler.sched_rq[1].lock);
+	spin_unlock_irq(&scheduler.sched_rq[0].lock, irqflags);
 }
 
 int preemptive()
@@ -142,24 +142,27 @@ static void sched_rq_swap_runqueues(void)
 /* Helper for adding a new task to a runqueue */
 static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
 {
+	unsigned long irqflags;
+
 	BUG_ON(!list_empty(&task->rq_list));
 
-	sched_lock_runqueues();
+	sched_lock_runqueues(&irqflags);
 	if (front)
 		list_insert(&task->rq_list, &rq->task_list);
 	else
 		list_insert_tail(&task->rq_list, &rq->task_list);
 	rq->total++;
 	task->rq = rq;
-	sched_unlock_runqueues();
+	sched_unlock_runqueues(irqflags);
 }
 
 /* Helper for removing a task from its runqueue. */
 static inline void sched_rq_remove_task(struct ktcb *task)
 {
 	struct runqueue *rq;
+	unsigned long irqflags;
 
-	sched_lock_runqueues();
+	sched_lock_runqueues(&irqflags);
 
 	/*
 	 * We must lock both, otherwise rqs may swap and
@@ -172,7 +175,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
 	rq->total--;
 
 	BUG_ON(rq->total < 0);
-	sched_unlock_runqueues();
+	sched_unlock_runqueues(irqflags);
 }
 
 
@@ -15,10 +15,12 @@
 void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
 		  struct waitqueue *wq)
 {
-	spin_lock(&task->waitlock);
+	unsigned long irqflags;
+
+	spin_lock_irq(&task->waitlock, &irqflags);
 	task->waiting_on = wqh;
 	task->wq = wq;
-	spin_unlock(&task->waitlock);
+	spin_unlock_irq(&task->waitlock, irqflags);
 }
 
@@ -28,10 +30,12 @@ void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
  */
 void task_unset_wqh(struct ktcb *task)
 {
-	spin_lock(&task->waitlock);
+	unsigned long irqflags;
+
+	spin_lock_irq(&task->waitlock, &irqflags);
 	task->waiting_on = 0;
 	task->wq = 0;
-	spin_unlock(&task->waitlock);
+	spin_unlock_irq(&task->waitlock, irqflags);
 
 }
 
@@ -69,17 +73,19 @@ int wait_on_prepared_wait(void)
  */
 int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
 {
+	unsigned long irqflags;
+
 	/* Disable to protect from sleeping by preemption */
 	preempt_disable();
 
-	spin_lock(&wqh->slock);
+	spin_lock_irq(&wqh->slock, &irqflags);
 	wqh->sleepers++;
 	list_insert_tail(&wq->task_list, &wqh->task_list);
 	task_set_wqh(current, wqh, wq);
 	sched_prepare_sleep();
 	//printk("(%d) waiting on wqh at: 0x%p\n",
 	//       current->tid, wqh);
-	spin_unlock(&wqh->slock);
+	spin_unlock_irq(&wqh->slock, irqflags);
 
 	return 0;
 }
@@ -87,15 +93,17 @@ int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
 /* Sleep without any condition */
 int wait_on(struct waitqueue_head *wqh)
 {
+	unsigned long irqsave;
+
 	CREATE_WAITQUEUE_ON_STACK(wq, current);
-	spin_lock(&wqh->slock);
+	spin_lock_irq(&wqh->slock, &irqsave);
 	wqh->sleepers++;
 	list_insert_tail(&wq.task_list, &wqh->task_list);
 	task_set_wqh(current, wqh, &wq);
 	sched_prepare_sleep();
 	//printk("(%d) waiting on wqh at: 0x%p\n",
 	//       current->tid, wqh);
-	spin_unlock(&wqh->slock);
+	spin_unlock_irq(&wqh->slock, irqsave);
 	schedule();
 
 	/* Did we wake up normally or get interrupted */
|
||||
/* Wake up all in the queue */
|
||||
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
|
||||
{
|
||||
spin_lock(&wqh->slock);
|
||||
unsigned long irqsave;
|
||||
|
||||
spin_lock_irq(&wqh->slock, &irqsave);
|
||||
BUG_ON(wqh->sleepers < 0);
|
||||
while (wqh->sleepers > 0) {
|
||||
struct waitqueue *wq = link_to_struct(wqh->task_list.next,
|
||||
@@ -124,16 +134,16 @@ void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
|
||||
if (flags & WAKEUP_INTERRUPT)
|
||||
sleeper->flags |= TASK_INTERRUPTED;
|
||||
// printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
|
||||
spin_unlock(&wqh->slock);
|
||||
spin_unlock_irq(&wqh->slock, irqsave);
|
||||
|
||||
if (flags & WAKEUP_SYNC)
|
||||
sched_resume_sync(sleeper);
|
||||
else
|
||||
sched_resume_async(sleeper);
|
||||
|
||||
spin_lock(&wqh->slock);
|
||||
spin_lock_irq(&wqh->slock, &irqsave);
|
||||
}
|
||||
spin_unlock(&wqh->slock);
|
||||
spin_unlock_irq(&wqh->slock, irqsave);
|
||||
}
|
||||
|
||||
/* Wake up single waiter */
|
||||
@@ -143,15 +153,11 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
 
 	BUG_ON(wqh->sleepers < 0);
 
-	/* Irq version */
-	if (flags & WAKEUP_IRQ)
-		spin_lock_irq(&wqh->slock, &irqflags);
-	else
-		spin_lock(&wqh->slock);
+	spin_lock_irq(&wqh->slock, &irqflags);
 	if (wqh->sleepers > 0) {
 		struct waitqueue *wq = link_to_struct(wqh->task_list.next,
-						struct waitqueue,
-						task_list);
+						      struct waitqueue,
+						      task_list);
 		struct ktcb *sleeper = wq->task;
 		BUG_ON(list_empty(&wqh->task_list));
 		list_remove_init(&wq->task_list);
@@ -160,10 +166,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
 		if (flags & WAKEUP_INTERRUPT)
 			sleeper->flags |= TASK_INTERRUPTED;
 		//printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
-		if (flags & WAKEUP_IRQ)
-			spin_unlock_irq(&wqh->slock, irqflags);
-		else
-			spin_unlock(&wqh->slock);
+		spin_unlock_irq(&wqh->slock, irqflags);
 
 		if (flags & WAKEUP_SYNC)
 			sched_resume_sync(sleeper);
@@ -171,10 +174,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
 			sched_resume_async(sleeper);
 		return;
 	}
-	if (flags & WAKEUP_IRQ)
-		spin_unlock_irq(&wqh->slock, irqflags);
-	else
-		spin_unlock(&wqh->slock);
+	spin_unlock_irq(&wqh->slock, irqflags);
 }
 
 /*
@@ -184,15 +184,13 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
  */
 int wake_up_task(struct ktcb *task, unsigned int flags)
 {
+	unsigned long irqflags[2];
 	struct waitqueue_head *wqh;
 	struct waitqueue *wq;
 
-	/* Not yet handled. need spin_lock_irqs */
-	BUG_ON(flags & WAKEUP_IRQ);
-
-	spin_lock(&task->waitlock);
+	spin_lock_irq(&task->waitlock, &irqflags[0]);
 	if (!task->waiting_on) {
-		spin_unlock(&task->waitlock);
+		spin_unlock_irq(&task->waitlock, irqflags[0]);
 		return -1;
 	}
 	wqh = task->waiting_on;
@@ -200,25 +198,29 @@ int wake_up_task(struct ktcb *task, unsigned int flags)
 
 	/*
 	 * We have found the waitqueue head.
 	 *
 	 * That needs to be locked first to conform with
 	 * lock order and avoid deadlocks. Release task's
 	 * waitlock and take the wqh's one.
 	 */
-	spin_unlock(&task->waitlock);
+	spin_unlock_irq(&task->waitlock, irqflags[0]);
 
-	/* -- Task can be woken up by someone else here -- */
+	/*
+	 * Task can be woken up by someone else here.
+	 */
 
-	spin_lock(&wqh->slock);
+	spin_lock_irq(&wqh->slock, &irqflags[0]);
 
 	/*
 	 * Now lets check if the task is still
-	 * waiting and in the same queue
+	 * waiting and in the same queue. Not irq version
+	 * as we called that once already (so irqs are disabled)
 	 */
-	spin_lock(&task->waitlock);
+	spin_lock_irq(&task->waitlock, &irqflags[1]);
 	if (task->waiting_on != wqh) {
 		/* No, task has been woken by someone else */
-		spin_unlock(&wqh->slock);
-		spin_unlock(&task->waitlock);
+		spin_unlock_irq(&wqh->slock, irqflags[0]);
+		spin_unlock_irq(&task->waitlock, irqflags[1]);
 		return -1;
 	}
 
@@ -229,8 +231,8 @@ int wake_up_task(struct ktcb *task, unsigned int flags)
 	task->wq = 0;
 	if (flags & WAKEUP_INTERRUPT)
 		task->flags |= TASK_INTERRUPTED;
-	spin_unlock(&wqh->slock);
-	spin_unlock(&task->waitlock);
+	spin_unlock_irq(&wqh->slock, irqflags[0]);
+	spin_unlock_irq(&task->waitlock, irqflags[1]);
 
 	/*
 	 * Task is removed from its waitqueue. Now we can