Converted all wait/wakeup and runqueue lock/unlock paths to irq versions.

Irqs can now touch runqueues and do async wakeups. This necessitated
making all wakeup, wait, and runqueue locking paths irq-safe.

All this assumes that in an SMP setup we may have cross-cpu wakeups
and runqueue manipulation. If we later decide to wake up threads only
in the current container (and lock containers to cpus), we won't
really need spinlocks or irq disabling anymore. The current setup
might be marginally less responsive, but it is more flexible.
Bahadir Balban
2009-12-12 01:20:14 +02:00
parent b1614191b3
commit 32c0bb3a76
11 changed files with 155 additions and 72 deletions
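
Background for the diff below: the irq lock variants take a second
argument that saves and restores the caller's irq state. A minimal
sketch of what such wrappers typically look like, assuming
hypothetical irq_local_disable_save()/irq_local_restore() helpers
(only the spin_lock_irq/spin_unlock_irq names and two-argument
signatures come from the diff; the bodies here are illustrative, not
the tree's actual implementation):

/* Sketch only: irq-saving spinlock wrappers. Helper names and
 * the spinlock type are assumed for illustration. */
static inline void spin_lock_irq(struct spinlock *lock,
				 unsigned long *irqsave)
{
	/* Save the current irq state and disable irqs before taking
	 * the lock, so an irq handler on this cpu cannot preempt us
	 * and deadlock on the same waitqueue/runqueue lock. */
	irq_local_disable_save(irqsave);
	spin_lock(lock);
}

static inline void spin_unlock_irq(struct spinlock *lock,
				   unsigned long irqsave)
{
	spin_unlock(lock);
	/* Restore rather than unconditionally enable: irqs come back
	 * on only if they were on before the lock was taken. */
	irq_local_restore(irqsave);
}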

@@ -15,7 +15,6 @@ struct waitqueue {
 enum wakeup_flags {
 	WAKEUP_INTERRUPT = (1 << 0),	/* Set interrupt flag for task */
 	WAKEUP_SYNC	 = (1 << 1),	/* Wake it up synchronously */
-	WAKEUP_IRQ	 = (1 << 2)	/* Disable irqs on spinlocks */
 };

 #define CREATE_WAITQUEUE_ON_STACK(wq, tsk)	\
@@ -51,18 +50,20 @@ void task_unset_wqh(struct ktcb *task);
 do {								\
 	ret = 0;						\
 	for (;;) {						\
-		spin_lock(&(wqh)->slock);			\
+		unsigned long irqsave;				\
+		spin_lock_irq(&(wqh)->slock, &irqsave);		\
 		if (condition) {				\
-			spin_unlock(&(wqh)->slock);		\
+			spin_unlock_irq(&(wqh)->slock, irqsave);\
 			break;					\
 		}						\
 		CREATE_WAITQUEUE_ON_STACK(wq, current);		\
 		task_set_wqh(current, wqh, &wq);		\
 		(wqh)->sleepers++;				\
-		list_insert_tail(&wq.task_list, &(wqh)->task_list);\
-		/* printk("(%d) waiting...\n", current->tid); */ \
+		list_insert_tail(&wq.task_list,			\
+				 &(wqh)->task_list);		\
+		/* printk("(%d) waiting...\n", current->tid); */\
 		sched_prepare_sleep();				\
-		spin_unlock(&(wqh)->slock);			\
+		spin_unlock_irq(&(wqh)->slock, irqsave);	\
 		schedule();					\
 		/* Did we wake up normally or get interrupted */\
 		if (current->flags & TASK_INTERRUPTED) {	\
@@ -73,6 +74,7 @@ do { \
 	}							\
 } while(0);

 void wake_up(struct waitqueue_head *wqh, unsigned int flags);
+int wake_up_task(struct ktcb *task, unsigned int flags);
 void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
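
A usage sketch under stated assumptions: the wait loop above is taken
to be the body of a WAIT_EVENT(wqh, condition, ret)-style macro (its
name is not visible in this hunk), and the rx_* names are
hypothetical. It shows the pattern this commit enables: a sleeper on
one side, and a wakeup that is now safe to issue from an irq handler
on the other.

/* Hypothetical consumer: block until data arrives. */
static struct waitqueue_head rx_wqh;	/* assumed initialized elsewhere */
static int rx_ready;

int rx_read(void)
{
	int ret;

	/* Expands to the loop above: takes wqh->slock with irqs
	 * disabled, re-checks the condition, and sleeps otherwise. */
	WAIT_EVENT(&rx_wqh, rx_ready != 0, ret);
	if (ret < 0)
		return ret;	/* assumed negative when woken via
				 * TASK_INTERRUPTED */
	rx_ready = 0;
	return 0;
}

/* Hypothetical producer: with all waitqueue locking now irq-safe,
 * this may run directly from an irq handler. No flags requests a
 * plain asynchronous wakeup. */
void rx_irq_handler(void)
{
	rx_ready = 1;
	wake_up(&rx_wqh, 0);
}

The point of the conversion: because the sleeper's lock/unlock pair
disables irqs, rx_irq_handler can never fire on the same cpu between
the condition check and sched_prepare_sleep(), a window that would
otherwise lose the wakeup.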