Converted all wait/wakeup runqueue lock/unlock paths to irq-safe versions.

Irqs can now touch runqueues and do async wakeups. This required making
all wakeup, wait, and runqueue locking paths work with irqs disabled.

All of this assumes that in an SMP setup we may have cross-cpu wakeups
and runqueue manipulation. If we later decide to wake up threads only in
the current container (and lock containers to cpus), we won't need
spinlocks or irq disabling at all. The current setup may be marginally
less responsive, but it is more flexible.
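
For reference, a minimal sketch of the irq-safe lock semantics this change
assumes (irq_save() and irq_restore() are hypothetical stand-ins for the
arch-level irq state accessors, not necessarily the kernel's actual
primitives):

	static inline void spin_lock_irq(struct spinlock *lock, unsigned long *irqflags)
	{
		*irqflags = irq_save();	/* save caller's irq state; irqs are now off */
		spin_lock(lock);	/* an irq on this cpu can no longer spin on us */
	}

	static inline void spin_unlock_irq(struct spinlock *lock, unsigned long irqflags)
	{
		spin_unlock(lock);
		irq_restore(irqflags);	/* re-enable irqs only if the caller had them on */
	}

Disabling irqs before taking the lock is what makes it legal for irq
handlers to take the same runqueue locks: a handler can never interrupt a
lock holder on its own cpu and deadlock spinning against it.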
Author: Bahadir Balban
Date:   2009-12-12 01:20:14 +02:00
parent  b1614191b3
commit  32c0bb3a76
11 changed files with 155 additions and 72 deletions

@@ -34,6 +34,10 @@ int irq_register(struct ktcb *task, int notify_slot, l4id_t irq_index)
 	if (!this_desc->handler || !this_desc->chip)
 		return -ENOIRQ;
 
+	/* Index must be valid */
+	if (irq_index > IRQS_MAX || irq_index < 0)
+		return -ENOIRQ;
+
 	/* Setup the task and notify slot */
 	this_desc->task = task;
 	this_desc->task_notify_slot = notify_slot;
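
The new range check makes irq_register() fail fast on a bad index. A hedged
usage sketch (the task handle and notify slot are made up; IRQS_MAX and
-ENOIRQ come from the hunk above):

	/* Hypothetical caller: an out-of-range index is now rejected cleanly. */
	BUG_ON(irq_register(task, 0, IRQS_MAX + 1) != -ENOIRQ);

Note that the check accepts irq_index == IRQS_MAX, so IRQS_MAX is treated
as the highest valid index rather than the array length.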

@@ -34,16 +34,16 @@ extern unsigned int current_irq_nest_count;
 static int voluntary_preempt = 0;
 
-void sched_lock_runqueues(void)
+void sched_lock_runqueues(unsigned long *irqflags)
 {
-	spin_lock(&scheduler.sched_rq[0].lock);
+	spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
 	spin_lock(&scheduler.sched_rq[1].lock);
 }
 
-void sched_unlock_runqueues(void)
+void sched_unlock_runqueues(unsigned long irqflags)
 {
-	spin_unlock(&scheduler.sched_rq[0].lock);
 	spin_unlock(&scheduler.sched_rq[1].lock);
+	spin_unlock_irq(&scheduler.sched_rq[0].lock, irqflags);
 }
 
 int preemptive()
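
Note the asymmetry in the hunk above: irq state is saved and irqs disabled
while taking rq[0], the outer lock, and restored only after rq[0] is
released, so irqs stay off for the entire time either runqueue lock is
held. The effective nesting, under the same assumed primitives sketched in
the commit message:

	unsigned long irqflags;

	spin_lock_irq(&scheduler.sched_rq[0].lock, &irqflags);	/* irqs off, rq[0] held */
	spin_lock(&scheduler.sched_rq[1].lock);			/* both rqs held */
	/* ... runqueue manipulation, safe from irq-context wakeups ... */
	spin_unlock(&scheduler.sched_rq[1].lock);		/* inner lock first */
	spin_unlock_irq(&scheduler.sched_rq[0].lock, irqflags);	/* irq state restored */

Releasing in reverse acquisition order is the usual discipline for nested
spinlocks.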
@@ -142,24 +142,27 @@ static void sched_rq_swap_runqueues(void)
 /* Helper for adding a new task to a runqueue */
 static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
 {
+	unsigned long irqflags;
+
 	BUG_ON(!list_empty(&task->rq_list));
-	sched_lock_runqueues();
+	sched_lock_runqueues(&irqflags);
 	if (front)
 		list_insert(&task->rq_list, &rq->task_list);
 	else
 		list_insert_tail(&task->rq_list, &rq->task_list);
 	rq->total++;
 	task->rq = rq;
-	sched_unlock_runqueues();
+	sched_unlock_runqueues(irqflags);
 }
 
 /* Helper for removing a task from its runqueue. */
 static inline void sched_rq_remove_task(struct ktcb *task)
 {
 	struct runqueue *rq;
+	unsigned long irqflags;
 
-	sched_lock_runqueues();
+	sched_lock_runqueues(&irqflags);
 
 	/*
 	 * We must lock both, otherwise rqs may swap and
@@ -172,7 +175,7 @@ static inline void sched_rq_remove_task(struct ktcb *task)
 	rq->total--;
 	BUG_ON(rq->total < 0);
 
-	sched_unlock_runqueues();
+	sched_unlock_runqueues(irqflags);
 }
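
Any new path that touches the runqueues is expected to follow the same
shape as sched_rq_add_task() above. A hedged template (the function name is
hypothetical):

	static void sched_rq_touch(struct ktcb *task)
	{
		unsigned long irqflags;

		sched_lock_runqueues(&irqflags);	/* irqs off, both rqs locked */
		/* ... inspect or modify task->rq here ... */
		sched_unlock_runqueues(irqflags);	/* locks dropped, irq state restored */
	}

The flags travel by pointer into the lock call, which writes the saved
state, and by value into the unlock call, which only reads it; that is why
the two signatures differ.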