IRQs are working. The scheduler has been modified to recover when no tasks are runnable.

- Userspace irq handling for timer.
- If no runnable task is left, the scheduler busy-loops in the user
  context of the last runnable task until a new task becomes runnable.
This commit is contained in:
Bahadir Balban
2009-12-14 11:48:40 +02:00
parent d85ccdb3fe
commit 27c0c34e3d
7 changed files with 63 additions and 26 deletions

View File

@@ -52,7 +52,24 @@ int cap_read_all()
"complete CAP_CONTROL_READ_CAPS request.\n");
BUG();
}
//cap_array_print(total_caps, caparray);
cap_array_print(total_caps, caparray);
return 0;
}
/*
 * Share all of this task's capabilities with the rest of its
 * address space via a CAP_CONTROL_SHARE request.
 *
 * Returns 0 on success; on failure prints a diagnostic with the
 * error code and calls BUG().
 */
int cap_share_all_with_space()
{
	int ret;

	ret = l4_capability_control(CAP_CONTROL_SHARE,
				    CAP_SHARE_ALL_SPACE, 0);
	if (ret < 0) {
		printf("l4_capability_control() sharing of "
		       "capabilities failed.\n Could not "
		       "complete CAP_CONTROL_SHARE request. err=%d\n",
		       ret);
		BUG();
	}
	return 0;
}
@@ -375,6 +392,9 @@ void main(void)
/* Read all capabilities */
cap_read_all();
/* Share all with space */
cap_share_all_with_space();
/* Scan for timer devices in capabilities */
timer_probe_devices();

View File

@@ -18,17 +18,6 @@ static inline void enable_irqs()
);
}
/*
 * Query whether IRQs are currently enabled on this CPU.
 *
 * Reads the CPSR and tests the IRQ-disable bit (0x80): returns 1
 * when the bit is clear (IRQs enabled), 0 when it is set (masked).
 *
 * The previous version hard-coded r0/r1 inside the asm with no
 * output or clobber constraints, silently trashing registers and
 * condition flags behind the compiler's back (undefined behavior
 * under GCC extended-asm rules). This version declares a proper
 * output operand and a "cc" clobber for the TST instruction.
 */
static inline int irqs_enabled()
{
	unsigned int enabled;

	__asm__ __volatile__(
		"mrs	%0, cpsr_fc\n"
		"tst	%0, #0x80\n"	/* ARM_IRQ_BIT. See asm.h for TST inst. */
		"moveq	%0, #1\n"
		"movne	%0, #0\n"
		: "=r" (enabled)
		:
		: "cc"
	);
	return enabled;
}
static inline void disable_irqs()
{
@@ -39,6 +28,8 @@ static inline void disable_irqs()
);
}
int irqs_enabled();
/* Disable the irqs unconditionally, but also keep the previous state such that
* if it was already disabled before the call, the restore call would retain
* this state. */
@@ -58,13 +49,7 @@ void irq_local_disable_save(unsigned long *state);
/* Simply change it back to original state supplied in @flags. This might enable
* or retain disabled state of the irqs for example. Useful for nested calls. */
/* Simply change it back to original state supplied in @state. This might
 * enable or retain disabled state of the irqs. Useful for nested calls.
 *
 * The "memory" clobber acts as a compiler barrier so that memory
 * accesses inside the critical section are not reordered past the
 * irq-state change; cpsr_fc also rewrites the flags field, hence
 * the "cc" clobber. */
static inline void irq_local_restore(unsigned long state)
{
	__asm__ __volatile__ (
		"msr	cpsr_fc, %0\n"
		:
		: "r" (state)
		: "cc", "memory"
	);
}
void irq_local_restore(unsigned long state);
static inline void irq_local_enable()
{

View File

@@ -61,7 +61,7 @@ do { \
(wqh)->sleepers++; \
list_insert_tail(&wq.task_list, \
&(wqh)->task_list); \
printk("(%d) waiting...\n", current->tid); \
/* printk("(%d) waiting...\n", current->tid); */\
sched_prepare_sleep(); \
spin_unlock_irq(&(wqh)->slock, irqsave); \
schedule(); \

View File

@@ -115,7 +115,7 @@ int irq_wait(l4id_t irq_index)
WAIT_EVENT(&desc->wqh_irq,
utcb->notify[desc->task_notify_slot] != 0,
ret);
printk("Didn't sleep. utcb->notify[%d]=%d\n", desc->task_notify_slot, utcb->notify[desc->task_notify_slot]);
if (ret < 0)
return ret;
else

View File

@@ -15,6 +15,14 @@ BEGIN_PROC(irq_local_disable_save)
mov pc, lr
END_PROC(irq_local_disable_save)
/*
 * void irq_local_restore(unsigned long state)
 *
 * r0 = last cpsr state
 *
 * Writes the saved value back into CPSR (flags + control fields),
 * restoring the IRQ mask state the caller previously saved with
 * irq_local_disable_save().
 */
BEGIN_PROC(irq_local_restore)
msr cpsr_fc, r0 @ Write r0 to cpsr
mov pc, lr @ Return to caller
END_PROC(irq_local_restore)
/*
* r0 = byte address to read from.
*/
@@ -25,3 +33,11 @@ BEGIN_PROC(l4_atomic_dest_readb)
mov pc, lr @ Return byte location value
END_PROC(l4_atomic_dest_readb)
/*
 * int irqs_enabled(void)
 *
 * Returns 1 in r0 if IRQs are enabled (CPSR I bit 0x80 clear),
 * 0 if they are masked.
 */
BEGIN_PROC(irqs_enabled)
mrs r1, cpsr_fc @ Read current program status into r1
tst r1, #0x80 @ Test the IRQ-disable (I) bit
moveq r0, #1 @ Bit clear -> irqs enabled
movne r0, #0 @ Bit set -> irqs masked
mov pc, lr @ Return result in r0
END_PROC(irqs_enabled)

View File

@@ -33,7 +33,6 @@ extern unsigned int current_irq_nest_count;
/* This ensures no scheduling occurs after voluntary preempt_disable() */
static int voluntary_preempt = 0;
void sched_lock_runqueues(unsigned long *irqflags)
{
spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
@@ -359,14 +358,16 @@ void schedule()
{
struct ktcb *next;
/* Should not schedule with preemption disabled or in nested irq */
/* Should not schedule with preemption
* disabled or in nested irq */
BUG_ON(voluntary_preempt);
BUG_ON(in_nested_irq_context());
/* Should not have more ticks than SCHED_TICKS */
BUG_ON(current->ticks_left > SCHED_TICKS);
/* Cannot have any irqs that schedule after this */
/* If coming from process path, cannot have
* any irqs that schedule after this */
preempt_disable();
/* Reset schedule flag */
@@ -410,6 +411,7 @@ void schedule()
}
/* Determine the next task to be run */
get_runnable_task:
if (scheduler.rq_runnable->total > 0) {
next = link_to_struct(scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
@@ -420,7 +422,20 @@ void schedule()
scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
idle_task();
//printk("Idle task.\n");
/* Poll forever for new tasks */
if (in_task_context()) {
goto get_runnable_task;
} else {
/*
* If irq, return to current context without
* putting into runqueue. We want the task to
* get into the get_runnable_task loop in
* process context.
*/
next = current;
goto switch_out;
}
}
}
@@ -443,6 +458,7 @@ void schedule()
next->sched_granule = SCHED_GRANULARITY;
/* Finish */
switch_out:
disable_irqs();
preempt_enable();
context_switch(next);

View File

@@ -165,7 +165,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
task_unset_wqh(sleeper);
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
//printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
spin_unlock_irq(&wqh->slock, irqflags);
if (flags & WAKEUP_SYNC)