From 27c0c34e3dd23a3930ae9a99acb5caa801c43dc1 Mon Sep 17 00:00:00 2001
From: Bahadir Balban
Date: Mon, 14 Dec 2009 11:48:40 +0200
Subject: [PATCH] Irqs are working. Scheduler modified to recover when no tasks are runnable.

- Userspace irq handling for the timer.
- If no runnable task is left, the scheduler busy-loops in the user context
  of the last runnable task until a new task becomes runnable.
---
 conts/baremetal/timer_service/main.c | 22 +++++++++++++++++++++-
 include/l4/arch/arm/exception.h      | 21 +++------------------
 include/l4/lib/wait.h                |  2 +-
 src/api/irq.c                        |  2 +-
 src/arch/arm/v5/irq.S                | 16 ++++++++++++++++
 src/generic/scheduler.c              | 24 ++++++++++++++++++++----
 src/lib/wait.c                       |  2 +-
 7 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/conts/baremetal/timer_service/main.c b/conts/baremetal/timer_service/main.c
index 3b6c74b..c62ac85 100644
--- a/conts/baremetal/timer_service/main.c
+++ b/conts/baremetal/timer_service/main.c
@@ -52,7 +52,24 @@ int cap_read_all()
 		       "complete CAP_CONTROL_READ_CAPS request.\n");
 		BUG();
 	}
-	//cap_array_print(total_caps, caparray);
+	cap_array_print(total_caps, caparray);
+
+	return 0;
+}
+
+int cap_share_all_with_space()
+{
+	int err;
+
+	/* Share all capabilities */
+	if ((err = l4_capability_control(CAP_CONTROL_SHARE,
+					 CAP_SHARE_ALL_SPACE, 0)) < 0) {
+		printf("l4_capability_control() sharing of "
+		       "capabilities failed.\n Could not "
+		       "complete CAP_CONTROL_SHARE request. err=%d\n",
+		       err);
+		BUG();
+	}
 
 	return 0;
 }
@@ -375,6 +392,9 @@ void main(void)
 	/* Read all capabilities */
 	cap_read_all();
 
+	/* Share all with space */
+	cap_share_all_with_space();
+
 	/* Scan for timer devices in capabilities */
 	timer_probe_devices();
 
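Note: the commit message mentions userspace irq handling for the timer, but the hunk above only adds the capability sharing the service needs before it can use its irq and device capabilities. As a rough sketch of how the service's irq loop could then look, assuming hypothetical wrappers timer_irq_setup() and timer_irq_wait() around the kernel irq interface (the real libl4 calls are not shown in this patch), and taking from src/api/irq.c only the fact that the kernel blocks the caller until its utcb notify slot becomes non-zero:

/*
 * Hypothetical outline of the timer service's irq loop.
 * timer_irq_setup() and timer_irq_wait() are assumed wrappers, not the
 * actual libl4 API; the kernel side (irq_wait() in src/api/irq.c) sleeps
 * until utcb->notify[slot] != 0 and then returns to the caller.
 */
#include <stdio.h>

extern int timer_irq_setup(int irq_index);	/* register + enable timer irq */
extern int timer_irq_wait(int irq_index);	/* block until next notification */

static unsigned long ticks;

void timer_irq_loop(int irq_index)
{
	if (timer_irq_setup(irq_index) < 0) {
		printf("Timer irq registration failed.\n");
		return;
	}

	for (;;) {
		/* Sleeps in the kernel until the irq handler notifies us */
		if (timer_irq_wait(irq_index) < 0)
			break;

		ticks++;	/* count notifications, one per timer tick */
	}
}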
diff --git a/include/l4/arch/arm/exception.h b/include/l4/arch/arm/exception.h
index 953b20f..c2c2bca 100644
--- a/include/l4/arch/arm/exception.h
+++ b/include/l4/arch/arm/exception.h
@@ -18,17 +18,6 @@ static inline void enable_irqs()
 	);
 }
 
-static inline int irqs_enabled()
-{
-	register unsigned int enabled asm("r1");
-	__asm__ __volatile__(
-		"mrs	r0, cpsr_fc\n"
-		"tst	r0, #0x80\n"	/* ARM_IRQ_BIT. See asm.h for TST inst. */
-		"moveq	r1, #1\n"
-		"movne	r1, #0\n"
-	);
-	return enabled;
-}
 
 static inline void disable_irqs()
 {
@@ -39,6 +28,8 @@ static inline void disable_irqs()
 	);
 }
 
+int irqs_enabled();
+
 /* Disable the irqs unconditionally, but also keep the previous state such that
  * if it was already disabled before the call, the restore call would retain
  * this state. */
@@ -58,13 +49,7 @@ void irq_local_disable_save(unsigned long *state);
 /* Simply change it back to original state supplied in @flags. This might enable
  * or retain disabled state of the irqs for example. Useful for nested calls.
  */
-static inline void irq_local_restore(unsigned long state)
-{
-	__asm__ __volatile__ (
-		"msr	cpsr_fc, %0\n"
-		:: "r" (state)
-	);
-}
+void irq_local_restore(unsigned long state);
 
 static inline void irq_local_enable()
 {
diff --git a/include/l4/lib/wait.h b/include/l4/lib/wait.h
index 953a829..ef81fe8 100644
--- a/include/l4/lib/wait.h
+++ b/include/l4/lib/wait.h
@@ -61,7 +61,7 @@ do {						\
 	(wqh)->sleepers++;				\
 	list_insert_tail(&wq.task_list,			\
 			 &(wqh)->task_list);		\
-	printk("(%d) waiting...\n", current->tid);	\
+	/* printk("(%d) waiting...\n", current->tid); */\
 	sched_prepare_sleep();				\
 	spin_unlock_irq(&(wqh)->slock, irqsave);	\
 	schedule();					\
diff --git a/src/api/irq.c b/src/api/irq.c
index 1a6f9b2..cfa1ab2 100644
--- a/src/api/irq.c
+++ b/src/api/irq.c
@@ -115,7 +115,7 @@ int irq_wait(l4id_t irq_index)
 	WAIT_EVENT(&desc->wqh_irq,
 		   utcb->notify[desc->task_notify_slot] != 0,
 		   ret);
-	printk("Didn't sleep. utcb->notify[%d]=%d\n", desc->task_notify_slot, utcb->notify[desc->task_notify_slot]);
+
 	if (ret < 0)
 		return ret;
 	else
diff --git a/src/arch/arm/v5/irq.S b/src/arch/arm/v5/irq.S
index 4bc607f..480e80c 100644
--- a/src/arch/arm/v5/irq.S
+++ b/src/arch/arm/v5/irq.S
@@ -15,6 +15,14 @@ BEGIN_PROC(irq_local_disable_save)
 	mov	pc, lr
 END_PROC(irq_local_disable_save)
 
+/*
+ * r0 = last cpsr state
+ */
+BEGIN_PROC(irq_local_restore)
+	msr	cpsr_fc, r0		@ Write r0 to cpsr
+	mov	pc, lr
+END_PROC(irq_local_restore)
+
 /*
  * r0 = byte address to read from.
  */
@@ -25,3 +33,11 @@ BEGIN_PROC(l4_atomic_dest_readb)
 	mov	pc, lr		@ Return byte location value
 END_PROC(l4_atomic_dest_readb)
 
+BEGIN_PROC(irqs_enabled)
+	mrs	r1, cpsr_fc
+	tst	r1, #0x80
+	moveq	r0, #1
+	movne	r0, #0
+	mov	pc, lr
+END_PROC(irqs_enabled)
+
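Note: the irqs_enabled() inline removed from exception.h read its result out of r1 without declaring any asm outputs or clobbers, which is fragile under optimization; moving it (and irq_local_restore()) out of line into irq.S sidesteps that. For comparison only, a constraint-correct inline C version would look roughly like the sketch below, assuming the ARMv5 CPSR I bit at 0x80 as in the removed comment; the patch opts for the pure-assembly routines instead.

/*
 * Sketch only: constraint-correct inline-asm equivalents of the new
 * irq.S routines, shown for comparison with the removed inlines.
 */
#define ARM_IRQ_BIT	0x80	/* I bit set in the CPSR means irqs are masked */

static inline int irqs_enabled_sketch(void)
{
	unsigned long cpsr;

	/* A proper "=r" output lets the compiler pick a safe register */
	__asm__ __volatile__("mrs	%0, cpsr_fc" : "=r" (cpsr));

	return !(cpsr & ARM_IRQ_BIT);
}

static inline void irq_local_restore_sketch(unsigned long state)
{
	/* Writing the saved CPSR back may re-enable or keep irqs masked */
	__asm__ __volatile__("msr	cpsr_fc, %0"
			     : : "r" (state) : "memory", "cc");
}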
diff --git a/src/generic/scheduler.c b/src/generic/scheduler.c
index 9989e1d..628e99c 100644
--- a/src/generic/scheduler.c
+++ b/src/generic/scheduler.c
@@ -33,7 +33,6 @@ extern unsigned int current_irq_nest_count;
 
 /* This ensures no scheduling occurs after voluntary preempt_disable() */
 static int voluntary_preempt = 0;
-
 void sched_lock_runqueues(unsigned long *irqflags)
 {
 	spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
@@ -359,14 +358,16 @@ void schedule()
 {
 	struct ktcb *next;
 
-	/* Should not schedule with preemption disabled or in nested irq */
+	/* Should not schedule with preemption
+	 * disabled or in nested irq */
 	BUG_ON(voluntary_preempt);
 	BUG_ON(in_nested_irq_context());
 
 	/* Should not have more ticks than SCHED_TICKS */
 	BUG_ON(current->ticks_left > SCHED_TICKS);
 
-	/* Cannot have any irqs that schedule after this */
+	/* If coming from process path, cannot have
+	 * any irqs that schedule after this */
 	preempt_disable();
 
 	/* Reset schedule flag */
@@ -410,6 +411,7 @@ void schedule()
 	}
 
 	/* Determine the next task to be run */
+get_runnable_task:
 	if (scheduler.rq_runnable->total > 0) {
 		next = link_to_struct(scheduler.rq_runnable->task_list.next,
 				      struct ktcb, rq_list);
@@ -420,7 +422,20 @@
 			scheduler.rq_runnable->task_list.next,
 			struct ktcb, rq_list);
 	} else {
-		idle_task();
+		//printk("Idle task.\n");
+		/* Poll forever for new tasks */
+		if (in_task_context()) {
+			goto get_runnable_task;
+		} else {
+			/*
+			 * If irq, return to current context without
+			 * putting into runqueue. We want the task to
+			 * get into the get_runnable_task loop in
+			 * process context.
+			 */
+			next = current;
+			goto switch_out;
+		}
 	}
 }
 
@@ -443,6 +458,7 @@ void schedule()
 	next->sched_granule = SCHED_GRANULARITY;
 
 	/* Finish */
+switch_out:
 	disable_irqs();
 	preempt_enable();
 	context_switch(next);
diff --git a/src/lib/wait.c b/src/lib/wait.c
index b3f2e2c..a4d3a9d 100644
--- a/src/lib/wait.c
+++ b/src/lib/wait.c
@@ -165,7 +165,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
 		task_unset_wqh(sleeper);
 		if (flags & WAKEUP_INTERRUPT)
 			sleeper->flags |= TASK_INTERRUPTED;
-		printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
+		//printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
 		spin_unlock_irq(&wqh->slock, irqflags);
 
 		if (flags & WAKEUP_SYNC)
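Note: to summarize the scheduler change, when the runnable queue is empty schedule() no longer calls idle_task(); if it was entered from process context it spins on the get_runnable_task label until something becomes runnable, and if it was entered from an irq it returns to the interrupted task without requeueing it, so the polling happens on the next process-context entry. A condensed restatement follows; pick_runnable() and switch_to() are stand-ins for the real runqueue selection and context_switch(), not kernel functions.

/*
 * Condensed sketch of the new no-runnable-task handling in schedule().
 * pick_runnable() and switch_to() are illustrative stand-ins;
 * in_task_context() and current are the names used in the patch.
 */
struct ktcb;

extern struct ktcb *current;
extern int in_task_context(void);
extern struct ktcb *pick_runnable(void);	/* NULL when runqueues are empty */
extern void switch_to(struct ktcb *next);

void schedule_sketch(void)
{
	struct ktcb *next;

get_runnable_task:
	if (!(next = pick_runnable())) {
		/* Process path: busy-poll until a task becomes runnable */
		if (in_task_context())
			goto get_runnable_task;

		/*
		 * Irq path: resume the interrupted task without putting it
		 * back on a runqueue; it will poll on its next entry here.
		 */
		next = current;
	}

	switch_to(next);
}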