Mirror of https://github.com/drasko/codezero.git
Added handling of task pending events from scheduler.

Previously, all pending events were handled on return from exceptions in process context. This caused threads that run purely in userspace and take no exceptions to leave their pending events unhandled indefinitely. Now the scheduler handles them in irq context as well.
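
In outline, the check for pending task flags now lives in schedule(), which the timer irq also reaches, so a thread that never traps into the kernel still gets its events processed. The following is a condensed sketch of the new path, pieced together from the hunks below; everything outside the quoted condition is abbreviated, not a literal copy of the kernel code.

        /* Condensed sketch of the new pending-event path in schedule();
         * assembled from the hunks below, with unrelated logic elided. */
        void schedule(void)
        {
                /* ... wake-up of sleeping tasks with pending events, as before ... */

                /*
                 * New: a runnable task preempted while in userspace has no
                 * unfinished job in the kernel, so its pending events can be
                 * handled here, in irq context, even if the task never takes
                 * an exception of its own.
                 */
                if ((current->flags & TASK_PENDING_SIGNAL) &&
                    current->state == TASK_RUNNABLE &&
                    TASK_IN_USER(current)) {
                        if (current->flags & TASK_SUSPENDING)
                                sched_suspend_async();
                        else if (current->flags & TASK_EXITING)
                                sched_exit_async();
                }

                /* ... pick the next runnable task and context switch ... */
        }
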
@@ -15,8 +15,10 @@
 
 int exit_test_thread(void *arg)
 {
+        while (1)
+                ;
         //l4_thread_switch(0);
-        l4_exit(5);
+        //l4_exit(5);
         return 0;
 }
 
@@ -36,7 +38,6 @@ int exit_test(void)
 
         // l4_thread_switch(0);
 
-#if 0
         /* Kill it */
         printf("Killing Thread (%d).\n", ids.tid);
         if ((ret = l4_thread_control(THREAD_DESTROY, &ids)) < 0)
@@ -44,8 +45,8 @@ int exit_test(void)
         else
                 printf("Success: Killed Thread (%d)\n", ids.tid);
 
-#endif
 
+#if 0
         /* Wait on it */
         printf("Waiting on Thread (%d) to exit.\n", ids.tid);
         if ((ret = l4_thread_control(THREAD_WAIT, &ids)) >= 0)
@@ -54,6 +55,7 @@ int exit_test(void)
                 printf("Error. Wait on (%d) failed. err = %d\n",
                        ids.tid, ret);
 
+#endif
         return 0;
 out_err:
         BUG();

@@ -8,6 +8,7 @@
 
 #include INC_ARCH(asm.h)
 
+
 static inline void enable_irqs()
 {
         __asm__ __volatile__(
@@ -38,30 +39,29 @@ static inline void disable_irqs()
         );
 }
 
-#if 0 /* These will be useful for nested irq disable/enable calls */
 /* Disable the irqs unconditionally, but also keep the previous state such that
  * if it was already disabled before the call, the restore call would retain
  * this state. */
-static inline void irq_local_disable_save(unsigned long *flags)
+static inline void irq_local_disable_save(unsigned long *state)
 {
         unsigned long temp;
         __asm__ __volatile__ (
                 "mrs %0, cpsr_fc\n"
                 "orr %1, %0, #0x80\n"
                 "msr cpsr_fc, %1\n"
-                : "=r"(*flags), "=r" (temp)
+                :: "r" (*state), "r" (temp)
         );
 }
 /* Simply change it back to original state supplied in @flags. This might enable
  * or retain disabled state of the irqs for example. Useful for nested calls. */
-static inline void irq_local_restore(unsigned long flags)
+static inline void irq_local_restore(unsigned long state)
 {
         __asm__ __volatile__ (
                 "msr cpsr_fc, %0\n"
-                : "r" (flags)
+                :: "r" (state)
         );
 }
-#endif
+
 static inline void irq_local_enable()
 {
         enable_irqs();
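
The save/restore pair enabled above is meant for nested irq-off sections, as its own comments say. A minimal usage sketch follows, assuming only the two helpers as declared in this hunk; outer() and inner() are illustrative names, not kernel functions.

        /* Sketch: nested irq-off sections with the save/restore pair above.
         * outer() may be entered with irqs on or off; each level restores
         * exactly the state it found. */
        static void inner(void)
        {
                unsigned long state;

                irq_local_disable_save(&state); /* irqs off, previous cpsr kept */
                /* ... touch data shared with irq handlers ... */
                irq_local_restore(state);       /* back to caller's irq state */
        }

        static void outer(void)
        {
                unsigned long state;

                irq_local_disable_save(&state);
                inner();                        /* irqs remain off across inner() */
                irq_local_restore(state);       /* original irq state returns here */
        }
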
@@ -39,19 +39,21 @@ static inline void spin_unlock(struct spinlock *s)
  * - To be used for synchronising against processes and irqs
  *   on other cpus.
  */
-static inline void spin_lock_irq(struct spinlock *s)
+static inline void spin_lock_irq(struct spinlock *s,
+                                 unsigned long state)
 {
-        irq_local_disable();    /* Even in UP an irq could deadlock us */
+        irq_local_disable_save(&state);
 #if defined(CONFIG_SMP)
         __spin_lock(&s->lock);
 #endif
 }
 
-static inline void spin_unlock_irq(struct spinlock *s)
+static inline void spin_unlock_irq(struct spinlock *s,
+                                   unsigned long state)
 {
 #if defined(CONFIG_SMP)
         __spin_unlock(&s->lock);
 #endif
-        irq_local_enable();
+        irq_local_restore(state);
 }
 #endif /* __LIB__SPINLOCK_H__ */
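
With the new signatures the caller supplies the irq-state word itself. A hypothetical call site is sketched below; my_lock and update_shared() are illustrative, not from the tree. Note that spin_lock_irq() receives state by value, so the cpsr it saves via irq_local_disable_save(&state) stays in its local copy; what spin_unlock_irq() restores is whatever value the caller passed in.

        /* Sketch: hypothetical caller of the new spin_lock_irq() signature.
         * `my_lock` and update_shared() are placeholders for illustration. */
        static struct spinlock my_lock;

        void update_shared(void)
        {
                unsigned long state = 0;

                spin_lock_irq(&my_lock, state);   /* irqs off; lock taken on SMP */
                /* ... critical section shared with irq handlers ... */
                spin_unlock_irq(&my_lock, state); /* lock dropped; cpsr set to state */
        }
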
@@ -74,15 +74,6 @@ void do_irq(void)
                 printk("Spurious or broken irq\n"); BUG();
         }
         irq_enable(irq_index);
-#if 0
-        /* Process any pending flags for currently runnable task */
-        if (!in_nested_irq_context()) {
-                if (in_user()) {
-                        if (current->flags & TASK_SUSPENDING)
-                                sched_suspend_async();
-                }
-        }
-#endif
 }
 
 

@@ -33,6 +33,7 @@ extern unsigned int current_irq_nest_count;
 /* This ensures no scheduling occurs after voluntary preempt_disable() */
 static int voluntary_preempt = 0;
 
+
 void sched_lock_runqueues(void)
 {
         spin_lock(&scheduler.sched_rq[0].lock);
@@ -89,7 +90,6 @@ int in_task_context(void)
         return !in_irq_context();
 }
 
-
 /*
  * In current implementation, if all task are asleep it is considered
  * a bug. We use idle_task() to investigate.
@@ -241,6 +241,20 @@ void sched_exit_sync(void)
         schedule();
 }
 
+void sched_exit_async(void)
+{
+        preempt_disable();
+        sched_rq_remove_task(current);
+        current->state = TASK_DEAD;
+        current->flags &= ~TASK_EXITING;
+        preempt_enable();
+
+        if (current->pagerid != current->tid)
+                wake_up(&current->wqh_pager, 0);
+
+        need_resched = 1;
+}
+
 /*
  * NOTE: Could do these as sched_prepare_suspend()
  * + schedule() or need_resched = 1
@@ -368,6 +382,8 @@ void schedule()
         }
 
         /*
+         * FIXME: Are these smp-safe?
+         *
          * If task is about to sleep and
          * it has pending events, wake it up.
          */
@@ -375,6 +391,20 @@ void schedule()
             current->state == TASK_SLEEPING)
                 wake_up_task(current, WAKEUP_INTERRUPT);
 
+        /*
+         * If task has pending events, and is in userspace
+         * (guaranteed to have no unfinished jobs in kernel)
+         * handle those events
+         */
+        if ((current->flags & TASK_PENDING_SIGNAL) &&
+            current->state == TASK_RUNNABLE &&
+            TASK_IN_USER(current)) {
+                if (current->flags & TASK_SUSPENDING)
+                        sched_suspend_async();
+                else if (current->flags & TASK_EXITING)
+                        sched_exit_async();
+        }
+
         /* Determine the next task to be run */
         if (scheduler.rq_runnable->total > 0) {
                 next = link_to_struct(scheduler.rq_runnable->task_list.next,