From 2571dabc185ad1ba648845ccf437a562f7851f03 Mon Sep 17 00:00:00 2001
From: Bora Sahin
Date: Sun, 18 Oct 2009 20:20:40 +0300
Subject: [PATCH] Fixes to the scheduler timeslice management.

The first fix concerns tick distribution when a new child thread is
created. If the parent had only one tick left, integer division gave
both the child and the parent zero ticks. Combined with
current_irq_nest_count = 1 and voluntary_preempt = 0, this prevented
the scheduler from being invoked at all. The child now receives the
rounded-up half of the ticks and the parent always keeps at least one.

The second fix concerns the overall time distribution. When a thread
runs out of time, its new timeslice is calculated with the formula
below:

	new_timeslice = (thread_prio * SCHED_TICKS) / total_prio

When total_prio is the sum of the priorities of all threads in the
system, the result can round down to zero ticks as the number of
threads grows. total_prio is now the fixed sum of the defined priority
types (TASK_PRIO_TOTAL), so every thread gets a timeslice proportional
to its priority and no thread is left with zero ticks.
---
 include/l4/generic/scheduler.h | 3 ++-
 src/api/thread.c               | 5 +++--
 src/generic/scheduler.c        | 6 ++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/l4/generic/scheduler.h b/include/l4/generic/scheduler.h
index ef245d2..ee3298c 100644
--- a/include/l4/generic/scheduler.h
+++ b/include/l4/generic/scheduler.h
@@ -17,6 +17,7 @@
 #define TASK_PRIO_SERVER	6
 #define TASK_PRIO_NORMAL	4
 #define TASK_PRIO_LOW		2
+#define TASK_PRIO_TOTAL		30
 
 /* Ticks per second, try ticks = 1000 + timeslice = 1 for regressed preemption test. */
 #define SCHED_TICKS		100
@@ -25,7 +26,7 @@
  * A task can run continuously at this granularity,
  * even if it has a greater total time slice.
  */
-#define SCHED_GRANULARITY	SCHED_TICKS/10
+#define SCHED_GRANULARITY	SCHED_TICKS/50
 
 static inline struct ktcb *current_task(void)
 {
diff --git a/src/api/thread.c b/src/api/thread.c
index 5115b7c..65e5ce7 100644
--- a/src/api/thread.c
+++ b/src/api/thread.c
@@ -256,8 +256,9 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
 	new->context.pc = orig->syscall_regs->lr_usr;
 
 	/* Distribute original thread's ticks into two threads */
-	new->ticks_left = orig->ticks_left / 2;
-	orig->ticks_left /= 2;
+	new->ticks_left = (orig->ticks_left + 1) >> 1;
+	if (!(orig->ticks_left >>= 1))
+		orig->ticks_left = 1;
 
 	return 0;
 }
diff --git a/src/generic/scheduler.c b/src/generic/scheduler.c
index b69a92a..55d6534 100644
--- a/src/generic/scheduler.c
+++ b/src/generic/scheduler.c
@@ -119,7 +119,7 @@ void sched_init(struct scheduler *scheduler)
 	scheduler->rq_runnable = &scheduler->sched_rq[0];
 	scheduler->rq_expired = &scheduler->sched_rq[1];
 
-	scheduler->prio_total = 0;
+	scheduler->prio_total = TASK_PRIO_TOTAL;
 }
 
 /* Swap runnable and expired runqueues. */
@@ -463,10 +463,8 @@ void schedule()
 	}
 
 	/* New tasks affect runqueue total priority. */
-	if (next->flags & TASK_RESUMING) {
-		scheduler.prio_total += next->priority;
+	if (next->flags & TASK_RESUMING)
 		next->flags &= ~TASK_RESUMING;
-	}
 
 	/* Zero ticks indicates task hasn't ran since last rq swap */
 	if (next->ticks_left == 0) {
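
A minimal standalone sketch (not part of the patch) of the arithmetic the
commit message describes. The thread count and per-thread priority are
made-up illustration values; SCHED_TICKS, TASK_PRIO_TOTAL and TASK_PRIO_LOW
mirror the constants from include/l4/generic/scheduler.h.

	#include <stdio.h>

	#define SCHED_TICKS	100	/* ticks per second */
	#define TASK_PRIO_TOTAL	30	/* fixed sum of the priority types */
	#define TASK_PRIO_LOW	2

	int main(void)
	{
		/* Assumed load for illustration: 120 low-priority threads. */
		int nthreads = 120;
		int sum_prio = nthreads * TASK_PRIO_LOW;	/* old total_prio = 240 */

		/* Old scheme: total_prio grows with the number of threads. */
		int old_slice = (TASK_PRIO_LOW * SCHED_TICKS) / sum_prio;

		/* New scheme: total_prio is the fixed TASK_PRIO_TOTAL constant. */
		int new_slice = (TASK_PRIO_LOW * SCHED_TICKS) / TASK_PRIO_TOTAL;

		/* Prints "old: 0 ticks, new: 6 ticks" -- the old formula rounds
		 * down to zero once enough threads exist, the new one never does. */
		printf("old: %d ticks, new: %d ticks\n", old_slice, new_slice);
		return 0;
	}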