Mirror of https://github.com/drasko/codezero.git
Fixes to the scheduler timeslice management.
This fixes two issues.

The first is the time distribution when a new child thread is created. If the parent had only one tick left, both the child and the parent ended up with zero ticks. Combined with current_irq_nest_count = 1 and voluntary_preempt = 0, this prevented the scheduler from being invoked again. The parent's remaining ticks are now split so that each of the two threads keeps at least one tick.

The second is the overall time distribution. When a thread runs out of time, its new timeslice is calculated by the formula below:

    new_timeslice = (thread_prio * SCHED_TICKS) / total_prio

Previously total_prio was the sum of the priorities of all threads in the system, so with enough threads the integer division could yield zero ticks. Now total_prio is the fixed sum over the priority types in the system (TASK_PRIO_TOTAL). Every thread gets a timeslice in proportion to its priority, so there is no risk of receiving zero ticks.
Committed by: Bahadir Balban
Parent:       90cfaca7a2
Commit:       2571dabc18
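
For illustration only (not part of the commit): a minimal standalone sketch of the recharge formula described above, using the SCHED_TICKS and TASK_PRIO_* values from the diff below. The new_timeslice() helper and the main() driver are hypothetical; the kernel performs this computation inside the scheduler, not in a userspace program.

    #include <stdio.h>

    /* Values taken from the diff below. */
    #define SCHED_TICKS      100
    #define TASK_PRIO_TOTAL  30
    #define TASK_PRIO_SERVER 6
    #define TASK_PRIO_NORMAL 4
    #define TASK_PRIO_LOW    2

    /* Hypothetical helper mirroring the commit message formula:
     * new_timeslice = (thread_prio * SCHED_TICKS) / total_prio */
    static int new_timeslice(int thread_prio, int total_prio)
    {
            return (thread_prio * SCHED_TICKS) / total_prio;
    }

    int main(void)
    {
            /* Old scheme: total_prio was the sum of all thread priorities.
             * With, say, 101 normal-priority threads the sum is 404 and the
             * integer division truncates to zero ticks. */
            printf("old, 101 threads:      %d\n",
                   new_timeslice(TASK_PRIO_NORMAL, 101 * TASK_PRIO_NORMAL));

            /* New scheme: total_prio is the fixed TASK_PRIO_TOTAL, so even
             * the lowest priority gets a non-zero slice. */
            printf("new, TASK_PRIO_LOW:    %d\n",
                   new_timeslice(TASK_PRIO_LOW, TASK_PRIO_TOTAL));
            printf("new, TASK_PRIO_NORMAL: %d\n",
                   new_timeslice(TASK_PRIO_NORMAL, TASK_PRIO_TOTAL));
            printf("new, TASK_PRIO_SERVER: %d\n",
                   new_timeslice(TASK_PRIO_SERVER, TASK_PRIO_TOTAL));
            return 0;
    }

With these values the slices come out as 6, 13 and 20 ticks for low, normal and server priority, while the old scheme with 101 normal-priority threads truncates to 0.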
@@ -17,6 +17,7 @@
 #define TASK_PRIO_SERVER 6
 #define TASK_PRIO_NORMAL 4
 #define TASK_PRIO_LOW 2
+#define TASK_PRIO_TOTAL 30
 
 /* Ticks per second, try ticks = 1000 + timeslice = 1 for regressed preemption test. */
 #define SCHED_TICKS 100
@@ -25,7 +26,7 @@
  * A task can run continuously at this granularity,
  * even if it has a greater total time slice.
  */
-#define SCHED_GRANULARITY SCHED_TICKS/10
+#define SCHED_GRANULARITY SCHED_TICKS/50
 
 static inline struct ktcb *current_task(void)
 {
@@ -256,8 +256,9 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
 	new->context.pc = orig->syscall_regs->lr_usr;
 
 	/* Distribute original thread's ticks into two threads */
-	new->ticks_left = orig->ticks_left / 2;
-	orig->ticks_left /= 2;
+	new->ticks_left = (orig->ticks_left + 1) >> 1;
+	if (!(orig->ticks_left >>= 1))
+		orig->ticks_left = 1;
 
 	return 0;
 }
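
A side note on the hunk above (illustration only, not part of the commit): the child's share rounds up and the parent's rounds down but is clamped to one tick, so the one-tick-parent case from the commit message no longer produces two zero-tick threads. A minimal standalone sketch, with an illustrative struct standing in for the two ktcb ticks_left fields:

    #include <stdio.h>

    /* Illustrative stand-in for the two ticks_left fields touched above;
     * not the kernel's ktcb. */
    struct split {
            int parent;
            int child;
    };

    /* Mirrors the new split: the child rounds up, the parent rounds down
     * but keeps at least one tick. */
    static struct split split_ticks(int ticks_left)
    {
            struct split s;

            s.child = (ticks_left + 1) >> 1;
            ticks_left >>= 1;
            if (!ticks_left)
                    ticks_left = 1;
            s.parent = ticks_left;
            return s;
    }

    int main(void)
    {
            /* The problem case from the commit message: the parent had one
             * tick left.  The old halving gave 0/0; now both keep one tick. */
            struct split s = split_ticks(1);
            printf("parent=%d child=%d\n", s.parent, s.child);
            return 0;
    }

Note that in the one-tick case the total grows from 1 to 2; the commit trades exact tick conservation for never leaving either thread without a tick.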
@@ -119,7 +119,7 @@ void sched_init(struct scheduler *scheduler)
 
 	scheduler->rq_runnable = &scheduler->sched_rq[0];
 	scheduler->rq_expired = &scheduler->sched_rq[1];
-	scheduler->prio_total = 0;
+	scheduler->prio_total = TASK_PRIO_TOTAL;
 }
 
 /* Swap runnable and expired runqueues. */
@@ -463,10 +463,8 @@ void schedule()
 	}
 
 	/* New tasks affect runqueue total priority. */
-	if (next->flags & TASK_RESUMING) {
-		scheduler.prio_total += next->priority;
+	if (next->flags & TASK_RESUMING)
 		next->flags &= ~TASK_RESUMING;
-	}
 
 	/* Zero ticks indicates task hasn't ran since last rq swap */
 	if (next->ticks_left == 0) {