New scheduler and interruptible blocking.

A new scheduler replaces the old one.
  - The sched_xxx_notify() calls that asked the scheduler to change task state are gone.
  - Tasks now have priorities and different timeslices.
  - A one-second interval of ticks is distributed among processes as timeslices (sketched after this list).
  - There are just two queues: runnable and expired.
  - SCHED_GRANULARITY caps how long a task may run continuously, even if its total timeslice is larger.
  - The scheduler can now detect a safe point and suspend a task.
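As a rough illustration of the timeslice arithmetic described in the list above, the following user-space sketch divides the one-second tick budget among tasks in proportion to their priorities and caps each turn on the CPU at SCHED_GRANULARITY ticks; a task whose slice is spent drops to the expired queue. The struct and function names are made up for this sketch and only the constants mirror the headers changed below; it models the policy, not the kernel's runqueue code.

#include <stdio.h>

#define SCHED_TICKS        100                 /* ticks in one second (HZ = 100) */
#define SCHED_GRANULARITY  (SCHED_TICKS / 10)  /* max continuous run per turn */

/* Hypothetical, simplified stand-in for the per-task scheduling fields. */
struct demo_task {
	const char *name;
	int priority;        /* fixed priority; a larger value earns a larger share */
	int ticks_assigned;  /* share of SCHED_TICKS granted for this second */
	int ticks_left;      /* timeslice ticks remaining */
};

/* Split the one-second tick budget among tasks in proportion to priority. */
static void distribute_timeslices(struct demo_task *tasks, int n)
{
	int prio_total = 0;

	for (int i = 0; i < n; i++)
		prio_total += tasks[i].priority;
	for (int i = 0; i < n; i++) {
		tasks[i].ticks_assigned = SCHED_TICKS * tasks[i].priority / prio_total;
		tasks[i].ticks_left = tasks[i].ticks_assigned;
	}
}

int main(void)
{
	struct demo_task tasks[] = {
		{ "pager",  8, 0, 0 },   /* priority values borrowed from TASK_PRIO_* */
		{ "server", 6, 0, 0 },
		{ "normal", 4, 0, 0 },
	};
	int n = sizeof(tasks) / sizeof(tasks[0]);
	int runnable = n;

	distribute_timeslices(tasks, n);

	/* Round-robin over the runnable set: each turn runs at most
	 * SCHED_GRANULARITY ticks; a task whose slice is spent "expires". */
	while (runnable > 0) {
		for (int i = 0; i < n; i++) {
			if (tasks[i].ticks_left == 0)
				continue;       /* already on the expired queue */

			int run = tasks[i].ticks_left < SCHED_GRANULARITY ?
				  tasks[i].ticks_left : SCHED_GRANULARITY;
			tasks[i].ticks_left -= run;
			printf("%-6s runs %2d ticks, %2d of %2d left\n",
			       tasks[i].name, run, tasks[i].ticks_left,
			       tasks[i].ticks_assigned);

			if (tasks[i].ticks_left == 0)
				runnable--;     /* timeslice spent for this second */
		}
	}
	return 0;
}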

Interruptible blocking is implemented.
  - Mutexes, waitqueues and ipc are modified to be interruptible.
  - Sleep information (e.g. which waitqueue the task is sleeping on) is stored on the ktcb; see the sketch after this list.
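The sleep bookkeeping lands in the ktcb changes further down (waitlock, waiting_on, wq). As a hypothetical illustration of why recording where a task sleeps makes blocking interruptible, here is a small user-space model built on pthreads; the names, the wakeup_reason field and the locking scheme are inventions for this sketch, not the kernel's implementation.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct waitqueue_head;

/* Hypothetical, minimal stand-in for the ktcb sleep fields added in this
 * commit: just enough state to know where a task is sleeping. */
struct demo_tcb {
	pthread_mutex_t waitlock;          /* protects the sleep bookkeeping */
	struct waitqueue_head *waiting_on; /* which waitqueue we sleep on, if any */
	int wakeup_reason;                 /* 0 = normal wakeup, -EINTR = interrupted */
	pthread_cond_t wake;
};

struct waitqueue_head {
	int dummy;	/* a real waitqueue also keeps a lock and a list of sleepers */
};

/* Record where we are about to sleep, then block until somebody wakes us. */
static int wait_on(struct demo_tcb *task, struct waitqueue_head *wqh)
{
	int reason;

	pthread_mutex_lock(&task->waitlock);
	task->waiting_on = wqh;
	task->wakeup_reason = 0;
	while (task->waiting_on)           /* cleared by the waker */
		pthread_cond_wait(&task->wake, &task->waitlock);
	reason = task->wakeup_reason;
	pthread_mutex_unlock(&task->waitlock);
	return reason;
}

/* Interrupt a sleeper: because the tcb records which queue it sleeps on,
 * the waker can unlink it and wake it with -EINTR without searching. */
static void wake_interruptible(struct demo_tcb *task)
{
	pthread_mutex_lock(&task->waitlock);
	if (task->waiting_on) {
		task->waiting_on = NULL;   /* "unlink" from its waitqueue */
		task->wakeup_reason = -EINTR;
		pthread_cond_signal(&task->wake);
	}
	pthread_mutex_unlock(&task->waitlock);
}

static struct demo_tcb sleeper_tcb = {
	PTHREAD_MUTEX_INITIALIZER, NULL, 0, PTHREAD_COND_INITIALIZER
};
static struct waitqueue_head some_queue;

static void *sleeper(void *arg)
{
	(void)arg;
	int ret = wait_on(&sleeper_tcb, &some_queue);
	printf("sleeper woke up: %s\n", ret == -EINTR ? "interrupted" : "normal");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sleeper, NULL);
	sleep(1);                          /* let the sleeper block first */
	wake_interruptible(&sleeper_tcb);
	pthread_join(t, NULL);
	return 0;
}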
Bahadir Balban
2008-10-01 12:43:44 +03:00
parent c54d505709
commit f6d0a79298
21 changed files with 681 additions and 429 deletions

@@ -9,4 +9,7 @@ void preempt_disable(void);
int preemptive(void);
int preempt_count(void);
int in_nested_irq_context(void);
int in_irq_context(void);
int in_task_context(void);
#endif /* __PREEMPT_H__ */

@@ -10,9 +10,22 @@
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memory.h)
/* Task priorities */
#define TASK_PRIO_MAX 10
#define TASK_PRIO_REALTIME 10
#define TASK_PRIO_PAGER 8
#define TASK_PRIO_SERVER 6
#define TASK_PRIO_NORMAL 4
#define TASK_PRIO_LOW 2
/* Ticks per second, try ticks = 1000 + timeslice = 1 for regressed preemption test. */
#define HZ 100
#define TASK_TIMESLICE_DEFAULT HZ/100
#define SCHED_TICKS 100
/*
* A task can run continuously at this granularity,
* even if it has a greater total time slice.
*/
#define SCHED_GRANULARITY SCHED_TICKS/10
static inline struct ktcb *current_task(void)
{
@@ -23,29 +36,10 @@ static inline struct ktcb *current_task(void)
#define current current_task()
#define need_resched (current->ts_need_resched)
/* Flags set by kernel to direct the scheduler about future task state. */
#define __SCHED_FL_SUSPEND 1
#define SCHED_FL_SUSPEND (1 << __SCHED_FL_SUSPEND)
#define __SCHED_FL_RESUME 2
#define SCHED_FL_RESUME (1 << __SCHED_FL_RESUME)
#define __SCHED_FL_SLEEP 3
#define SCHED_FL_SLEEP (1 << __SCHED_FL_SLEEP)
#define SCHED_FL_MASK (SCHED_FL_SLEEP | SCHED_FL_RESUME \
| SCHED_FL_SUSPEND)
void sched_runqueue_init(void);
void sched_init_task(struct ktcb *task);
void sched_start_task(struct ktcb *task);
void sched_resume_task(struct ktcb *task);
void sched_suspend_task(struct ktcb *task);
void sched_tell(struct ktcb *task, unsigned int flags);
void sched_init_task(struct ktcb *task, int priority);
void sched_resume_sync(struct ktcb *task);
void sched_resume_async(struct ktcb *task);
void scheduler_start(void);
void sched_yield(void);
void schedule(void);
/* Asynchronous notifications to scheduler */
void sched_notify_resume(struct ktcb *task);
void sched_notify_sleep(struct ktcb *task);
void sched_notify_suspend(struct ktcb *task);
#endif /* __SCHEDULER_H__ */

@@ -16,6 +16,17 @@
#include INC_GLUE(context.h)
#include INC_SUBARCH(mm.h)
/*
* These are a mixture of flags that indicate the task is
* in a transitional state that could include one or more
* scheduling states.
*/
#define TASK_INTERRUPTED (1 << 0)
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)
/* Scheduler states */
enum task_state {
TASK_INACTIVE = 0,
TASK_SLEEPING = 1,
@@ -41,15 +52,14 @@ struct ktcb {
/* Runqueue related */
struct list_head rq_list;
struct runqueue *rq;
/* Thread information */
l4id_t tid; /* Global thread id */
l4id_t spid; /* Global space id */
l4id_t tgid; /* Global thread group id */
/* Flags to hint scheduler on future task state */
unsigned int schedfl;
/* Flags to indicate various task status */
unsigned int flags;
/* Lock for blocking thread state modifications via a syscall */
struct mutex thread_control_lock;
@@ -65,7 +75,13 @@ struct ktcb {
/* Thread times */
u32 kernel_time; /* Ticks spent in kernel */
u32 user_time; /* Ticks spent in userland */
u32 ticks_left; /* Ticks left for reschedule */
u32 ticks_left; /* Timeslice ticks left for reschedule */
u32 ticks_assigned; /* Ticks assigned to this task on this HZ */
u32 sched_granule; /* Granularity ticks left for reschedule */
int priority; /* Task's fixed, default priority */
/* Number of locks the task currently has acquired */
int nlocks;
/* Page table information */
pgd_table_t *pgd;
@@ -73,8 +89,12 @@ struct ktcb {
/* Fields for ipc rendezvous */
struct waitqueue_head wqh_recv;
struct waitqueue_head wqh_send;
l4id_t expected_sender;
l4id_t senderid; /* Sender checks this for ipc */
/* Tells where we are when we sleep */
struct spinlock waitlock;
struct waitqueue_head *waiting_on;
struct waitqueue *wq;
};
/* Per thread kernel stack unified on a single page. */
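To illustrate how the new accounting fields above (ticks_left, sched_granule, and the ts_need_resched hint) might interact on each timer interrupt, here is a small hypothetical model of the per-tick bookkeeping; it makes no claim about the actual tick handler in this commit.

#include <stdio.h>

#define SCHED_TICKS        100
#define SCHED_GRANULARITY  (SCHED_TICKS / 10)

/* Hypothetical stand-ins for the new ktcb accounting fields. */
struct demo_task {
	unsigned int ticks_left;     /* timeslice ticks left for reschedule */
	unsigned int sched_granule;  /* granularity ticks left for reschedule */
	int ts_need_resched;         /* reschedule at the next safe point */
};

/* One possible shape of the per-tick accounting for the running task. */
static void timer_tick(struct demo_task *cur)
{
	if (cur->ticks_left > 0)
		cur->ticks_left--;
	if (cur->sched_granule > 0)
		cur->sched_granule--;

	/* Ask for a reschedule when either the whole timeslice or the
	 * current granularity window has been used up. */
	if (cur->ticks_left == 0 || cur->sched_granule == 0)
		cur->ts_need_resched = 1;
}

int main(void)
{
	struct demo_task t = { 25, SCHED_GRANULARITY, 0 };
	int ticks = 0;

	while (!t.ts_need_resched) {
		timer_tick(&t);
		ticks++;
	}
	printf("preempted after %d ticks, %u timeslice ticks left\n",
	       ticks, t.ticks_left);

	/* When picked to run again, the task would get a fresh
	 * SCHED_GRANULARITY window while keeping its remaining timeslice. */
	return 0;
}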