Mirror of https://github.com/drasko/codezero.git
New scheduler and interruptible blocking.
A new scheduler replaces the old one:

- There are no more sched_xxx_notify() calls that ask the scheduler to change task state.
- Tasks now have priorities and different timeslices.
- A one-second interval of ticks is distributed among processes (see the sketch below).
- There are just runnable and expired queues.
- SCHED_GRANULARITY bounds how long a task may run continuously.
- The scheduler can now detect a safe point and suspend a task.

Interruptible blocking is implemented:

- Mutexes, waitqueues and ipc are modified to have an interruptible nature.
- Sleep information (which waitqueue, etc.) is stored on the ktcb.
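As a rough illustration of the distribution idea, a minimal sketch (hypothetical, not code from this commit: the helper name, the rq->task_list head and the list_for_each_entry iterator are all assumptions):

/* Sketch: hand out one second's worth of ticks (SCHED_TICKS) among
 * runnable tasks in proportion to their fixed priorities. */
void sched_distribute_ticks(struct runqueue *rq, int total_priority)
{
	struct ktcb *task;

	list_for_each_entry(task, &rq->task_list, rq_list) {
		task->ticks_assigned = SCHED_TICKS * task->priority / total_priority;
		task->ticks_left = task->ticks_assigned;
	}
}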
@@ -132,7 +132,7 @@ typedef struct fault_kdata {
 	pte_t pte;
 } __attribute__ ((__packed__)) fault_kdata_t;
 
 
 void arch_hardware_flush(pgd_table_t *pgd);
 void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
 			      unsigned int size, unsigned int flags);
 
@@ -24,6 +24,7 @@ void arm_drain_writebuffer(void);
 void arm_invalidate_tlb(void);
 void arm_invalidate_itlb(void);
 void arm_invalidate_dtlb(void);
 
 static inline void arm_enable_caches(void)
 {
 	arm_enable_icache();
@@ -9,4 +9,7 @@ void preempt_disable(void);
 int preemptive(void);
 int preempt_count(void);
 
+int in_nested_irq_context(void);
+int in_irq_context(void);
+int in_task_context(void);
 #endif /* __PREEMPT_H__ */
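A sketch of how these context queries might gate rescheduling decisions (hypothetical usage; the handler name is made up, need_resched is the macro from scheduler.h):

/* Sketch: defer or skip rescheduling depending on execution context. */
void timer_tick_sketch(void)
{
	if (in_nested_irq_context())
		return;		/* Never reschedule under a nested irq */

	need_resched = 1;	/* Applied on irq exit or next kernel exit */
}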
@@ -10,9 +10,22 @@
 #include INC_SUBARCH(mm.h)
 #include INC_GLUE(memory.h)
 
+/* Task priorities */
+#define TASK_PRIO_MAX		10
+#define TASK_PRIO_REALTIME	10
+#define TASK_PRIO_PAGER		8
+#define TASK_PRIO_SERVER	6
+#define TASK_PRIO_NORMAL	4
+#define TASK_PRIO_LOW		2
+
 /* Ticks per second, try ticks = 1000 + timeslice = 1 for regressed preemption test. */
 #define HZ 100
 #define TASK_TIMESLICE_DEFAULT HZ/100
+#define SCHED_TICKS 100
+
+/*
+ * A task can run continuously at this granularity,
+ * even if it has a greater total time slice.
+ */
+#define SCHED_GRANULARITY SCHED_TICKS/10
 
 static inline struct ktcb *current_task(void)
 {
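The intended interplay of these constants, as a sketch (hypothetical function; ts_need_resched is the ktcb field behind the need_resched macro):

/* Sketch: a task is preempted when either its granularity window or
 * its whole timeslice is consumed. */
void tick_account_sketch(struct ktcb *task)
{
	task->ticks_left--;
	task->sched_granule--;

	/* Stop at a granularity boundary even if timeslice remains */
	if (!task->ticks_left || !task->sched_granule)
		task->ts_need_resched = 1;
}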
@@ -23,29 +36,10 @@ static inline struct ktcb *current_task(void)
 #define current current_task()
 #define need_resched (current->ts_need_resched)
 
-/* Flags set by kernel to direct the scheduler about future task state. */
-#define __SCHED_FL_SUSPEND	1
-#define SCHED_FL_SUSPEND	(1 << __SCHED_FL_SUSPEND)
-#define __SCHED_FL_RESUME	2
-#define SCHED_FL_RESUME		(1 << __SCHED_FL_RESUME)
-#define __SCHED_FL_SLEEP	3
-#define SCHED_FL_SLEEP		(1 << __SCHED_FL_SLEEP)
-#define SCHED_FL_MASK		(SCHED_FL_SLEEP | SCHED_FL_RESUME \
-				 | SCHED_FL_SUSPEND)
-
-void sched_runqueue_init(void);
-void sched_init_task(struct ktcb *task);
-void sched_start_task(struct ktcb *task);
-void sched_resume_task(struct ktcb *task);
-void sched_suspend_task(struct ktcb *task);
-void sched_tell(struct ktcb *task, unsigned int flags);
+void sched_init_task(struct ktcb *task, int priority);
+void sched_resume_sync(struct ktcb *task);
+void sched_resume_async(struct ktcb *task);
 void scheduler_start(void);
 void sched_yield(void);
 void schedule(void);
-
-/* Asynchronous notifications to scheduler */
-void sched_notify_resume(struct ktcb *task);
-void sched_notify_sleep(struct ktcb *task);
-void sched_notify_suspend(struct ktcb *task);
 
 #endif /* __SCHEDULER_H__ */
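The sync/async split presumably decides whether the waker reschedules immediately; a hedged sketch of the calling convention (the semantics are inferred from the names, not confirmed by this commit):

/* Sketch: two ways of making a task runnable again. */
void resume_sketch(struct ktcb *task, int urgent)
{
	if (urgent)
		sched_resume_sync(task);	/* Enqueue and switch right away */
	else
		sched_resume_async(task);	/* Enqueue; runs when picked */
}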
@@ -16,6 +16,17 @@
 #include INC_GLUE(context.h)
 #include INC_SUBARCH(mm.h)
+
+/*
+ * These are a mixture of flags that indicate the task is
+ * in a transitional state that could include one or more
+ * scheduling states.
+ */
+#define TASK_INTERRUPTED	(1 << 0)
+#define TASK_SUSPENDING		(1 << 1)
+#define TASK_RESUMING		(1 << 2)
+
+
 /* Scheduler states */
 enum task_state {
 	TASK_INACTIVE	= 0,
 	TASK_SLEEPING	= 1,
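A sketch of how a transitional flag could be consumed (hypothetical; the exact hand-off between these flags and the task_state values is not shown in this hunk):

/* Sketch: mark a pending suspension; the scheduler completes it at
 * the next safe point by moving the task to TASK_INACTIVE. */
void request_suspend_sketch(struct ktcb *task)
{
	task->flags |= TASK_SUSPENDING;
}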
@@ -41,15 +52,14 @@ struct ktcb {
 
 	/* Runqueue related */
 	struct list_head rq_list;
 	struct runqueue *rq;
 
 	/* Thread information */
 	l4id_t tid;		/* Global thread id */
 	l4id_t spid;		/* Global space id */
 	l4id_t tgid;		/* Global thread group id */
 
-	/* Flags to hint scheduler on future task state */
-	unsigned int schedfl;
+	/* Flags to indicate various task status */
+	unsigned int flags;
 
 	/* Lock for blocking thread state modifications via a syscall */
 	struct mutex thread_control_lock;
@@ -65,7 +75,13 @@ struct ktcb {
 	/* Thread times */
 	u32 kernel_time;	/* Ticks spent in kernel */
 	u32 user_time;		/* Ticks spent in userland */
-	u32 ticks_left;		/* Ticks left for reschedule */
+	u32 ticks_left;		/* Timeslice ticks left for reschedule */
+	u32 ticks_assigned;	/* Ticks assigned to this task on this HZ */
+	u32 sched_granule;	/* Granularity ticks left for reschedule */
+	int priority;		/* Task's fixed, default priority */
+
+	/* Number of locks the task currently has acquired */
+	int nlocks;
 
 	/* Page table information */
 	pgd_table_t *pgd;
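Given the safe-point suspension mentioned in the commit message, nlocks is the obvious predicate; a sketch (an assumption, not code from this commit):

/* Sketch: a task holding no kernel locks is at a safe point where the
 * scheduler may suspend it without risk of deadlock. */
static inline int task_at_safe_point(struct ktcb *task)
{
	return task->nlocks == 0;
}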
@@ -73,8 +89,12 @@ struct ktcb {
 	/* Fields for ipc rendezvous */
 	struct waitqueue_head wqh_recv;
 	struct waitqueue_head wqh_send;
 	l4id_t expected_sender;
-	l4id_t senderid;	/* Sender checks this for ipc */
+
+	/* Tells where we are when we sleep */
+	struct spinlock waitlock;
+	struct waitqueue_head *waiting_on;
+	struct waitqueue *wq;
 };
 
 /* Per thread kernel stack unified on a single page. */
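A sketch of what this sleep bookkeeping enables: any path can detach a sleeper from whichever waitqueue it sits on (hypothetical; the spin_lock/list_del names are assumptions, and lock ordering against the waitqueue's own slock is elided):

/* Sketch: interrupt a sleeping task using the info on its ktcb. */
void interrupt_sleeper_sketch(struct ktcb *task)
{
	spin_lock(&task->waitlock);
	if (task->waiting_on) {
		list_del(&task->wq->task_list);	/* Off the sleeper list */
		task->waiting_on->sleepers--;
		task->waiting_on = 0;
		task->wq = 0;
		task->flags |= TASK_INTERRUPTED;
	}
	spin_unlock(&task->waitlock);
}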
@@ -2,5 +2,6 @@
 #define __LIB_MATH_H__
 
 #define min(x, y)	(((x) < (y)) ? x : y)
 #define max(x, y)	(((x) > (y)) ? x : y)
 
 #endif /* __LIB_MATH_H__ */
@@ -16,20 +16,18 @@
 
 /* A mutex is a binary semaphore that can sleep. */
 struct mutex {
-	int sleepers;		/* Number of sleepers */
-	struct spinlock slock;	/* Locks sleeper queue */
-	unsigned int lock;	/* The mutex lock itself */
-	struct waitqueue wq;	/* Sleeper queue head */
+	struct waitqueue_head wqh;
+	unsigned int lock;
 };
 
 static inline void mutex_init(struct mutex *mutex)
 {
 	memset(mutex, 0, sizeof(struct mutex));
-	INIT_LIST_HEAD(&mutex->wq.task_list);
+	waitqueue_head_init(&mutex->wqh);
 }
 
 int mutex_trylock(struct mutex *mutex);
-void mutex_lock(struct mutex *mutex);
+int mutex_lock(struct mutex *mutex);
 void mutex_unlock(struct mutex *mutex);
 
 /* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
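Since mutex_lock() now returns int, the sleep inside it can fail; a sketch of the new calling convention (the -EINTR value is an assumption):

/* Sketch: callers must now check for an interrupted sleep. */
int locked_op_sketch(struct mutex *m)
{
	int ret;

	if ((ret = mutex_lock(m)) < 0)
		return ret;	/* Sleep interrupted, e.g. -EINTR */

	/* ... critical section ... */

	mutex_unlock(m);
	return 0;
}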
@@ -10,23 +10,16 @@ struct waitqueue {
 	struct ktcb *task;
 };
 
-#define DECLARE_WAITQUEUE(wq, tsk)			\
+#define CREATE_WAITQUEUE_ON_STACK(wq, tsk)		\
 	struct waitqueue wq = {				\
 		.task_list = { &wq.task_list, &wq.task_list }, \
 		.task = tsk,				\
 	};
-	// LIST_HEAD_INIT(task_list),
 
-/*
- * The waitqueue spinlock ensures waiters are added and removed atomically so
- * that wake-ups and sleeps occur in sync. Otherwise, a task could try to wake
- * up a waitqueue **during when a task has decided to sleep but is not in the
- * queue yet. (** Take "during" here as a pseudo-concurrency term on UP)
- */
 struct waitqueue_head {
 	int sleepers;
-	struct spinlock slock;		/* Locks sleeper queue */
-	struct list_head task_list;	/* Sleeper queue head */
+	struct spinlock slock;
+	struct list_head task_list;
 };
@@ -35,11 +28,14 @@ static inline void waitqueue_head_init(struct waitqueue_head *head)
 	INIT_LIST_HEAD(&head->task_list);
 }
 
 /*
  * Used for ipc related waitqueues who have special wait queue manipulation
  * conditions.
  */
-void wake_up(struct waitqueue_head *wqh);
+void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
+		  struct waitqueue *wq);
+void task_unset_wqh(struct ktcb *task);
+
+void wake_up(struct waitqueue_head *wqh, int sync);
+int wake_up_task(struct ktcb *task, int sync);
 
 #endif /* __LIB_WAIT_H__ */
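Putting the pieces together, an interruptible wait might look like this sketch (hypothetical; the state field and the exact locking are assumptions consistent with the hunks above):

/* Sketch: sleep on a waitqueue, recording where we sleep so that an
 * interrupt can find and unhook us; report how we woke up. */
int wait_sketch(struct waitqueue_head *wqh)
{
	CREATE_WAITQUEUE_ON_STACK(wq, current);

	spin_lock(&wqh->slock);
	wqh->sleepers++;
	list_add_tail(&wq.task_list, &wqh->task_list);
	task_set_wqh(current, wqh, &wq);	/* Record where we sleep */
	current->state = TASK_SLEEPING;
	spin_unlock(&wqh->slock);

	schedule();

	/* Woken either by wake_up() or by an interrupting event */
	return (current->flags & TASK_INTERRUPTED) ? -1 : 0;
}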