Mirror of https://github.com/drasko/codezero.git, synced 2026-01-12 02:43:15 +01:00
Multiple updates on sleeping, vma dropping and thread suspend.
- Updated sleeping paths such that a task is atomically put into a runqueue and made RUNNABLE, or removed from a runqueue and made SLEEPING.
- Modified vma dropping sources to handle both copy_on_write() and exit() cases in a common function.
- Added the first infrastructure for a pager to suspend a task and wait for suspend completion from the scheduler.
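The hunks below touch only headers, so the actual sleep/wake paths are not visible here. As a rough illustration of the first point in the commit message (the state change and the runqueue insertion happening atomically), a make-runnable path could look like the sketch below; the rq->lock field, the rq_add_task() helper and the task->state field name are hypothetical stand-ins, not code from this commit.

/*
 * Illustrative sketch only: the point is that the state change and the
 * runqueue insertion happen under one lock, so no observer can see a
 * RUNNABLE task that is not on a runqueue (or vice versa).
 * rq->lock, rq_add_task() and task->state are hypothetical names.
 */
static void make_runnable_atomic(struct runqueue *rq, struct ktcb *task)
{
	spin_lock(&rq->lock);
	task->state = TASK_RUNNABLE;	/* becomes runnable ...         */
	rq_add_task(rq, task);		/* ... and enqueued, atomically */
	spin_unlock(&rq->lock);
}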
@@ -37,6 +37,7 @@ static inline struct ktcb *current_task(void)

#define need_resched	(current->ts_need_resched)

void sched_init_task(struct ktcb *task, int priority);
void sched_prepare_sleep(void);
void sched_resume_sync(struct ktcb *task);
void sched_resume_async(struct ktcb *task);
void scheduler_start(void);
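These are declarations only; the intended calling sequence on the sleep side can be read off the WAIT_EVENT macro further down in this commit: register on the waitqueue, mark the current task as going to sleep, drop the waitqueue lock, then call schedule(). A trimmed caller-side sketch follows; sleep_on() and my_wqh are hypothetical names, and the condition check that WAIT_EVENT performs is omitted for brevity.

/*
 * Caller-side sleep sequence, mirroring what WAIT_EVENT (below) expands to.
 */
static void sleep_on(struct waitqueue_head *my_wqh)
{
	spin_lock(&my_wqh->slock);
	CREATE_WAITQUEUE_ON_STACK(wq, current);
	task_set_wqh(current, my_wqh, &wq);
	my_wqh->sleepers++;
	list_add_tail(&wq.task_list, &my_wqh->task_list);
	sched_prepare_sleep();	/* set up the atomic SLEEPING transition */
	spin_unlock(&my_wqh->slock);
	schedule();		/* switch away; resumes here after a wake-up */
}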
@@ -25,8 +25,7 @@

#define TASK_SUSPENDING		(1 << 1)
#define TASK_RESUMING		(1 << 2)

/* Scheduler states */
/* Task states */
enum task_state {
	TASK_INACTIVE = 0,
	TASK_SLEEPING = 1,
@@ -52,6 +51,7 @@ struct ktcb {

	/* Runqueue related */
	struct list_head rq_list;
	struct runqueue *rq;

	/* Thread information */
	l4id_t tid;		/* Global thread id */
@@ -91,6 +91,9 @@ struct ktcb {

	struct waitqueue_head wqh_send;
	l4id_t expected_sender;

	/* Waitqueue for pagers to wait for task states */
	struct waitqueue_head wqh_pager;

	/* Tells where we are when we sleep */
	struct spinlock waitlock;
	struct waitqueue_head *waiting_on;
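The new wqh_pager field is the hook for the pager-suspend infrastructure mentioned in the commit message: a pager sleeps on the task's wqh_pager until the scheduler signals that suspension has completed. A hypothetical sketch follows, using the WAIT_EVENT helper from the next hunk; the pager_wait_for_suspend() name, the state field and the exact wait condition are assumptions, not part of this commit.

/*
 * Hypothetical pager-side wait: sleep on the task's wqh_pager until the
 * scheduler has taken the task off the runqueue and marked it inactive,
 * then report whether we completed normally or were interrupted (-EINTR).
 */
int pager_wait_for_suspend(struct ktcb *task)
{
	int ret;

	WAIT_EVENT(&task->wqh_pager,
		   task->state == TASK_INACTIVE, ret);
	return ret;
}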
@@ -34,8 +34,40 @@ void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,

void task_unset_wqh(struct ktcb *task);

/*
 * Sleep if the given condition isn't true.
 * ret will tell whether condition was met
 * or we got interrupted.
 */
#define WAIT_EVENT(wqh, condition, ret)					\
do {									\
	ret = 0;							\
	for (;;) {							\
		spin_lock(&(wqh)->slock);				\
		if (condition) {					\
			spin_unlock(&(wqh)->slock);			\
			break;						\
		}							\
		CREATE_WAITQUEUE_ON_STACK(wq, current);			\
		task_set_wqh(current, wqh, &wq);			\
		(wqh)->sleepers++;					\
		list_add_tail(&wq.task_list, &(wqh)->task_list);	\
		printk("(%d) waiting...\n", current->tid);		\
		sched_prepare_sleep();					\
		spin_unlock(&(wqh)->slock);				\
		schedule();						\
		/* Did we wake up normally or get interrupted */	\
		if (current->flags & TASK_INTERRUPTED) {		\
			current->flags &= ~TASK_INTERRUPTED;		\
			ret = -EINTR;					\
			break;						\
		}							\
	}								\
} while(0);

void wake_up(struct waitqueue_head *wqh, int sync);
int wake_up_task(struct ktcb *task, int sync);
void wake_up_all(struct waitqueue_head *wqh, int sync);

#endif /* __LIB_WAIT_H__ */
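For completeness, a sketch of how the WAIT_EVENT / wake_up pair is meant to be used across two threads; the data_ready flag and both function names are placeholders, and the exact semantics of the sync argument are not visible in this diff.

/* Waiter side: sleeps until *data_ready becomes nonzero, or returns -EINTR. */
int wait_for_data(struct waitqueue_head *wqh, int *data_ready)
{
	int ret;

	WAIT_EVENT(wqh, *data_ready != 0, ret);
	return ret;
}

/* Producer side: publish the condition under the waitqueue lock, then wake a sleeper. */
void publish_data(struct waitqueue_head *wqh, int *data_ready)
{
	spin_lock(&wqh->slock);
	*data_ready = 1;
	spin_unlock(&wqh->slock);

	wake_up(wqh, 1);	/* 'sync' flag as declared above */
}

Setting the flag while holding wqh->slock matters: WAIT_EVENT evaluates the condition under that same lock before committing to sleep, which closes the lost wake-up window.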