Mirror of https://github.com/drasko/codezero.git
A new scheduler replaces the old one.

- There are no sched_xxx_notify() calls that ask the scheduler to change task state.
- Tasks now have priorities and different timeslices (see the sketch after this list).
- A one-second interval is distributed among processes.
- There are just runnable and expired queues.
- SCHED_GRANULARITY determines a maximum running boundary for tasks.
- The scheduler can now detect a safe point and suspend a task. Interruptible blocking is implemented.
- Mutexes, waitqueues and IPC are modified to have an interruptible nature.
- Sleep information (which waitqueue, etc.) is stored on the ktcb.
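A minimal sketch of the timeslice scheme the message describes, assuming a tick-based interval and priority-proportional shares. The names SCHED_TICKS_PER_SEC, total_prio and task_timeslice are illustrative, and the SCHED_GRANULARITY value shown is made up; this is one plausible reading, not codezero's actual scheduler code:

        /* Hypothetical: ticks making up the one-second interval */
        #define SCHED_TICKS_PER_SEC     100
        /* Hypothetical value; the real SCHED_GRANULARITY is defined elsewhere */
        #define SCHED_GRANULARITY       10

        /*
         * Each task's share of the interval scales with its priority,
         * and SCHED_GRANULARITY bounds how long it may run before the
         * scheduler re-evaluates.
         */
        static inline int task_timeslice(int prio, int total_prio)
        {
                int slice = SCHED_TICKS_PER_SEC * prio / total_prio;

                return slice > SCHED_GRANULARITY ? SCHED_GRANULARITY : slice;
        }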
202 lines
4.7 KiB
C
/*
 * Implementation of wakeup/wait for processes.
 *
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
#include <l4/generic/scheduler.h>
#include <l4/lib/wait.h>
#include <l4/lib/spinlock.h>
#include <l4/api/errno.h>

/*
 * This sets the wait details of a task so that any arbitrary
 * waker can know where the task is sleeping.
 */
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
                  struct waitqueue *wq)
{
        spin_lock(&task->waitlock);
        task->waiting_on = wqh;
        task->wq = wq;
        spin_unlock(&task->waitlock);
}

/*
 * This clears all wait details of a task. Used as the
 * task is removed from its queue and is about to wake up.
 */
void task_unset_wqh(struct ktcb *task)
{
        spin_lock(&task->waitlock);
        task->waiting_on = 0;
        task->wq = 0;
        spin_unlock(&task->waitlock);
}
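
/*
 * For reference, a sketch of the ktcb wait fields the two helpers
 * above rely on. This is an assumed shape for illustration; the
 * real definitions live in the ktcb and waitqueue headers:
 *
 *      struct ktcb {
 *              ...
 *              struct spinlock waitlock;          -- protects the two below
 *              struct waitqueue_head *waiting_on; -- head slept on, or 0
 *              struct waitqueue *wq;              -- our link in that queue
 *              ...
 *      };
 */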

/*
 * Sleep if the given condition isn't true.
 * ret tells whether the condition was met
 * or we got interrupted.
 */
#define WAIT_EVENT(wqh, condition, ret)                                 \
do {                                                                    \
        ret = 0;                                                        \
        for (;;) {                                                      \
                if (condition)                                          \
                        break;                                          \
                CREATE_WAITQUEUE_ON_STACK(wq, current);                 \
                spin_lock(&wqh->slock);                                 \
                /* Pass &wq: the waitqueue lives on our stack */        \
                task_set_wqh(current, wqh, &wq);                        \
                wqh->sleepers++;                                        \
                list_add_tail(&wq.task_list, &wqh->task_list);          \
                current->state = TASK_SLEEPING;                         \
                printk("(%d) waiting...\n", current->tid);              \
                spin_unlock(&wqh->slock);                               \
                schedule();                                             \
                /* Did we wake up normally or get interrupted? */       \
                if (current->flags & TASK_INTERRUPTED) {                \
                        current->flags &= ~TASK_INTERRUPTED;            \
                        ret = -EINTR;                                   \
                        break;                                          \
                }                                                       \
        }                                                               \
} while (0)
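
/*
 * Hypothetical usage sketch for WAIT_EVENT: block until a flag
 * becomes non-zero, propagating -EINTR if the sleep was
 * interrupted. wait_for_flag and flag are illustrative names,
 * not part of this file.
 */
static inline int wait_for_flag(struct waitqueue_head *wqh, int *flag)
{
        int err;

        /* The condition is re-evaluated each time we are woken up */
        WAIT_EVENT(wqh, *flag != 0, err);

        return err;     /* 0 on success, -EINTR if interrupted */
}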

/* Sleep without any condition */
#define WAIT_ON(wqh, ret)                                               \
do {                                                                    \
        CREATE_WAITQUEUE_ON_STACK(wq, current);                         \
        spin_lock(&wqh->slock);                                         \
        task_set_wqh(current, wqh, &wq);                                \
        wqh->sleepers++;                                                \
        list_add_tail(&wq.task_list, &wqh->task_list);                  \
        current->state = TASK_SLEEPING;                                 \
        printk("(%d) waiting on wqh at: 0x%p\n",                        \
               current->tid, wqh);                                      \
        spin_unlock(&wqh->slock);                                       \
        schedule();                                                     \
                                                                        \
        /* Did we wake up normally or get interrupted? */               \
        if (current->flags & TASK_INTERRUPTED) {                        \
                current->flags &= ~TASK_INTERRUPTED;                    \
                ret = -EINTR;                                           \
        } else                                                          \
                ret = 0;                                                \
} while (0)

/* Sleep without any condition */
int wait_on(struct waitqueue_head *wqh)
{
        CREATE_WAITQUEUE_ON_STACK(wq, current);

        spin_lock(&wqh->slock);
        task_set_wqh(current, wqh, &wq);
        wqh->sleepers++;
        list_add_tail(&wq.task_list, &wqh->task_list);
        current->state = TASK_SLEEPING;
        printk("(%d) waiting on wqh at: 0x%p\n",
               current->tid, wqh);
        spin_unlock(&wqh->slock);
        schedule();

        /* Did we wake up normally or get interrupted? */
        if (current->flags & TASK_INTERRUPTED) {
                current->flags &= ~TASK_INTERRUPTED;
                return -EINTR;
        }

        return 0;
}

/* Wake up single waiter */
void wake_up(struct waitqueue_head *wqh, int sync)
{
        BUG_ON(wqh->sleepers < 0);
        spin_lock(&wqh->slock);
        if (wqh->sleepers > 0) {
                struct waitqueue *wq = list_entry(wqh->task_list.next,
                                                  struct waitqueue,
                                                  task_list);
                struct ktcb *sleeper = wq->task;
                task_unset_wqh(sleeper);
                BUG_ON(list_empty(&wqh->task_list));
                list_del_init(&wq->task_list);
                wqh->sleepers--;
                sleeper->state = TASK_RUNNABLE;
                printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
                spin_unlock(&wqh->slock);

                if (sync)
                        sched_resume_sync(sleeper);
                else
                        sched_resume_async(sleeper);
                return;
        }
        spin_unlock(&wqh->slock);
}
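
/*
 * Hypothetical pairing of wait_on() and wake_up(); the function
 * names are illustrative. A sync of 0 asks for an asynchronous
 * resume: the sleeper becomes runnable and the scheduler decides
 * when it actually runs.
 */
static int consumer_block(struct waitqueue_head *wqh)
{
        /* Returns 0 when woken normally, -EINTR if interrupted */
        return wait_on(wqh);
}

static void producer_signal(struct waitqueue_head *wqh)
{
        /* Wake one sleeper, leaving the resume timing to the scheduler */
        wake_up(wqh, 0);
}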

/*
 * Wakes up a task. If the task is not waiting, or has been woken
 * up while we were peeking at it, returns -1. @sync makes us
 * yield immediately, or else leaves it to the scheduler's
 * discretion.
 */
int wake_up_task(struct ktcb *task, int sync)
{
        struct waitqueue_head *wqh;
        struct waitqueue *wq;

        spin_lock(&task->waitlock);
        if (!task->waiting_on) {
                spin_unlock(&task->waitlock);
                return -1;
        }

        /*
         * We have found the waitqueue head. It needs to be locked
         * first to conform with the lock order and avoid deadlocks.
         * Release the task's waitlock and take the wqh's lock.
         */
        wqh = task->waiting_on;
        wq = task->wq;
        spin_unlock(&task->waitlock);

        /* -- The task can be woken up by someone else here -- */

        spin_lock(&wqh->slock);

        /*
         * Now let's check if the task is still
         * waiting, and in the same queue.
         */
        spin_lock(&task->waitlock);
        if (task->waiting_on != wqh) {
                /* No, the task has been woken by someone else */
                spin_unlock(&wqh->slock);
                spin_unlock(&task->waitlock);
                return -1;
        }

        /* Now we can remove the task from its waitqueue */
        list_del_init(&wq->task_list);
        wqh->sleepers--;
        task->waiting_on = 0;
        task->wq = 0;
        task->state = TASK_RUNNABLE;
        spin_unlock(&wqh->slock);
        spin_unlock(&task->waitlock);

        /* Removed from its waitqueue, we can now safely resume the task */
        if (sync)
                sched_resume_sync(task);
        else
                sched_resume_async(task);

        return 0;
}
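
/*
 * Sketch of how interruptible blocking might be triggered from the
 * waker side: mark the sleeper interrupted, then pull it off
 * whatever queue it sleeps on with wake_up_task(). The helper name
 * is hypothetical and the real flag update may require the task's
 * waitlock; this only illustrates the intended flow.
 */
static inline int interrupt_sleeper(struct ktcb *task)
{
        task->flags |= TASK_INTERRUPTED;

        /* -1 means the task was not sleeping (or was already woken) */
        return wake_up_task(task, 0);
}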