Mirror of https://github.com/drasko/codezero.git
Multiple updates on sleeping, vma dropping and thread suspend.
- Updated sleeping paths so that a task is atomically put into a runqueue and made RUNNABLE, or removed from a runqueue and made SLEEPING.
- Modified the vma dropping sources to handle both the copy_on_write() and exit() cases in a common function.
- Added the first infrastructure to have a pager suspend a task and wait for suspend completion from the scheduler.
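The first item closes a classic lost-wakeup race: if a task marks itself TASK_SLEEPING after dropping the lock that wakers take, a wakeup can fire in the gap and the task sleeps forever. Below is a minimal user-space model of the pattern this commit adopts; all names are illustrative, this is a sketch and not codezero code, and sched_prepare_sleep()'s preempt_disable() pairing is modelled with a plain pthread mutex (build with cc -pthread):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	enum state { RUNNABLE, SLEEPING };

	static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
	static enum state task_state = RUNNABLE;
	static int wake_pending;

	/* Analogue of sched_prepare_sleep(): the sleep decision happens
	 * under the same lock wakers take, so a wakeup cannot slip in
	 * between "decide to sleep" and "actually sleeping". */
	static void prepare_sleep(void)
	{
		pthread_mutex_lock(&rq_lock);
		if (wake_pending)
			wake_pending = 0;	/* woken before sleeping: undo */
		else
			task_state = SLEEPING;
		pthread_mutex_unlock(&rq_lock);
	}

	/* Analogue of wake_up_task(): make the task runnable, or record
	 * the wakeup if it has not reached prepare_sleep() yet. */
	static void *waker(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&rq_lock);
		if (task_state == SLEEPING)
			task_state = RUNNABLE;
		else
			wake_pending = 1;
		pthread_mutex_unlock(&rq_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waker, NULL);
		prepare_sleep();
		for (;;) {			/* stand-in for schedule() */
			pthread_mutex_lock(&rq_lock);
			int runnable = (task_state == RUNNABLE);
			pthread_mutex_unlock(&rq_lock);
			if (runnable)
				break;
			sched_yield();
		}
		pthread_join(t, NULL);
		puts("no lost wakeup");
		return 0;
	}

However the waker and sleeper interleave, the wakeup is either applied or recorded, never lost.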
@@ -82,6 +82,7 @@ int ipc_send(l4id_t recv_tid)
 		/* Remove from waitqueue */
 		list_del_init(&wq->task_list);
 		wqhr->sleepers--;
+		task_unset_wqh(receiver);
 
 		/* Release locks */
 		spin_unlock(&wqhr->slock);
@@ -103,7 +104,7 @@ int ipc_send(l4id_t recv_tid)
 	wqhs->sleepers++;
 	list_add_tail(&wq.task_list, &wqhs->task_list);
 	task_set_wqh(current, wqhs, &wq);
-	current->state = TASK_SLEEPING;
+	sched_prepare_sleep();
 	spin_unlock(&wqhr->slock);
 	spin_unlock(&wqhs->slock);
 //	printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
@@ -168,7 +169,7 @@ int ipc_recv(l4id_t senderid)
 	wqhr->sleepers++;
 	list_add_tail(&wq.task_list, &wqhr->task_list);
 	task_set_wqh(current, wqhr, &wq);
-	current->state = TASK_SLEEPING;
+	sched_prepare_sleep();
 //	printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
 //	       current->tid, current->expected_sender);
 	spin_unlock(&wqhr->slock);
@@ -87,8 +87,8 @@ void do_exchange_registers(struct ktcb *task, struct exregs_data *exregs)
  * the register context of a thread. The thread's registers can be
  * set only when the thread is in user mode. A newly created thread
  * that is the copy of another thread (forked or cloned) will also
- * be given its user mode context so such threads can also be
- * modified by this call before execution.
+ * be given its user mode context on the first chance to execute so
+ * such threads can also be modified by this call before execution.
  *
  * A thread executing in the kernel cannot be modified since this
  * would compromise the kernel. Also the thread must be in suspended
@@ -10,6 +10,7 @@
 #include <l4/generic/tcb.h>
 #include <l4/lib/idpool.h>
 #include <l4/lib/mutex.h>
+#include <l4/lib/wait.h>
 #include <l4/generic/pgalloc.h>
 #include INC_ARCH(asm.h)
 #include INC_SUBARCH(mm.h)
@@ -20,8 +21,68 @@ int sys_thread_switch(syscall_context_t *regs)
 	return 0;
 }
 
+/*
+ * This suspends a thread that is in suspended,
+ * sleeping or runnable state.
+ */
+int thread_suspend(struct task_ids *ids)
+{
+	struct ktcb *task;
+	int ret;
+
+	if (!(task = find_task(ids->tid)))
+		return -ESRCH;
+
+	if (task->state == TASK_INACTIVE)
+		return 0;
+
+	/* First show our intention to suspend thread */
+	task->flags |= TASK_SUSPENDING;
+
+	/*
+	 * Interrupt the task in case it was sleeping
+	 * so that it will be caught and suspended by
+	 * the scheduler.
+	 */
+	wake_up_task(task, 1);
+
+	/* Wait until scheduler wakes us up */
+	WAIT_EVENT(&task->wqh_pager,
+		   task->state == TASK_INACTIVE, ret);
+
+	return ret;
+}
+
+int thread_destroy(struct task_ids *ids)
+{
+	struct ktcb *task;
+	int ret;
+
+	if (!(task = find_task(ids->tid)))
+		return -ESRCH;
+
+	if ((ret = thread_suspend(ids)) < 0)
+		return ret;
+
+	/* Delete it from global list so any callers will get -ESRCH */
+	list_del(&task->task_list);
+
+	/*
+	 * If there are any sleepers on any of the task's
+	 * waitqueues, we need to wake those tasks up.
+	 */
+	wake_up_all(&task->wqh_send, 0);
+	wake_up_all(&task->wqh_recv, 0);
+
+	/*
+	 * The thread cannot have a pager waiting for it
+	 * since we ought to be the pager.
+	 */
+	BUG_ON(task->wqh_pager.sleepers > 0);
+
+	/* We can now safely delete the task */
+	free_page(task);
+
+	return 0;
+}
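The hunk above is the pager-side half of the new suspend protocol: flag the intent with TASK_SUSPENDING, interrupt the target out of any sleep with wake_up_task(), then block on the task's wqh_pager waitqueue until the scheduler reports TASK_INACTIVE. The scheduler-side half appears in the schedule() hunks further down. A hypothetical pager-side invocation, assuming a THREAD_SUSPEND action eventually gets wired into sys_thread_control() alongside THREAD_RESUME and THREAD_DESTROY (this diff does not add that case yet):

	struct task_ids ids = { .tid = child_tid };	/* child_tid: illustrative */
	int err;

	/* 0 once the task is parked as TASK_INACTIVE, -ESRCH if it
	 * does not exist, -EINTR if the wait was interrupted */
	if ((err = l4_thread_control(THREAD_SUSPEND, &ids)) < 0)
		printf("suspend failed: %d\n", err);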
@@ -257,6 +318,7 @@ out:
 	/* Initialise ipc waitqueues */
 	waitqueue_head_init(&new->wqh_send);
 	waitqueue_head_init(&new->wqh_recv);
+	waitqueue_head_init(&new->wqh_pager);
 
 	arch_setup_new_thread(new, task, flags);
 
@@ -290,6 +352,8 @@ int sys_thread_control(syscall_context_t *regs)
 	case THREAD_RESUME:
 		ret = thread_resume(ids);
 		break;
+	case THREAD_DESTROY:
+		ret = thread_destroy(ids);
 	default:
 		ret = -EINVAL;
 	}
@@ -28,7 +28,6 @@ struct runqueue {
 	struct spinlock lock;		/* Lock */
 	struct list_head task_list;	/* List of tasks in rq */
 	unsigned int total;		/* Total tasks */
-	int recalc_timeslice;		/* Need timeslice redistribution */
 };
 
 #define SCHED_RQ_TOTAL 2
@@ -136,14 +135,20 @@ static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
 	else
 		list_add_tail(&task->rq_list, &rq->task_list);
 	rq->total++;
+	task->rq = rq;
 	spin_unlock(&rq->lock);
 }
 
+/* NOTE: Do we need an rq_lock on tcb? */
+
 /* Helper for removing a task from its runqueue. */
-static inline void sched_rq_remove_task(struct ktcb *task, struct runqueue *rq)
+static inline void sched_rq_remove_task(struct ktcb *task)
 {
+	struct runqueue *rq = task->rq;
+
+	spin_lock(&rq->lock);
 	list_del_init(&task->rq_list);
+	task->rq = 0;
 	rq->total--;
 
 	BUG_ON(rq->total < 0);
@@ -161,11 +166,25 @@ void sched_init_task(struct ktcb *task, int prio)
 	task->flags |= TASK_RESUMING;
 }
 
+/*
+ * Takes all the action that will make a task sleep
+ * in the scheduler. If the task is woken up before
+ * it schedules, the operations here are simply
+ * undone and the task remains runnable.
+ */
+void sched_prepare_sleep()
+{
+	preempt_disable();
+	sched_rq_remove_task(current);
+	current->state = TASK_SLEEPING;
+	preempt_enable();
+}
+
 /* Synchronously resumes a task */
 void sched_resume_sync(struct ktcb *task)
 {
 	BUG_ON(task == current);
 	task->state = TASK_RUNNABLE;
 
 	sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
 	schedule();
 }
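With sched_prepare_sleep() in place, every sleep site in this commit follows the same shape; condensed here from the ipc_send()/ipc_recv(), wait_on() and mutex hunks in this diff (a kernel fragment, not independently compilable):

	spin_lock(&wqh->slock);			/* serialize against wakers */
	task_set_wqh(current, wqh, &wq);	/* let wake_up_task() find us */
	wqh->sleepers++;
	list_add_tail(&wq.task_list, &wqh->task_list);
	sched_prepare_sleep();			/* atomically leave rq + TASK_SLEEPING */
	spin_unlock(&wqh->slock);
	schedule();				/* actually yield the CPU */

If a waker slips in between the unlock and schedule(), sched_resume_sync/async() has already re-queued the task as TASK_RUNNABLE, so schedule() merely rotates it back onto the runnable queue: the sleep is undone rather than lost, exactly as the sched_prepare_sleep() comment promises.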
@@ -177,8 +196,8 @@ void sched_resume_sync(struct ktcb *task)
  */
 void sched_resume_async(struct ktcb *task)
 {
 	BUG_ON(task == current);
 	task->state = TASK_RUNNABLE;
 
 	sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
 }
@@ -251,20 +270,23 @@ void schedule()
 	/* Cannot have any irqs that schedule after this */
 	preempt_disable();
 
+#if 0
 	/* NOTE:
-	 * We could avoid double-scheduling by detecting a task
-	 * that's about to schedule voluntarily and skipping the
-	 * schedule() call in irq mode.
+	 * We could avoid unnecessary scheduling by detecting
+	 * a task that has been just woken up.
 	 */
+	if ((task->flags & TASK_WOKEN_UP) && in_process_context()) {
+		preempt_enable();
+		return 0;
+	}
+#endif
 
 	/* Reset schedule flag */
 	need_resched = 0;
 
-	/* Remove from runnable queue */
-	sched_rq_remove_task(current, rq_runnable);
-
-	/* Put it into appropriate runqueue */
+	/* Remove from runnable and put into appropriate runqueue */
 	if (current->state == TASK_RUNNABLE) {
+		sched_rq_remove_task(current);
 		if (current->ticks_left)
 			sched_rq_add_task(current, rq_runnable, RQ_ADD_BEHIND);
 		else
@@ -277,7 +299,8 @@ void schedule()
 	 * The task should have no locks and be in a runnable state.
 	 * (e.g. properly woken up by the suspender)
 	 */
-	if (current->nlocks == 0 && current->state == TASK_RUNNABLE) {
+	if (current->nlocks == 0 &&
+	    current->state == TASK_RUNNABLE) {
 		/* Suspend it if suitable */
 		current->state = TASK_INACTIVE;
 		current->flags &= ~TASK_SUSPENDING;
@@ -290,7 +313,13 @@ void schedule()
 		 */
 		prio_total -= current->priority;
 		BUG_ON(prio_total <= 0);
+
+		/* Prepare to wake up any waiters */
+		wake_up(&current->wqh_pager, 0);
 	} else {
+		if (current->state == TASK_RUNNABLE)
+			sched_rq_remove_task(current);
+
 		/*
 		 * Top up task's ticks temporarily, and
 		 * wait for it to release its locks.
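Pieced together, the scheduler-side half of the suspend handshake in schedule() now reads roughly as follows. This is a condensed reading of the two hunks above, not a verbatim excerpt; in particular, the enclosing TASK_SUSPENDING test is inferred from context (the flag is cleared inside) and is not visible in the hunks shown:

	if (current->flags & TASK_SUSPENDING) {
		/* Park only a task that holds no locks and was
		 * properly woken up by the suspender */
		if (current->nlocks == 0 &&
		    current->state == TASK_RUNNABLE) {
			current->state = TASK_INACTIVE;
			current->flags &= ~TASK_SUSPENDING;
			prio_total -= current->priority;

			/* Release the pager blocked in thread_suspend() */
			wake_up(&current->wqh_pager, 0);
		} else {
			if (current->state == TASK_RUNNABLE)
				sched_rq_remove_task(current);
			/* Top up its ticks and wait for it to drop its locks */
		}
	}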
@@ -317,6 +317,7 @@ void init_pager(char *name, struct task_ids *ids)
 	/* Task's rendezvous point */
 	waitqueue_head_init(&task->wqh_send);
 	waitqueue_head_init(&task->wqh_recv);
+	waitqueue_head_init(&task->wqh_pager);
 
 	/* Global hashlist that keeps all existing tasks */
 	add_task_global(task);
 
@@ -51,7 +51,7 @@ void sem_up(struct mutex *mutex)
 		INIT_LIST_HEAD(&wq.task_list);
 		list_add_tail(&wq.task_list, &mutex->wq.task_list);
 		mutex->sleepers++;
-		current->state = TASK_SLEEPING;
+		sched_prepare_sleep();
 		printk("(%d) produced, now sleeping...\n", current->tid);
 		spin_unlock(&mutex->slock);
 		schedule();
@@ -89,7 +89,7 @@ void sem_down(struct mutex *mutex)
 		INIT_LIST_HEAD(&wq.task_list);
 		list_add_tail(&wq.task_list, &mutex->wq.task_list);
 		mutex->sleepers++;
-		current->state = TASK_SLEEPING;
+		sched_prepare_sleep();
 		printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
 		spin_unlock(&mutex->slock);
 		schedule();
@@ -124,7 +124,7 @@ int mutex_lock(struct mutex *mutex)
 		task_set_wqh(current, &mutex->wqh, &wq);
 		list_add_tail(&wq.task_list, &mutex->wqh.task_list);
 		mutex->wqh.sleepers++;
-		current->state = TASK_SLEEPING;
+		sched_prepare_sleep();
 		spin_unlock(&mutex->wqh.slock);
 		printk("(%d) sleeping...\n", current->tid);
 		schedule();
@@ -160,7 +160,6 @@ void mutex_unlock(struct mutex *mutex)
 	BUG_ON(list_empty(&mutex->wqh.task_list));
 	list_del_init(&wq->task_list);
 	mutex->wqh.sleepers--;
-	sleeper->state = TASK_RUNNABLE;
 	spin_unlock(&mutex->wqh.slock);
 
 	/*
@@ -35,57 +35,6 @@ void task_unset_wqh(struct ktcb *task)
 
 }
 
-/*
- * Sleep if the given condition isn't true.
- * ret will tell whether condition was met
- * or we got interrupted.
- */
-#define WAIT_EVENT(wqh, condition, ret)			\
-do {							\
-	ret = 0;					\
-	for (;;) {					\
-		if (condition)				\
-			break;				\
-		CREATE_WAITQUEUE_ON_STACK(wq, current);	\
-		spin_lock(&wqh->slock);			\
-		task_set_wqh(current, wqh, wq);		\
-		wqh->sleepers++;			\
-		list_add_tail(&wq.task_list, &wqh->task_list); \
-		task->state = TASK_SLEEPING;		\
-		printk("(%d) waiting...\n", current->tid); \
-		spin_unlock(&wqh->slock);		\
-		schedule();				\
-		/* Did we wake up normally or get interrupted */\
-		if (current->flags & TASK_INTERRUPTED) {	\
-			current->flags &= ~TASK_INTERRUPTED;	\
-			ret = -EINTR;			\
-			break;				\
-		}					\
-	}						\
-} while(0);
-
-/* Sleep without any condition */
-#define WAIT_ON(wqh, ret)				\
-do {							\
-	CREATE_WAITQUEUE_ON_STACK(wq, current);		\
-	spin_lock(&wqh->slock);				\
-	task_set_wqh(current, wqh, &wq);		\
-	wqh->sleepers++;				\
-	list_add_tail(&wq.task_list, &wqh->task_list);	\
-	current->state = TASK_SLEEPING;			\
-	printk("(%d) waiting on wqh at: 0x%p\n",	\
-	       current->tid, wqh);			\
-	spin_unlock(&wqh->slock);			\
-	schedule();					\
-							\
-	/* Did we wake up normally or get interrupted */\
-	if (current->flags & TASK_INTERRUPTED) {	\
-		current->flags &= ~TASK_INTERRUPTED;	\
-		ret = -EINTR;				\
-	} else						\
-		ret = 0;				\
-} while(0);
-
 /* Sleep without any condition */
 int wait_on(struct waitqueue_head *wqh)
 {
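The deleted WAIT_EVENT/WAIT_ON macros do not disappear from the kernel: thread.c gains an #include <l4/lib/wait.h> in this same commit and thread_suspend() calls WAIT_EVENT, so the macros presumably move into that header (the header side is not shown in this diff). Usage stays as in thread_suspend() above:

	int ret;

	/* Sleep until the scheduler has parked the target task;
	 * ret is 0 on success, -EINTR if the wait was interrupted */
	WAIT_EVENT(&task->wqh_pager, task->state == TASK_INACTIVE, ret);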
@@ -94,7 +43,7 @@ int wait_on(struct waitqueue_head *wqh)
 	task_set_wqh(current, wqh, &wq);
 	wqh->sleepers++;
 	list_add_tail(&wq.task_list, &wqh->task_list);
-	current->state = TASK_SLEEPING;
+	sched_prepare_sleep();
 	printk("(%d) waiting on wqh at: 0x%p\n",
 	       current->tid, wqh);
 	spin_unlock(&wqh->slock);
@@ -109,6 +58,31 @@ int wait_on(struct waitqueue_head *wqh)
 	return 0;
 }
 
+/* Wake up all */
+void wake_up_all(struct waitqueue_head *wqh, int sync)
+{
+	BUG_ON(wqh->sleepers < 0);
+	spin_lock(&wqh->slock);
+	while (wqh->sleepers > 0) {
+		struct waitqueue *wq = list_entry(wqh->task_list.next,
+						  struct waitqueue,
+						  task_list);
+		struct ktcb *sleeper = wq->task;
+		task_unset_wqh(sleeper);
+		BUG_ON(list_empty(&wqh->task_list));
+		list_del_init(&wq->task_list);
+		wqh->sleepers--;
+		sleeper->flags |= TASK_INTERRUPTED;
+		printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
+		spin_unlock(&wqh->slock);
+
+		if (sync)
+			sched_resume_sync(sleeper);
+		else
+			sched_resume_async(sleeper);
+	}
+	spin_unlock(&wqh->slock);
+}
 
 /* Wake up single waiter */
 void wake_up(struct waitqueue_head *wqh, int sync)
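Note the design choice in wake_up_all(): each sleeper is tagged TASK_INTERRUPTED rather than simply made runnable, so the WAIT_* paths report -EINTR instead of pretending the awaited event occurred; thread_destroy() relies on this to flush IPC waiters off a dying thread's queues. On the sleeper's side the flag is consumed right after schedule() returns, as in the macros above:

	if (current->flags & TASK_INTERRUPTED) {
		current->flags &= ~TASK_INTERRUPTED;
		ret = -EINTR;	/* caller must re-check its condition */
	}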
@@ -124,7 +98,7 @@ void wake_up(struct waitqueue_head *wqh, int sync)
 	BUG_ON(list_empty(&wqh->task_list));
 	list_del_init(&wq->task_list);
 	wqh->sleepers--;
-	sleeper->state = TASK_RUNNABLE;
+	sleeper->flags |= TASK_INTERRUPTED;
 	printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
 	spin_unlock(&wqh->slock);
 
@@ -152,6 +126,8 @@ int wake_up_task(struct ktcb *task, int sync)
 		spin_unlock(&task->waitlock);
 		return -1;
 	}
+	wqh = task->waiting_on;
+	wq = task->wq;
 
 	/*
 	 * We have found the waitqueue head.
@@ -159,8 +135,6 @@ int wake_up_task(struct ktcb *task, int sync)
 	 * lock order and avoid deadlocks. Release task's
 	 * waitlock and take the wqh's one.
 	 */
-	wqh = task->waiting_on;
-	wq = task->wq;
 	spin_unlock(&task->waitlock);
 
 	/* -- Task can be woken up by someone else here -- */
@@ -184,11 +158,15 @@ int wake_up_task(struct ktcb *task, int sync)
 	wqh->sleepers--;
 	task->waiting_on = 0;
 	task->wq = 0;
-	task->state = TASK_RUNNABLE;
+	task->flags |= TASK_INTERRUPTED;
 	spin_unlock(&wqh->slock);
 	spin_unlock(&task->waitlock);
 
-	/* Removed from waitqueue, we can now safely resume task */
+	/*
+	 * Task is removed from its waitqueue. Now we can
+	 * safely resume it without locks as this is the only
+	 * code path that can resume the task.
+	 */
 	if (sync)
 		sched_resume_sync(task);
 	else