Added separate exit paths for the pager and its children.

Created a task_dead list on the pager that children move to when
they exit.
Bahadir Balban
2009-10-30 12:00:05 +02:00
parent a6c61e05b9
commit 26aa62d2ea
8 changed files with 120 additions and 28 deletions
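(Context, not part of the commit: the diff below only adds the child side of the exit path, where a dying thread parks itself on its pager's task_dead list and waits to be deleted. A minimal sketch of how the pager side might later drain that list is given here for illustration; list_first_struct() and tcb_delete() are hypothetical placeholder names, while the ktcb_list fields and mutex calls are taken from the diff.)

/*
 * Hypothetical sketch only: drain a pager's task_dead list and
 * free each child that parked itself there in sched_die_child().
 */
void pager_reap_dead_children(struct ktcb *pager)
{
	struct ktcb *child;

	/* Same lock a dying child holds while adding itself */
	mutex_lock(&pager->task_dead.list_lock);

	while (!list_empty(&pager->task_dead.list)) {
		/* Placeholder: fetch the first dead child on the list */
		child = list_first_struct(&pager->task_dead.list,
					  struct ktcb, task_list);
		list_remove_init(&child->task_list);
		pager->task_dead.count--;

		/* Placeholder: release the ktcb now that it cannot run */
		tcb_delete(child);
	}

	mutex_unlock(&pager->task_dead.list_lock);
}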


@@ -255,7 +255,12 @@ void tcb_delete_schedule(void)
}
#endif
void sched_die_sync(void)
/*
* A self-paging thread deletes itself,
* schedules and disappears from the system.
*/
void sched_die_pager(void)
{
/* Remove from its list, callers get -ESRCH */
tcb_remove(current);
@@ -290,6 +295,80 @@ void sched_die_sync(void)
BUG();
}
/*
 * A paged thread leaves the system and waits on
* its pager's task_dead queue.
*/
void sched_die_child(void)
{
/*
* Find pager, he _must_ be there because he never
* quits before quitting us
*/
struct ktcb *pager = tcb_find(current->pagerid);
/* Lock its task_dead queue */
mutex_lock(&pager->task_dead.list_lock);
/* Remove from container task list,
* callers get -ESRCH */
tcb_remove(current);
/*
 * If the pager searches for us in order to destroy us,
 * he won't find us, but he will block on the
 * task_dead mutex when checking whether
 * we died already.
 */
/*
* If there are any sleepers on any of the task's
* waitqueues, we need to wake those tasks up.
*/
wake_up_all(&current->wqh_send, 0);
wake_up_all(&current->wqh_recv, 0);
/*
* Add self to pager's dead tasks list,
* to be deleted by pager
*/
ktcb_list_add(current, &pager->task_dead);
/* Now quit the scheduler */
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
scheduler.prio_total -= current->priority;
BUG_ON(scheduler.prio_total < 0);
preempt_enable();
/*
* Unlock task_dead queue,
* pager can safely delete us
*/
mutex_unlock(&pager->task_dead.list_lock);
BUG();
}
void sched_die_sync(void)
{
if (current->tid == current->pagerid)
sched_die_pager();
else
sched_die_child();
}
/*
 * TODO:
 * Instead of sched_suspend_sync(),
 * call sched_die_sync() on killer suspends
 * (e.g. if the kill flag is also set, call sched_die_sync() instead)
 * and handle both dying on one's own and dying via a pager
 * in there (e.g. put yourself on a task_dead queue, take
 * care of the pager calling destroy on you, calling wait on you, etc.)
 */
/*
* NOTE: Could do these as sched_prepare_suspend()
* + schedule() or need_resched = 1
@@ -304,9 +383,6 @@ void sched_suspend_sync(void)
BUG_ON(scheduler.prio_total < 0);
preempt_enable();
if (current->flags & TASK_EXITING)
task_make_zombie(current);
/*
* Async wake up any waiting pagers
*


@@ -20,7 +20,7 @@
void init_ktcb_list(struct ktcb_list *ktcb_list)
{
memset(ktcb_list, 0, sizeof(*ktcb_list));
spin_lock_init(&ktcb_list->list_lock);
mutex_init(&ktcb_list->list_lock);
link_init(&ktcb_list->list);
}
@@ -30,6 +30,8 @@ void tcb_init(struct ktcb *new)
link_init(&new->task_list);
mutex_init(&new->thread_control_lock);
init_ktcb_list(&new->task_dead);
cap_list_init(&new->cap_list);
/* Initialise task's scheduling state and parameters. */
@@ -101,14 +103,14 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
{
struct ktcb *task;
spin_lock(&curcont->ktcb_list.list_lock);
mutex_lock(&curcont->ktcb_list.list_lock);
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
if (task->space->spid == spid) {
spin_unlock(&curcont->ktcb_list.list_lock);
mutex_unlock(&curcont->ktcb_list.list_lock);
return task;
}
}
spin_unlock(&curcont->ktcb_list.list_lock);
mutex_unlock(&curcont->ktcb_list.list_lock);
return 0;
}
@@ -119,44 +121,44 @@ struct ktcb *tcb_find(l4id_t tid)
if (current->tid == tid)
return current;
spin_lock(&curcont->ktcb_list.list_lock);
mutex_lock(&curcont->ktcb_list.list_lock);
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
if (task->tid == tid) {
spin_unlock(&curcont->ktcb_list.list_lock);
mutex_unlock(&curcont->ktcb_list.list_lock);
return task;
}
}
spin_unlock(&curcont->ktcb_list.list_lock);
mutex_unlock(&curcont->ktcb_list.list_lock);
return 0;
}
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)
{
spin_lock(&ktcb_list->list_lock);
mutex_lock(&ktcb_list->list_lock);
BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++ktcb_list->count);
list_insert(&new->task_list, &ktcb_list->list);
spin_unlock(&ktcb_list->list_lock);
mutex_unlock(&ktcb_list->list_lock);
}
void tcb_add(struct ktcb *new)
{
struct container *c = new->container;
spin_lock(&c->ktcb_list.list_lock);
mutex_lock(&c->ktcb_list.list_lock);
BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++c->ktcb_list.count);
list_insert(&new->task_list, &c->ktcb_list.list);
spin_unlock(&c->ktcb_list.list_lock);
mutex_unlock(&c->ktcb_list.list_lock);
}
void tcb_remove(struct ktcb *new)
{
spin_lock(&curcont->ktcb_list.list_lock);
mutex_lock(&curcont->ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(--curcont->ktcb_list.count < 0);
list_remove_init(&new->task_list);
spin_unlock(&curcont->ktcb_list.list_lock);
mutex_unlock(&curcont->ktcb_list.list_lock);
}
/* Offsets for ktcb fields that are accessed from assembler */