Added separate exit paths for the pager and its children.

Created a task_dead list on the pager that children are moved onto when
they exit.
This commit is contained in:
Bahadir Balban
2009-10-30 12:00:05 +02:00
parent a6c61e05b9
commit 26aa62d2ea
8 changed files with 120 additions and 28 deletions

View File

@@ -55,6 +55,7 @@ int main(void)
/* Now quit to demo self-paging quit */ /* Now quit to demo self-paging quit */
l4_exit(0); l4_exit(0);
/* Now quit by null pointer */ /* Now quit by null pointer */
// *((int *)0) = 5; // *((int *)0) = 5;

View File

@@ -10,6 +10,7 @@
/* Number of containers defined at compile-time */ /* Number of containers defined at compile-time */
#include <l4/generic/capability.h> #include <l4/generic/capability.h>
#include <l4/lib/list.h> #include <l4/lib/list.h>
#include <l4/lib/mutex.h>
#include <l4/lib/idpool.h> #include <l4/lib/idpool.h>
#include INC_SUBARCH(mm.h) #include INC_SUBARCH(mm.h)
@@ -43,7 +44,7 @@ container_head_init(struct container_head *chead)
/* Hash table for all existing tasks */ /* Hash table for all existing tasks */
struct ktcb_list { struct ktcb_list {
struct link list; struct link list;
struct spinlock list_lock; struct mutex list_lock;
int count; int count;
}; };

View File

@@ -80,6 +80,7 @@ struct ktcb {
enum task_state state; enum task_state state;
struct link task_list; /* Global task list. */ struct link task_list; /* Global task list. */
struct ktcb_list task_dead; /* List of dead children */
/* UTCB related, see utcb.txt in docs */ /* UTCB related, see utcb.txt in docs */
unsigned long utcb_address; /* Virtual ref to task's utcb area */ unsigned long utcb_address; /* Virtual ref to task's utcb area */

View File

@@ -183,23 +183,32 @@ void thread_destroy_current(void)
{ {
struct ktcb *task, *n; struct ktcb *task, *n;
/* Suspend all threads under control of this pager */ /* Signal death to all threads under control of this pager */
spin_lock(&curcont->ktcb_list.list_lock); mutex_lock(&curcont->ktcb_list.list_lock);
list_foreach_removable_struct(task, n, list_foreach_removable_struct(task, n,
&curcont->ktcb_list.list, &curcont->ktcb_list.list,
task_list) { task_list) {
if (task->tid == current->tid || if (task->tid == current->tid ||
task->pagerid != current->tid) task->pagerid != current->tid)
continue; continue;
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
/* Here we wait for each to die */
thread_suspend(task, TASK_EXITING); thread_suspend(task, TASK_EXITING);
spin_lock(&curcont->ktcb_list.list_lock); mutex_lock(&curcont->ktcb_list.list_lock);
} }
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
/* Indicate we want to become zombie on suspend */ /* Destroy all children */
current->flags |= TASK_EXITING; mutex_lock(&current->task_dead.list_lock);
list_foreach_removable_struct(task, n,
&current->task_dead.list,
task_list) {
tcb_delete(task);
}
mutex_unlock(&current->task_dead.list_lock);
/* Destroy self */
sched_die_sync(); sched_die_sync();
} }

View File

@@ -123,8 +123,7 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
"returned error (%d). Suspend and exiting thread.\n", "returned error (%d). Suspend and exiting thread.\n",
current->tid, err); current->tid, err);
BUG_ON(current->nlocks); BUG_ON(current->nlocks);
current->flags |= TASK_EXITING; sched_die_sync();
sched_suspend_sync();
} }
} }

View File

@@ -255,7 +255,12 @@ void tcb_delete_schedule(void)
} }
#endif #endif
void sched_die_sync(void)
/*
* A self-paging thread deletes itself,
* schedules and disappears from the system.
*/
void sched_die_pager(void)
{ {
/* Remove from its list, callers get -ESRCH */ /* Remove from its list, callers get -ESRCH */
tcb_remove(current); tcb_remove(current);
@@ -290,6 +295,80 @@ void sched_die_sync(void)
BUG(); BUG();
} }
/*
* A paged thread leaves the system and waits on
* its pager's task_dead queue until the pager deletes it.
*/
void sched_die_child(void)
{
/*
 * Exit path for a thread that is paged by another task.
 * The thread removes itself from the container task list and
 * the runqueue, parks itself on its pager's task_dead queue,
 * and leaves final deletion (tcb_delete) to the pager.
 * This function must never return to its caller.
 */
/*
 * Find pager, he _must_ be there because he never
 * quits before quitting us
 */
struct ktcb *pager = tcb_find(current->pagerid);
/* Lock its task_dead queue */
mutex_lock(&pager->task_dead.list_lock);
/* Remove from container task list,
 * callers get -ESRCH */
tcb_remove(current);
/*
 * If the pager searches for us to destroy,
 * he won't find us but he will block on the
 * task_dead mutex when searching whether
 * we died already.
 */
/*
 * If there are any sleepers on any of the task's
 * waitqueues, we need to wake those tasks up.
 */
wake_up_all(&current->wqh_send, 0);
wake_up_all(&current->wqh_recv, 0);
/*
 * Add self to pager's dead tasks list,
 * to be deleted by pager
 */
ktcb_list_add(current, &pager->task_dead);
/* Now quit the scheduler */
preempt_disable();
/* Take self off the runqueue and retire from scheduling accounting */
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
scheduler.prio_total -= current->priority;
BUG_ON(scheduler.prio_total < 0);
/*
 * NOTE(review): if preempt_enable() can reschedule immediately
 * (need_resched set), this task is already TASK_INACTIVE and off
 * the runqueue, so it would never run again and would still hold
 * pager->task_dead.list_lock below -- verify that preemption
 * cannot occur between preempt_enable() and mutex_unlock().
 */
preempt_enable();
/*
 * Unlock task_dead queue,
 * pager can safely delete us
 */
mutex_unlock(&pager->task_dead.list_lock);
/*
 * NOTE(review): control falls straight into BUG() here with no
 * explicit schedule() call -- presumably a context switch away
 * from this dead task happens before BUG() is reached; confirm
 * the intended final-descheduling mechanism (compare with
 * sched_die_pager()).
 */
BUG();
}
void sched_die_sync(void)
{
	/*
	 * Dispatch to the right exit path: a task that is its own
	 * pager dies as a pager, everyone else dies as a paged
	 * child and is reaped by its pager later.
	 */
	if (current->tid != current->pagerid)
		sched_die_child();
	else
		sched_die_pager();
}
/*
* TODO:
* Instead of sched_suspend_sync()
* call sched_die_sync() on killer suspends:
* (e.g. if also kill flag set, call sched_die_sync instead)
* and handle dying on its own and dying over a pager
* in there. (e.g. put yourself in a task_dead queue, take
* care of pager calling destroy on you, calling wait on you etc.)
*/
/* /*
* NOTE: Could do these as sched_prepare_suspend() * NOTE: Could do these as sched_prepare_suspend()
* + schedule() or need_resched = 1 * + schedule() or need_resched = 1
@@ -304,9 +383,6 @@ void sched_suspend_sync(void)
BUG_ON(scheduler.prio_total < 0); BUG_ON(scheduler.prio_total < 0);
preempt_enable(); preempt_enable();
if (current->flags & TASK_EXITING)
task_make_zombie(current);
/* /*
* Async wake up any waiting pagers * Async wake up any waiting pagers
* *

View File

@@ -20,7 +20,7 @@
void init_ktcb_list(struct ktcb_list *ktcb_list) void init_ktcb_list(struct ktcb_list *ktcb_list)
{ {
memset(ktcb_list, 0, sizeof(*ktcb_list)); memset(ktcb_list, 0, sizeof(*ktcb_list));
spin_lock_init(&ktcb_list->list_lock); mutex_init(&ktcb_list->list_lock);
link_init(&ktcb_list->list); link_init(&ktcb_list->list);
} }
@@ -30,6 +30,8 @@ void tcb_init(struct ktcb *new)
link_init(&new->task_list); link_init(&new->task_list);
mutex_init(&new->thread_control_lock); mutex_init(&new->thread_control_lock);
init_ktcb_list(&new->task_dead);
cap_list_init(&new->cap_list); cap_list_init(&new->cap_list);
/* Initialise task's scheduling state and parameters. */ /* Initialise task's scheduling state and parameters. */
@@ -101,14 +103,14 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
{ {
struct ktcb *task; struct ktcb *task;
spin_lock(&curcont->ktcb_list.list_lock); mutex_lock(&curcont->ktcb_list.list_lock);
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) { list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
if (task->space->spid == spid) { if (task->space->spid == spid) {
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
return task; return task;
} }
} }
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
return 0; return 0;
} }
@@ -119,44 +121,44 @@ struct ktcb *tcb_find(l4id_t tid)
if (current->tid == tid) if (current->tid == tid)
return current; return current;
spin_lock(&curcont->ktcb_list.list_lock); mutex_lock(&curcont->ktcb_list.list_lock);
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) { list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
if (task->tid == tid) { if (task->tid == tid) {
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
return task; return task;
} }
} }
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
return 0; return 0;
} }
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list) void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)
{ {
spin_lock(&ktcb_list->list_lock); mutex_lock(&ktcb_list->list_lock);
BUG_ON(!list_empty(&new->task_list)); BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++ktcb_list->count); BUG_ON(!++ktcb_list->count);
list_insert(&new->task_list, &ktcb_list->list); list_insert(&new->task_list, &ktcb_list->list);
spin_unlock(&ktcb_list->list_lock); mutex_unlock(&ktcb_list->list_lock);
} }
void tcb_add(struct ktcb *new) void tcb_add(struct ktcb *new)
{ {
struct container *c = new->container; struct container *c = new->container;
spin_lock(&c->ktcb_list.list_lock); mutex_lock(&c->ktcb_list.list_lock);
BUG_ON(!list_empty(&new->task_list)); BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++c->ktcb_list.count); BUG_ON(!++c->ktcb_list.count);
list_insert(&new->task_list, &c->ktcb_list.list); list_insert(&new->task_list, &c->ktcb_list.list);
spin_unlock(&c->ktcb_list.list_lock); mutex_unlock(&c->ktcb_list.list_lock);
} }
void tcb_remove(struct ktcb *new) void tcb_remove(struct ktcb *new)
{ {
spin_lock(&curcont->ktcb_list.list_lock); mutex_lock(&curcont->ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list)); BUG_ON(list_empty(&new->task_list));
BUG_ON(--curcont->ktcb_list.count < 0); BUG_ON(--curcont->ktcb_list.count < 0);
list_remove_init(&new->task_list); list_remove_init(&new->task_list);
spin_unlock(&curcont->ktcb_list.list_lock); mutex_unlock(&curcont->ktcb_list.list_lock);
} }
/* Offsets for ktcb fields that are accessed from assembler */ /* Offsets for ktcb fields that are accessed from assembler */

View File

@@ -169,7 +169,10 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr)
if (current->flags & TASK_SUSPENDING) { if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks); BUG_ON(current->nlocks);
sched_suspend_sync(); if (current->flags & TASK_EXITING)
sched_die_sync();
else
sched_suspend_sync();
} }
return ret; return ret;