Reimplemented kill/suspend

It seems to work fine, except that an undefined instruction is
occasionally generated from POSIX userspace.
Bahadir Balban
2009-10-30 21:34:10 +02:00
parent f3c0a38fa9
commit c3c6c10cf7
10 changed files with 114 additions and 157 deletions

View File

@@ -60,7 +60,8 @@ extern struct scheduler scheduler;
void sched_init_runqueue(struct runqueue *rq);
void sched_init_task(struct ktcb *task, int priority);
void sched_prepare_sleep(void);
void sched_die_sync(void);
void sched_pager_exit(void);
void sched_exit_sync(void);
void sched_suspend_sync(void);
void sched_suspend_async(void);
void sched_resume_sync(struct ktcb *task);

View File

@@ -29,6 +29,7 @@
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)
#define TASK_EXITING (1 << 3)
#define TASK_PENDING_SIGNAL (TASK_SUSPENDING | TASK_EXITING)
/* Task states */
enum task_state {
@@ -80,8 +81,7 @@ struct ktcb {
enum task_state state;
struct link task_list; /* Global task list. */
struct link task_dead_list; /* List of dead children */
struct mutex task_dead_mutex; /* Dead children list mutex */
struct ktcb_list child_exit_list;
/* UTCB related, see utcb.txt in docs */
unsigned long utcb_address; /* Virtual ref to task's utcb area */
@@ -156,6 +156,7 @@ struct ktcb *tcb_alloc_init(void);
void tcb_delete(struct ktcb *tcb);
void ktcb_list_remove(struct ktcb *task, struct ktcb_list *ktcb_list);
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list);
void init_ktcb_list(struct ktcb_list *ktcb_list);
void task_update_utcb(struct ktcb *task);

View File

@@ -11,7 +11,6 @@ void thread_id_pool_init(void);
int thread_id_new(void);
int thread_id_del(int tid);
void thread_destroy_current(void);
void task_make_zombie(struct ktcb *task);
void thread_destroy(struct ktcb *);
#endif /* __GENERIC_THREAD_H__ */

View File

@@ -24,44 +24,36 @@ int sys_thread_switch(void)
}
/*
* This suspends a thread that is in the suspended, sleeping or
* runnable state. The `flags' field specifies an additional status
* for the thread, implying an additional action on top of the
* suspension. For example, the TASK_EXITING flag ensures the task
* is moved to a zombie queue during suspension.
*
* Why no race?
*
* There is no race between the pager setting TASK_SUSPENDING,
* and waiting for TASK_INACTIVE non-atomically because the target
* task starts suspending only when it sees TASK_SUSPENDING set and
* it only wakes up the pager after it has switched state to
* TASK_INACTIVE.
*
* If the pager hasn't come to wait_event() and the wake up call is
* already gone, the state is already TASK_INACTIVE so the pager
* won't sleep at all.
* This signals a thread so that it stops what it is doing and
* takes action on the signal provided. Currently this may be a
* suspension or an exit signal.
*/
int thread_suspend(struct ktcb *task, unsigned int flags)
int thread_signal_sync(struct ktcb *task, unsigned int flags,
unsigned int task_state)
{
int ret = 0;
if (task->state == TASK_INACTIVE)
if (task->state == task_state)
return 0;
/* Signify the signal we want the thread to act on */
task->flags |= TASK_SUSPENDING | flags;
task->flags |= flags;
/* Wake it up if it's sleeping */
wake_up_task(task, WAKEUP_INTERRUPT | WAKEUP_SYNC);
/* Wait until the task reaches the requested state */
WAIT_EVENT(&task->wqh_pager,
task->state == TASK_INACTIVE, ret);
task->state == task_state, ret);
return ret;
}
int thread_suspend(struct ktcb *task)
{
return thread_signal_sync(task, TASK_SUSPENDING, TASK_INACTIVE);
}
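
thread_signal_sync() is now the single synchronous signalling primitive, and
thread_suspend() above is just one binding of it. As a sketch of the pattern
only (thread_exit_sync() below is hypothetical and not part of this commit),
an exit-side wrapper would pair TASK_EXITING with the same target state,
exactly as the open-coded call in thread_destroy() further down does:

int thread_exit_sync(struct ktcb *task)
{
	/* Sketch only: deliver an exit signal and wait for TASK_INACTIVE */
	return thread_signal_sync(task, TASK_EXITING, TASK_INACTIVE);
}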
int arch_clear_thread(struct ktcb *tcb)
{
/* Remove from the global list */
@@ -99,7 +91,7 @@ int thread_recycle(struct ktcb *task)
{
int ret;
if ((ret = thread_suspend(task, 0)) < 0)
if ((ret = thread_suspend(task)) < 0)
return ret;
/*
@@ -121,97 +113,54 @@ int thread_recycle(struct ktcb *task)
return 0;
}
void thread_destroy_current();
int thread_destroy(struct ktcb *task)
{
int ret;
/*
* Pager destroying itself
*/
if (task == current) {
thread_destroy_current();
/* If we're a self-destructing pager */
if (task == current &&
current->tid == current->pagerid) {
struct ktcb *child, *n;
/* It should not return */
/* Make all children exit synchronously */
spin_lock(&curcont->ktcb_list.list_lock);
list_foreach_removable_struct(child, n,
&curcont->ktcb_list.list,
task_list) {
if (child->pagerid == current->tid &&
child != current) {
spin_unlock(&curcont->ktcb_list.list_lock);
BUG_ON(thread_signal_sync(child, TASK_EXITING,
TASK_INACTIVE) < 0);
spin_lock(&curcont->ktcb_list.list_lock);
}
}
spin_unlock(&curcont->ktcb_list.list_lock);
/* Delete all exited children */
spin_lock(&current->child_exit_list.list_lock);
list_foreach_removable_struct(child, n,
&current->child_exit_list.list,
task_list) {
list_remove(&child->task_list);
tcb_delete(child);
}
spin_unlock(&current->child_exit_list.list_lock);
/* Destroy yourself */
sched_pager_exit();
BUG();
}
if ((ret = thread_suspend(task, 0)) < 0)
if ((ret = thread_signal_sync(task, TASK_EXITING, TASK_INACTIVE)) < 0)
return ret;
/* Remove tcb from global list so any callers will get -ESRCH */
tcb_remove(task);
/*
* If there are any sleepers on any of the task's
* waitqueues, we need to wake those tasks up.
*/
wake_up_all(&task->wqh_send, 0);
wake_up_all(&task->wqh_recv, 0);
/* We can now safely delete the task */
ktcb_list_remove(task, &current->child_exit_list);
tcb_delete(task);
return 0;
}
void task_make_zombie(struct ktcb *task)
{
/* Remove from its list, callers get -ESRCH */
tcb_remove(task);
/*
* If there are any sleepers on any of the task's
* waitqueues, we need to wake those tasks up.
*/
wake_up_all(&task->wqh_send, 0);
wake_up_all(&task->wqh_recv, 0);
BUG_ON(!(task->flags & TASK_EXITING));
/* Add to zombie list, to be destroyed later */
ktcb_list_add(task, &kernel_resources.zombie_list);
}
/*
* Pagers destroy themselves either by accessing an illegal
* address or voluntarily. All threads they manage are also
* destroyed.
*/
void thread_destroy_current(void)
{
struct ktcb *task, *n;
/* Signal death to all threads under control of this pager */
spin_lock(&curcont->ktcb_list.list_lock);
list_foreach_removable_struct(task, n,
&curcont->ktcb_list.list,
task_list) {
if (task->tid == current->tid ||
task->pagerid != current->tid)
continue;
spin_unlock(&curcont->ktcb_list.list_lock);
/* Here we wait for each to die */
thread_suspend(task, TASK_EXITING);
spin_lock(&curcont->ktcb_list.list_lock);
}
spin_unlock(&curcont->ktcb_list.list_lock);
/* Destroy all children */
mutex_lock(&current->task_dead_mutex);
list_foreach_removable_struct(task, n,
&current->task_dead_list,
task_list) {
tcb_delete(task);
}
mutex_unlock(&current->task_dead_mutex);
/* Destroy self */
sched_die_sync();
}
/* Runs a thread for the first time */
int thread_start(struct ktcb *task)
{
@@ -436,7 +385,7 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
ret = thread_start(task);
break;
case THREAD_SUSPEND:
ret = thread_suspend(task, flags);
ret = thread_suspend(task);
break;
case THREAD_DESTROY:
ret = thread_destroy(task);

View File

@@ -113,7 +113,7 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
if (current->tid == current->pagerid) {
printk("Pager (%d) faulted on itself. FAR: 0x%x, PC: 0x%x Exiting.\n",
current->tid, fault->far, fault->faulty_pc);
thread_destroy_current();
thread_destroy(current);
}
/* Send ipc to the task's pager */
@@ -124,11 +124,9 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
current->tid, err);
BUG_ON(current->nlocks);
/* Declare our exit, currently only used by bug_on checks */
/* Exit as if signalled */
current->flags |= TASK_EXITING;
/* Try to die forever */
while (1)
sched_die_sync();
sched_exit_sync();
}
}
@@ -256,6 +254,8 @@ void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
dbg_abort("Data abort @ PC: ", faulted_pc);
//printk("Data abort: %d, PC: 0x%x\n", current->tid, faulted_pc);
/* Check for more details */
if (check_aborts(faulted_pc, fsr, far) < 0) {
printascii("This abort can't be handled by any pager.\n");
@@ -304,9 +304,12 @@ error:
;
}
void dump_undef_abort(u32 undef_addr)
void dump_undef_abort(u32 undef_addr, unsigned int spsr)
{
dprintk("Undefined instruction at address: ", undef_addr);
printk("Undefined instruction: %d, PC: 0x%x, Mode: %s\n",
current->tid, undef_addr,
(spsr & ARM_MODE_MASK) == ARM_MODE_SVC ? "SVC" : "User");
printascii("Halting system...\n");
BUG();
}

View File

@@ -55,6 +55,7 @@ END_PROC(arm_reset_exception)
BEGIN_PROC(arm_undef_exception)
sub lr, lr, #4
mov r0, lr @ Get undefined abort address
mrs r1, spsr @ Get previous abort state
mov r5, lr @ Save it in r5 in case r0 is trashed
mov lr, pc @ Save return address
ldr pc, =dump_undef_abort

View File

@@ -226,41 +226,11 @@ void sched_resume_async(struct ktcb *task)
RQ_ADD_FRONT);
}
#if 0
/* FIXME: Disables preemption for unbounded time !!! */
void tcb_delete_schedule(void)
{
/* We lock all possible locks to do with tcb deletion */
address_space_lock();
/*
* Lock ktcb mutex cache so that nobody can get me
* during this period
*/
mutex_lock(&kernel_resources.ktcb_cache.lock);
tcb_delete(current);
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
scheduler.prio_total -= current->priority;
BUG_ON(scheduler.prio_total < 0);
ktcb_list_unlock();
address_space_list_unlock();
preempt_enable();
schedule();
}
#endif
/*
* A self-paging thread deletes itself,
* schedules and disappears from the system.
*/
void sched_die_pager(void)
void sched_pager_exit(void)
{
printk("Pager (%d) Exiting...\n", current->tid);
/* Remove from its list, callers get -ESRCH */
@@ -296,6 +266,7 @@ void sched_die_pager(void)
BUG();
}
#if 0
/*
* A paged-thread leaves the system and waits on
* its pager's task_dead queue.
@@ -362,13 +333,37 @@ void sched_die_child(void)
schedule();
BUG();
}
#endif
void sched_die_sync(void)
void sched_exit_sync(void)
{
if (current->tid == current->pagerid)
sched_die_pager();
else
sched_die_child();
struct ktcb *pager = tcb_find(current->pagerid);
/* Quit global list */
tcb_remove(current);
/* Wake up waiters */
wake_up_all(&current->wqh_send, 0);
wake_up_all(&current->wqh_recv, 0);
/* Go to exit list */
ktcb_list_add(current, &pager->child_exit_list);
preempt_disable();
/* Hint pager we're ready */
wake_up(&current->wqh_pager, 0);
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
scheduler.prio_total -= current->priority;
BUG_ON(scheduler.prio_total < 0);
preempt_enable();
/* Quit */
schedule();
BUG();
}
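
sched_suspend_sync() is declared in sched.h and called on the syscall return
path below, but its body is not part of this diff. The following is only a
rough sketch, assuming it mirrors sched_exit_sync() above minus the
exit-specific steps (no tcb_remove(), no move to the pager's child_exit_list),
since a suspended thread must remain findable and resumable:

void sched_suspend_sync(void)
{
	/* Sketch under the assumption above; the real body is not shown here */
	preempt_disable();

	/* Tell a pager blocked in thread_signal_sync() that we took the signal */
	wake_up(&current->wqh_pager, 0);

	sched_rq_remove_task(current);
	current->state = TASK_INACTIVE;
	current->flags &= ~TASK_SUSPENDING;
	scheduler.prio_total -= current->priority;
	BUG_ON(scheduler.prio_total < 0);

	preempt_enable();

	/* Stay off the runqueue until sched_resume_sync/async() puts us back */
	schedule();
}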
/*
@@ -517,7 +512,7 @@ void schedule()
* If task is about to sleep and
* it has pending events, wake it up.
*/
if (current->flags & TASK_SUSPENDING &&
if ((current->flags & TASK_PENDING_SIGNAL) &&
current->state == TASK_SLEEPING)
wake_up_task(current, WAKEUP_INTERRUPT);

View File

@@ -30,8 +30,7 @@ void tcb_init(struct ktcb *new)
link_init(&new->task_list);
mutex_init(&new->thread_control_lock);
mutex_init(&new->task_dead_mutex);
link_init(&new->task_dead_list);
init_ktcb_list(&new->child_exit_list);
cap_list_init(&new->cap_list);
/* Initialise task's scheduling state and parameters. */
@@ -162,6 +161,15 @@ void tcb_remove(struct ktcb *new)
spin_unlock(&curcont->ktcb_list.list_lock);
}
void ktcb_list_remove(struct ktcb *new, struct ktcb_list *ktcb_list)
{
spin_lock(&ktcb_list->list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(--ktcb_list->count < 0);
list_remove(&new->task_list);
spin_unlock(&ktcb_list->list_lock);
}
/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);

View File

@@ -169,10 +169,10 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr)
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
if (current->flags & TASK_EXITING)
sched_die_sync();
else
sched_suspend_sync();
sched_suspend_sync();
} else if (current->flags & TASK_EXITING) {
BUG_ON(current->nlocks);
sched_exit_sync();
}
return ret;

View File

@@ -117,7 +117,7 @@ int mutex_lock(struct mutex *mutex)
* nondeterministic as to how many retries will result in success.
* We may need to add priority-based locking.
*/
printk("Thread (%d) locking (%p) nlocks: %d\n", current->tid, mutex, current->nlocks);
//printk("Thread (%d) locking (%p) nlocks: %d\n", current->tid, mutex, current->nlocks);
for (;;) {
spin_lock(&mutex->wqh.slock);
if (!__mutex_lock(&mutex->lock)) { /* Could not lock, sleep. */
@@ -132,7 +132,7 @@ int mutex_lock(struct mutex *mutex)
/* Did we wake up normally or get interrupted */
if (current->flags & TASK_INTERRUPTED) {
printk("XXXXXXXXXXXXXXXX (%d) Interrupted\n", current->tid);
//printk("XXXXXXXXXXXXXXXX (%d) Interrupted\n", current->tid);
current->flags &= ~TASK_INTERRUPTED;
return -EINTR;
}
@@ -142,14 +142,14 @@ int mutex_lock(struct mutex *mutex)
}
}
spin_unlock(&mutex->wqh.slock);
printk("Thread (%d) locked (%p) nlocks: %d\n", current->tid, mutex, current->nlocks);
//printk("Thread (%d) locked (%p) nlocks: %d\n", current->tid, mutex, current->nlocks);
return 0;
}
static inline void mutex_unlock_common(struct mutex *mutex, int sync)
{
struct ktcb *c = current; if (c);
printk("Thread (%d) unlocking (%p) nlocks: %d\n", c->tid, mutex, c->nlocks);
//printk("Thread (%d) unlocking (%p) nlocks: %d\n", c->tid, mutex, c->nlocks);
spin_lock(&mutex->wqh.slock);
__mutex_unlock(&mutex->lock);
current->nlocks--;