exit() almost there.

- Implemented a reasonable way to suspend a task.
  - A task that has a pending suspend would be interrupted
    from its sleep via the suspender task.
  - If suspend was raised and, immediately afterwards, the task was about
    to go to sleep, the scheduler wakes it up.
  - If suspend was raised when task was in user mode, then an irq suspends it.
  - Also suspends are checked at the end of a syscall so that if suspend was
    raised because of a syscall from the task, the task is suspended before it
    goes back to user mode.

  - This mechanism is very similar to signals, and it may serve as a basis
    for implementing signal handling.

- Implemented common vma dropping for shadow vm object dropping and task exiting.
This commit is contained in:
Bahadir Balban
2008-10-20 12:56:30 +03:00
parent 0db0f7e334
commit aa2be891cd
15 changed files with 150 additions and 138 deletions

View File

@@ -36,8 +36,11 @@ static inline struct ktcb *current_task(void)
#define current current_task()
#define need_resched (current->ts_need_resched)
void sched_init_task(struct ktcb *task, int priority);
void sched_prepare_sleep(void);
void sched_suspend_sync(void);
void sched_suspend_async(void);
void sched_resume_sync(struct ktcb *task);
void sched_resume_async(struct ktcb *task);
void scheduler_start(void);

View File

@@ -148,5 +148,7 @@ extern struct id_pool *thread_id_pool;
extern struct id_pool *space_id_pool;
extern struct id_pool *tgroup_id_pool;
void task_process_pending_flags(void);
#endif /* __TCB_H__ */

View File

@@ -10,6 +10,11 @@ struct waitqueue {
struct ktcb *task;
};
enum wakeup_flags {
WAKEUP_INTERRUPT = (1 << 0),
WAKEUP_SYNC = (1 << 1)
};
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
struct waitqueue wq = { \
.task_list = { &wq.task_list, &wq.task_list }, \
@@ -65,9 +70,9 @@ do { \
} \
} while(0);
void wake_up(struct waitqueue_head *wqh, int sync);
int wake_up_task(struct ktcb *task, int sync);
void wake_up_all(struct waitqueue_head *wqh, int sync);
void wake_up(struct waitqueue_head *wqh, unsigned int flags);
int wake_up_task(struct ktcb *task, unsigned int flags);
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
#endif /* __LIB_WAIT_H__ */

View File

@@ -28,7 +28,7 @@ int sys_thread_switch(syscall_context_t *regs)
int thread_suspend(struct task_ids *ids)
{
struct ktcb *task;
int ret;
int ret = 0;
if (!(task = find_task(ids->tid)))
return -ESRCH;
@@ -36,17 +36,13 @@ int thread_suspend(struct task_ids *ids)
if (task->state == TASK_INACTIVE)
return 0;
/* First show our intention to suspend thread */
/* Signify we want to suspend the thread */
task->flags |= TASK_SUSPENDING;
/*
* Interrupt the task in case it was sleeping
* so that it will be caught and suspended by
* the scheduler.
*/
wake_up_task(task, 1);
/* Wake it up if it's sleeping */
wake_up_task(task, WAKEUP_INTERRUPT | WAKEUP_SYNC);
/* Wait until scheduler wakes us up */
/* Wait until task suspends itself */
WAIT_EVENT(&task->wqh_pager,
task->state == TASK_INACTIVE, ret);

View File

@@ -7,10 +7,11 @@
*/
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/platform.h>
#include <l4/generic/tcb.h>
#include <l4/generic/irq.h>
#include <l4/lib/mutex.h>
#include <l4/generic/scheduler.h>
#include <l4/lib/printk.h>
#include INC_PLAT(irq.h)
#include INC_ARCH(exception.h)
@@ -73,5 +74,11 @@ void do_irq(void)
printk("Spurious or broken irq\n"); BUG();
}
irq_enable(irq_index);
/* Process any pending flags for currently runnable task */
task_process_pending_flags();
}

View File

@@ -228,6 +228,44 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
SCHED_TICKS * task->priority / prio_total;
}
/*
* NOTE: Could do these as sched_prepare_suspend()
* + schedule() or need_resched = 1
*/
/*
 * Suspend the current task synchronously (process context only):
 * take it off the runqueue, mark it TASK_INACTIVE, clear the pending
 * suspend request, notify the pager, and yield immediately via
 * schedule(). The statement order matters: all state changes happen
 * under preempt_disable() so an irq cannot reschedule us half-way.
 */
void sched_suspend_sync(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
/*
 * An inactive task no longer counts towards the priority total
 * used for timeslice recalculation (unlike a sleeping task).
 */
prio_total -= current->priority;
BUG_ON(prio_total <= 0);
preempt_enable();
/* Async wake up any waiters */
wake_up_task(find_task(current->pagerid), 0);
schedule();
}
/*
 * Suspend the current task asynchronously (safe from irq context,
 * where schedule() must not be called directly): same state changes
 * as sched_suspend_sync(), but the actual context switch is deferred
 * by setting need_resched so the scheduler runs at the next
 * opportunity.
 */
void sched_suspend_async(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
/* Inactive tasks do not contribute to the priority total */
prio_total -= current->priority;
BUG_ON(prio_total <= 0);
/* This will make sure we yield soon */
preempt_enable();
/* Async wake up any waiters */
wake_up_task(find_task(current->pagerid), 0);
need_resched = 1;
}
/*
* Tasks come here, either by setting need_resched (via next irq),
* or by directly calling it (in process context).
@@ -270,17 +308,6 @@ void schedule()
/* Cannot have any irqs that schedule after this */
preempt_disable();
#if 0
/* NOTE:
* We could avoid unnecessary scheduling by detecting
* a task that has been just woken up.
*/
if ((task->flags & TASK_WOKEN_UP) && in_process_context()) {
preempt_enable();
return 0;
}
#endif
/* Reset schedule flag */
need_resched = 0;
@@ -293,43 +320,13 @@ void schedule()
sched_rq_add_task(current, rq_expired, RQ_ADD_BEHIND);
}
/* Check if there's a pending suspend for thread */
if (current->flags & TASK_SUSPENDING) {
/*
* The task should have no locks and be in a runnable state.
* (e.g. properly woken up by the suspender)
*/
if (current->nlocks == 0 &&
current->state == TASK_RUNNABLE) {
/* Suspend it if suitable */
current->state = TASK_INACTIVE;
current->flags &= ~TASK_SUSPENDING;
/*
* The task has been made inactive here.
* A suspended task affects timeslices whereas
* a sleeping task doesn't as it is believed
* sleepers would become runnable soon.
*/
prio_total -= current->priority;
BUG_ON(prio_total <= 0);
/* Prepare to wake up any waiters */
wake_up(&current->wqh_pager, 0);
} else {
if (current->state == TASK_RUNNABLE)
sched_rq_remove_task(current);
/*
* Top up task's ticks temporarily, and
* wait for it to release its locks.
*/
current->state = TASK_RUNNABLE;
current->ticks_left = max(current->ticks_left,
SCHED_GRANULARITY);
sched_rq_add_task(current, rq_runnable, RQ_ADD_FRONT);
}
}
/*
* If task is about to sleep and
* it has pending events, wake it up.
*/
if (current->flags & TASK_SUSPENDING &&
current->state == TASK_SLEEPING)
wake_up_task(current, WAKEUP_INTERRUPT);
/* Determine the next task to be run */
if (rq_runnable->total > 0) {

View File

@@ -5,7 +5,10 @@
*/
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/preempt.h>
#include <l4/lib/idpool.h>
#include INC_ARCH(exception.h)
/* ID pools for threads and spaces. */
struct id_pool *thread_id_pool;
@@ -20,6 +23,25 @@ unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);
/*
* When there is an asynchronous pending event to be handled by
* the task (e.g. task is suspending), normally it is processed
* when the task is returning to user mode from the kernel. If
* the event is raised when the task is in userspace, this call
* in irq context makes sure it is handled.
*/
void task_process_pending_flags(void)
{
/*
 * Only act if the task was in user mode when interrupted; events
 * raised while the task is inside the kernel are handled on the
 * syscall return path instead.
 * NOTE(review): assumes TASK_IN_USER() inspects the saved mode of
 * the interrupted context — confirm against the arch definition.
 */
if (TASK_IN_USER(current)) {
if (current->flags & TASK_SUSPENDING) {
/* In irq context we must defer; schedule() cannot be called directly */
if (in_irq_context())
sched_suspend_async();
else
sched_suspend_sync();
}
}
}
#if 0
int task_suspend(struct ktcb *task)
{

View File

@@ -6,6 +6,7 @@
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/api/errno.h>
#include INC_GLUE(memlayout.h)
#include INC_GLUE(syscall.h)
@@ -60,13 +61,15 @@ void syscall_init()
/* Checks a syscall is legitimate and dispatches to appropriate handler. */
int syscall(syscall_context_t *regs, unsigned long swi_addr)
{
int ret = 0;
/* Check if genuine system call, coming from the syscall page */
if ((swi_addr & ARM_SYSCALL_PAGE) == ARM_SYSCALL_PAGE) {
/* Check within syscall offset boundary */
if (((swi_addr & syscall_offset_mask) >= 0) &&
((swi_addr & syscall_offset_mask) <= syscalls_end_offset)) {
/* Quick jump, rather than compare each */
return (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
ret = (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
} else {
printk("System call received from call @ 0x%lx."
"Instruction: 0x%lx.\n", swi_addr,
@@ -78,5 +81,12 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr)
"Discarding.\n", swi_addr);
return -ENOSYS;
}
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
sched_suspend_sync();
}
return ret;
}

View File

@@ -59,7 +59,7 @@ int wait_on(struct waitqueue_head *wqh)
}
/* Wake up all */
void wake_up_all(struct waitqueue_head *wqh, int sync)
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
{
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
@@ -72,11 +72,12 @@ void wake_up_all(struct waitqueue_head *wqh, int sync)
BUG_ON(list_empty(&wqh->task_list));
list_del_init(&wq->task_list);
wqh->sleepers--;
sleeper->flags |= TASK_INTERRUPTED;
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
spin_unlock(&wqh->slock);
if (sync)
if (flags & WAKEUP_SYNC)
sched_resume_sync(sleeper);
else
sched_resume_async(sleeper);
@@ -85,7 +86,7 @@ void wake_up_all(struct waitqueue_head *wqh, int sync)
}
/* Wake up single waiter */
void wake_up(struct waitqueue_head *wqh, int sync)
void wake_up(struct waitqueue_head *wqh, unsigned int flags)
{
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
@@ -98,11 +99,12 @@ void wake_up(struct waitqueue_head *wqh, int sync)
BUG_ON(list_empty(&wqh->task_list));
list_del_init(&wq->task_list);
wqh->sleepers--;
sleeper->flags |= TASK_INTERRUPTED;
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
spin_unlock(&wqh->slock);
if (sync)
if (flags & WAKEUP_SYNC)
sched_resume_sync(sleeper);
else
sched_resume_async(sleeper);
@@ -116,7 +118,7 @@ void wake_up(struct waitqueue_head *wqh, int sync)
* as we were peeking on it, returns -1. @sync makes us immediately
* yield or else leave it to scheduler's discretion.
*/
int wake_up_task(struct ktcb *task, int sync)
int wake_up_task(struct ktcb *task, unsigned int flags)
{
struct waitqueue_head *wqh;
struct waitqueue *wq;
@@ -158,7 +160,8 @@ int wake_up_task(struct ktcb *task, int sync)
wqh->sleepers--;
task->waiting_on = 0;
task->wq = 0;
task->flags |= TASK_INTERRUPTED;
if (flags & WAKEUP_INTERRUPT)
task->flags |= TASK_INTERRUPTED;
spin_unlock(&wqh->slock);
spin_unlock(&task->waitlock);
@@ -167,7 +170,7 @@ int wake_up_task(struct ktcb *task, int sync)
* safely resume it without locks as this is the only
* code path that can resume the task.
*/
if (sync)
if (flags & WAKEUP_SYNC)
sched_resume_sync(task);
else
sched_resume_async(task);
@@ -175,5 +178,3 @@ int wake_up_task(struct ktcb *task, int sync)
return 0;
}

View File

@@ -5,20 +5,22 @@
#include <unistd.h>
#include <l4/macros.h>
static inline void l4_exit(int status)
static inline void __attribute__ ((noreturn)) l4_exit(int status)
{
int err;
int ret;
write_mr(L4SYS_ARG0, status);
/* Call pager with exit() request. */
err = l4_send(PAGER_TID, L4_IPC_TAG_EXIT);
printf("%s: L4 IPC Error: %d.\n", __FUNCTION__, err);
/* Call pager with exit() request and block on its receive phase */
ret = l4_sendrecv(PAGER_TID, PAGER_TID, L4_IPC_TAG_EXIT);
/* This call should not fail or return */
printf("%s: L4 IPC returned: %d.\n", __FUNCTION__, ret);
BUG();
}
void exit(int status)
void __attribute__ ((noreturn)) _exit(int status)
{
l4_exit(status);
}

View File

@@ -123,6 +123,11 @@ void handle_requests(void)
ret = sys_fork(sender);
break;
}
case L4_IPC_TAG_EXIT: {
/* An exiting task has no receive phase */
sys_exit(sender, (int)mr[0]);
return;
}
case L4_IPC_TAG_BRK: {
// ret = sys_brk(sender, (void *)mr[0]);
// break;

View File

@@ -365,7 +365,20 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
/* This version is used when exiting. */
int vma_drop_merge_delete_all(struct vm_area *vma)
{
struct vm_obj_link *vmo_link;
struct vm_obj_link *vmo_link, *n;
/* Vma cannot be empty */
BUG_ON(list_empty(&vma->vm_obj_list));
/* Traverse and get rid of all links */
list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list)
vma_drop_merge_delete(vma, vmo_link);
return 0;
}
int vma_drop_merge_delete_all_old(struct vm_area *vma)
{
struct vm_obj_link *vmo_link, *n;
/* Get the first link on the vma */
BUG_ON(list_empty(&vma->vm_obj_list));
@@ -373,6 +386,9 @@ int vma_drop_merge_delete_all(struct vm_area *vma)
struct vm_obj_link, list);
/* Traverse and get rid of all links */
list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list) {
vma_drop_merge_delete(vma, vmo_link);
}
do {
vma_drop_merge_delete(vma, vmo_link);
@@ -382,6 +398,7 @@ int vma_drop_merge_delete_all(struct vm_area *vma)
return 0;
}
/* TODO:
* - Why not allocate a swap descriptor in vma_create_shadow() rather than
* a bare vm_object? It will be needed.

View File

@@ -106,19 +106,6 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
return shm->shm_addr;
}
/* TODO: Do we need this?
* MM0 never needs a task's utcb page. vfs needs it.
* UTCBs get special treatment here. If the task
* is attaching to its utcb, mm0 prefaults it so
* that it can access it later on whether or not
* the task makes a syscall to mm0 without first
* faulting the utcb.
*/
/*
if ((unsigned long)shmaddr == task->utcb_address)
utcb_prefault(task, VM_READ | VM_WRITE);
*/
void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg)
{
struct vm_file *shm_file, *n;

View File

@@ -1,9 +1,8 @@
/*
* Utcb address allocation for user tasks.
* utcb address allocation for user tasks.
*
* Copyright (C) 2008 Bahadir Balban
*/
#include <stdio.h>
#include <utcb.h>
#include <lib/addr.h>
@@ -71,44 +70,3 @@ void *task_send_utcb_address(struct tcb *sender, l4id_t taskid)
return 0;
}
#if 0
To be ditched
/*
* Triggered during a sys_shmat() by a client task when mapping its utcb.
* This prefaults the utcb and maps it in to mm0 so that it can freely
* access it anytime later.
*/
/*
 * Prefault and map a task's utcb page into mm0 (this code is inside
 * a #if 0 block and is being removed by this commit — kept only for
 * reference).
 *
 * @task: task whose utcb page is prefaulted.
 * @vmflags: access flags for the prefault (e.g. VM_READ | VM_WRITE).
 * Returns 0 on success, negative error from prefault_page() on failure.
 */
int utcb_prefault(struct tcb *task, unsigned int vmflags)
{
int err;
struct page *pg;
/* First map in the page to task with given flags, e.g. read/write */
if ((err = prefault_page(task, task->utcb_address, vmflags)) < 0) {
printf("%s: Failed: %d\n", __FUNCTION__, err);
return err;
}
/*
 * Get the topmost page. Since we did both a VM_READ and VM_WRITE
 * prefault, this gets a writeable instead of a read-only page.
 */
pg = task_virt_to_page(task, task->utcb_address);
if (!pg || IS_ERR(pg)) {
printf("%s: Cannot retrieve task %d's utcb page.\n",
__FUNCTION__, task->tid);
BUG();
}
/* Map it in to self */
l4_map((void *)page_to_phys(pg), (void *)task->utcb_address, 1,
MAP_USR_RW_FLAGS, self_tid());
/* Flag that says this task's utcb is mapped to mm0 as r/w */
task->utcb_mapped = 1;
return 0;
}
#endif

View File

@@ -32,7 +32,7 @@ int forktest(void)
printf("PID: %d, my global: %d\n", myid, global);
printf("-- PASSED --\n");
out:
while(1)
;
printf("PID: %d exiting...\n", myid);
_exit(0);
}