diff --git a/include/l4/generic/scheduler.h b/include/l4/generic/scheduler.h index 539b63c..9298e74 100644 --- a/include/l4/generic/scheduler.h +++ b/include/l4/generic/scheduler.h @@ -36,8 +36,11 @@ static inline struct ktcb *current_task(void) #define current current_task() #define need_resched (current->ts_need_resched) + void sched_init_task(struct ktcb *task, int priority); void sched_prepare_sleep(void); +void sched_suspend_sync(void); +void sched_suspend_async(void); void sched_resume_sync(struct ktcb *task); void sched_resume_async(struct ktcb *task); void scheduler_start(void); diff --git a/include/l4/generic/tcb.h b/include/l4/generic/tcb.h index 87d4d80..b33a25d 100644 --- a/include/l4/generic/tcb.h +++ b/include/l4/generic/tcb.h @@ -148,5 +148,7 @@ extern struct id_pool *thread_id_pool; extern struct id_pool *space_id_pool; extern struct id_pool *tgroup_id_pool; +void task_process_pending_flags(void); + #endif /* __TCB_H__ */ diff --git a/include/l4/lib/wait.h b/include/l4/lib/wait.h index aa25448..a8739e9 100644 --- a/include/l4/lib/wait.h +++ b/include/l4/lib/wait.h @@ -10,6 +10,11 @@ struct waitqueue { struct ktcb *task; }; +enum wakeup_flags { + WAKEUP_INTERRUPT = (1 << 0), + WAKEUP_SYNC = (1 << 1) +}; + #define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \ struct waitqueue wq = { \ .task_list = { &wq.task_list, &wq.task_list }, \ @@ -65,9 +70,9 @@ do { \ } \ } while(0); -void wake_up(struct waitqueue_head *wqh, int sync); -int wake_up_task(struct ktcb *task, int sync); -void wake_up_all(struct waitqueue_head *wqh, int sync); +void wake_up(struct waitqueue_head *wqh, unsigned int flags); +int wake_up_task(struct ktcb *task, unsigned int flags); +void wake_up_all(struct waitqueue_head *wqh, unsigned int flags); #endif /* __LIB_WAIT_H__ */ diff --git a/src/api/thread.c b/src/api/thread.c index cfe2da8..aa4c0b7 100644 --- a/src/api/thread.c +++ b/src/api/thread.c @@ -28,7 +28,7 @@ int sys_thread_switch(syscall_context_t *regs) int thread_suspend(struct task_ids 
*ids) { struct ktcb *task; - int ret; + int ret = 0; if (!(task = find_task(ids->tid))) return -ESRCH; @@ -36,17 +36,13 @@ int thread_suspend(struct task_ids *ids) if (task->state == TASK_INACTIVE) return 0; - /* First show our intention to suspend thread */ + /* Signify we want to suspend the thread */ task->flags |= TASK_SUSPENDING; - /* - * Interrupt the task in case it was sleeping - * so that it will be caught and suspended by - * the scheduler. - */ - wake_up_task(task, 1); + /* Wake it up if it's sleeping */ + wake_up_task(task, WAKEUP_INTERRUPT | WAKEUP_SYNC); - /* Wait until scheduler wakes us up */ + /* Wait until task suspends itself */ WAIT_EVENT(&task->wqh_pager, task->state == TASK_INACTIVE, ret); diff --git a/src/generic/irq.c b/src/generic/irq.c index 9cba89d..ab56200 100644 --- a/src/generic/irq.c +++ b/src/generic/irq.c @@ -7,10 +7,11 @@ */ #include #include +#include #include +#include #include #include -#include #include #include INC_PLAT(irq.h) #include INC_ARCH(exception.h) @@ -73,5 +74,11 @@ void do_irq(void) printk("Spurious or broken irq\n"); BUG(); } irq_enable(irq_index); + + /* Process any pending flags for currently runnable task */ + task_process_pending_flags(); } + + + diff --git a/src/generic/scheduler.c b/src/generic/scheduler.c index dc78fb6..ebc0a15 100644 --- a/src/generic/scheduler.c +++ b/src/generic/scheduler.c @@ -228,6 +228,44 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total) SCHED_TICKS * task->priority / prio_total; } +/* + * NOTE: Could do these as sched_prepare_suspend() + * + schedule() or need_resched = 1 + */ +void sched_suspend_sync(void) +{ + preempt_disable(); + sched_rq_remove_task(current); + current->state = TASK_INACTIVE; + current->flags &= ~TASK_SUSPENDING; + prio_total -= current->priority; + BUG_ON(prio_total <= 0); + preempt_enable(); + + /* Async wake up any waiters */ + wake_up_task(find_task(current->pagerid), 0); + schedule(); +} + +void sched_suspend_async(void) +{ + 
preempt_disable(); + sched_rq_remove_task(current); + current->state = TASK_INACTIVE; + current->flags &= ~TASK_SUSPENDING; + prio_total -= current->priority; + BUG_ON(prio_total <= 0); + + /* This will make sure we yield soon */ + preempt_enable(); + + /* Async wake up any waiters */ + wake_up_task(find_task(current->pagerid), 0); + need_resched = 1; +} + + + /* * Tasks come here, either by setting need_resched (via next irq), * or by directly calling it (in process context). @@ -270,17 +308,6 @@ void schedule() /* Cannot have any irqs that schedule after this */ preempt_disable(); -#if 0 - /* NOTE: - * We could avoid unnecessary scheduling by detecting - * a task that has been just woken up. - */ - if ((task->flags & TASK_WOKEN_UP) && in_process_context()) { - preempt_enable(); - return 0; - } -#endif - /* Reset schedule flag */ need_resched = 0; @@ -293,43 +320,13 @@ void schedule() sched_rq_add_task(current, rq_expired, RQ_ADD_BEHIND); } - /* Check if there's a pending suspend for thread */ - if (current->flags & TASK_SUSPENDING) { - /* - * The task should have no locks and be in a runnable state. - * (e.g. properly woken up by the suspender) - */ - if (current->nlocks == 0 && - current->state == TASK_RUNNABLE) { - /* Suspend it if suitable */ - current->state = TASK_INACTIVE; - current->flags &= ~TASK_SUSPENDING; - - /* - * The task has been made inactive here. - * A suspended task affects timeslices whereas - * a sleeping task doesn't as it is believed - * sleepers would become runnable soon. - */ - prio_total -= current->priority; - BUG_ON(prio_total <= 0); - - /* Prepare to wake up any waiters */ - wake_up(&current->wqh_pager, 0); - } else { - if (current->state == TASK_RUNNABLE) - sched_rq_remove_task(current); - - /* - * Top up task's ticks temporarily, and - * wait for it to release its locks.
- */ - current->state = TASK_RUNNABLE; - current->ticks_left = max(current->ticks_left, - SCHED_GRANULARITY); - sched_rq_add_task(current, rq_runnable, RQ_ADD_FRONT); - } - } + /* + * If task is about to sleep and + * it has pending events, wake it up. + */ + if (current->flags & TASK_SUSPENDING && + current->state == TASK_SLEEPING) + wake_up_task(current, WAKEUP_INTERRUPT); /* Determine the next task to be run */ if (rq_runnable->total > 0) { diff --git a/src/generic/tcb.c b/src/generic/tcb.c index fa4d2b5..d94fa96 100644 --- a/src/generic/tcb.c +++ b/src/generic/tcb.c @@ -5,7 +5,10 @@ */ #include #include +#include +#include #include +#include INC_ARCH(exception.h) /* ID pools for threads and spaces. */ struct id_pool *thread_id_pool; @@ -20,6 +23,25 @@ unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched); unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs); +/* + * When there is an asynchronous pending event to be handled by + * the task (e.g. task is suspending), normally it is processed + * when the task is returning to user mode from the kernel. If + * the event is raised when the task is in userspace, this call + * in irq context makes sure it is handled. + */ +void task_process_pending_flags(void) +{ + if (TASK_IN_USER(current)) { + if (current->flags & TASK_SUSPENDING) { + if (in_irq_context()) + sched_suspend_async(); + else + sched_suspend_sync(); + } + } +} + #if 0 int task_suspend(struct ktcb *task) { diff --git a/src/glue/arm/systable.c b/src/glue/arm/systable.c index cdaefe7..9f78d82 100644 --- a/src/glue/arm/systable.c +++ b/src/glue/arm/systable.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include INC_GLUE(memlayout.h) #include INC_GLUE(syscall.h) @@ -60,13 +61,15 @@ void syscall_init() /* Checks a syscall is legitimate and dispatches to appropriate handler. 
*/ int syscall(syscall_context_t *regs, unsigned long swi_addr) { + int ret = 0; + /* Check if genuine system call, coming from the syscall page */ if ((swi_addr & ARM_SYSCALL_PAGE) == ARM_SYSCALL_PAGE) { /* Check within syscall offset boundary */ if (((swi_addr & syscall_offset_mask) >= 0) && ((swi_addr & syscall_offset_mask) <= syscalls_end_offset)) { /* Quick jump, rather than compare each */ - return (*syscall_table[(swi_addr & 0xFF) >> 2])(regs); + ret = (*syscall_table[(swi_addr & 0xFF) >> 2])(regs); } else { printk("System call received from call @ 0x%lx." "Instruction: 0x%lx.\n", swi_addr, @@ -78,5 +81,12 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr) "Discarding.\n", swi_addr); return -ENOSYS; } + + if (current->flags & TASK_SUSPENDING) { + BUG_ON(current->nlocks); + sched_suspend_sync(); + } + + return ret; } diff --git a/src/lib/wait.c b/src/lib/wait.c index e89964e..b9e73fb 100644 --- a/src/lib/wait.c +++ b/src/lib/wait.c @@ -59,7 +59,7 @@ int wait_on(struct waitqueue_head *wqh) } /* Wake up all */ -void wake_up_all(struct waitqueue_head *wqh, int sync) +void wake_up_all(struct waitqueue_head *wqh, unsigned int flags) { BUG_ON(wqh->sleepers < 0); spin_lock(&wqh->slock); @@ -72,11 +72,12 @@ void wake_up_all(struct waitqueue_head *wqh, int sync) BUG_ON(list_empty(&wqh->task_list)); list_del_init(&wq->task_list); wqh->sleepers--; - sleeper->flags |= TASK_INTERRUPTED; + if (flags & WAKEUP_INTERRUPT) + sleeper->flags |= TASK_INTERRUPTED; printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid); spin_unlock(&wqh->slock); - if (sync) + if (flags & WAKEUP_SYNC) sched_resume_sync(sleeper); else sched_resume_async(sleeper); @@ -85,7 +86,7 @@ void wake_up_all(struct waitqueue_head *wqh, int sync) } /* Wake up single waiter */ -void wake_up(struct waitqueue_head *wqh, int sync) +void wake_up(struct waitqueue_head *wqh, unsigned int flags) { BUG_ON(wqh->sleepers < 0); spin_lock(&wqh->slock); @@ -98,11 +99,12 @@ void wake_up(struct waitqueue_head 
*wqh, int sync) BUG_ON(list_empty(&wqh->task_list)); list_del_init(&wq->task_list); wqh->sleepers--; - sleeper->flags |= TASK_INTERRUPTED; + if (flags & WAKEUP_INTERRUPT) + sleeper->flags |= TASK_INTERRUPTED; printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid); spin_unlock(&wqh->slock); - if (sync) + if (flags & WAKEUP_SYNC) sched_resume_sync(sleeper); else sched_resume_async(sleeper); @@ -116,7 +118,7 @@ void wake_up(struct waitqueue_head *wqh, int sync) * as we were peeking on it, returns -1. @sync makes us immediately * yield or else leave it to scheduler's discretion. */ -int wake_up_task(struct ktcb *task, int sync) +int wake_up_task(struct ktcb *task, unsigned int flags) { struct waitqueue_head *wqh; struct waitqueue *wq; @@ -158,7 +160,8 @@ int wake_up_task(struct ktcb *task, int sync) wqh->sleepers--; task->waiting_on = 0; task->wq = 0; - task->flags |= TASK_INTERRUPTED; + if (flags & WAKEUP_INTERRUPT) + task->flags |= TASK_INTERRUPTED; spin_unlock(&wqh->slock); spin_unlock(&task->waitlock); @@ -167,7 +170,7 @@ int wake_up_task(struct ktcb *task, int sync) * safely resume it without locks as this is the only * code path that can resume the task. */ - if (sync) + if (flags & WAKEUP_SYNC) sched_resume_sync(task); else sched_resume_async(task); @@ -175,5 +178,3 @@ int wake_up_task(struct ktcb *task, int sync) return 0; } - - diff --git a/tasks/libposix/exit.c b/tasks/libposix/exit.c index 4914025..79083ad 100644 --- a/tasks/libposix/exit.c +++ b/tasks/libposix/exit.c @@ -5,20 +5,22 @@ #include #include -static inline void l4_exit(int status) +static inline void __attribute__ ((noreturn)) l4_exit(int status) { - int err; + int ret; write_mr(L4SYS_ARG0, status); - /* Call pager with exit() request. 
*/ - err = l4_send(PAGER_TID, L4_IPC_TAG_EXIT); - printf("%s: L4 IPC Error: %d.\n", __FUNCTION__, err); + /* Call pager with exit() request and block on its receive phase */ + ret = l4_sendrecv(PAGER_TID, PAGER_TID, L4_IPC_TAG_EXIT); + /* This call should not fail or return */ + printf("%s: L4 IPC returned: %d.\n", __FUNCTION__, ret); BUG(); } -void exit(int status) +void __attribute__ ((noreturn)) _exit(int status) { l4_exit(status); } + diff --git a/tasks/mm0/main.c b/tasks/mm0/main.c index 29c93a4..790fba3 100644 --- a/tasks/mm0/main.c +++ b/tasks/mm0/main.c @@ -123,6 +123,11 @@ void handle_requests(void) ret = sys_fork(sender); break; } + case L4_IPC_TAG_EXIT: { + /* An exiting task has no receive phase */ + sys_exit(sender, (int)mr[0]); + return; + } case L4_IPC_TAG_BRK: { // ret = sys_brk(sender, (void *)mr[0]); // break; diff --git a/tasks/mm0/src/fault.c b/tasks/mm0/src/fault.c index 92bf132..c6ee09c 100644 --- a/tasks/mm0/src/fault.c +++ b/tasks/mm0/src/fault.c @@ -365,7 +365,20 @@ int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link) /* This version is used when exiting. 
*/ int vma_drop_merge_delete_all(struct vm_area *vma) { - struct vm_obj_link *vmo_link; + struct vm_obj_link *vmo_link, *n; + + /* Vma cannot be empty */ + BUG_ON(list_empty(&vma->vm_obj_list)); + + /* Traverse and get rid of all links */ + list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list) + vma_drop_merge_delete(vma, vmo_link); + + return 0; +} +int vma_drop_merge_delete_all_old(struct vm_area *vma) +{ + struct vm_obj_link *vmo_link, *n; /* Get the first link on the vma */ BUG_ON(list_empty(&vma->vm_obj_list)); @@ -373,6 +386,9 @@ int vma_drop_merge_delete_all(struct vm_area *vma) struct vm_obj_link, list); /* Traverse and get rid of all links */ + list_for_each_entry_safe(vmo_link, n, &vma->vm_obj_list, list) { + vma_drop_merge_delete(vma, vmo_link); + } do { vma_drop_merge_delete(vma, vmo_link); @@ -382,6 +398,7 @@ int vma_drop_merge_delete_all(struct vm_area *vma) return 0; } + /* TODO: * - Why not allocate a swap descriptor in vma_create_shadow() rather than * a bare vm_object? It will be needed. diff --git a/tasks/mm0/src/shm.c b/tasks/mm0/src/shm.c index 71d7a0e..a3e34aa 100644 --- a/tasks/mm0/src/shm.c +++ b/tasks/mm0/src/shm.c @@ -106,19 +106,6 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg, return shm->shm_addr; } -/* TODO: Do we need this? - * MM0 never needs a task's utcb page. vfs needs it. - * UTCBs get special treatment here. If the task - * is attaching to its utcb, mm0 prefaults it so - * that it can access it later on whether or not - * the task makes a syscall to mm0 without first - * faulting the utcb. 
- */ -/* - if ((unsigned long)shmaddr == task->utcb_address) - utcb_prefault(task, VM_READ | VM_WRITE); -*/ - void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg) { struct vm_file *shm_file, *n; diff --git a/tasks/mm0/src/utcb.c b/tasks/mm0/src/utcb.c index d201e54..33d9ebe 100644 --- a/tasks/mm0/src/utcb.c +++ b/tasks/mm0/src/utcb.c @@ -1,9 +1,8 @@ /* - * Utcb address allocation for user tasks. + * utcb address allocation for user tasks. * * Copyright (C) 2008 Bahadir Balban */ - #include #include #include @@ -71,44 +70,3 @@ void *task_send_utcb_address(struct tcb *sender, l4id_t taskid) return 0; } -#if 0 - -To be ditched -/* - * Triggered during a sys_shmat() by a client task when mapping its utcb. - * This prefaults the utcb and maps it in to mm0 so that it can freely - * access it anytime later. - */ -int utcb_prefault(struct tcb *task, unsigned int vmflags) -{ - int err; - struct page *pg; - - /* First map in the page to task with given flags, e.g. read/write */ - if ((err = prefault_page(task, task->utcb_address, vmflags)) < 0) { - printf("%s: Failed: %d\n", __FUNCTION__, err); - return err; - } - - /* - * Get the topmost page. Since we did both a VM_READ and VM_WRITE - * prefault, this gets a writeable instead of a read-only page. 
- */ - pg = task_virt_to_page(task, task->utcb_address); - if (!pg || IS_ERR(pg)) { - printf("%s: Cannot retrieve task %d's utcb page.\n", - __FUNCTION__, task->tid); - BUG(); - } - - /* Map it in to self */ - l4_map((void *)page_to_phys(pg), (void *)task->utcb_address, 1, - MAP_USR_RW_FLAGS, self_tid()); - - /* Flag that says this task's utcb is mapped to mm0 as r/w */ - task->utcb_mapped = 1; - - return 0; -} - -#endif diff --git a/tasks/test0/src/forktest.c b/tasks/test0/src/forktest.c index 0edd217..6074150 100644 --- a/tasks/test0/src/forktest.c +++ b/tasks/test0/src/forktest.c @@ -32,7 +32,7 @@ int forktest(void) printf("PID: %d, my global: %d\n", myid, global); printf("-- PASSED --\n"); out: - while(1) - ; + printf("PID: %d exiting...\n", myid); + _exit(0); }