Mirror of https://github.com/drasko/codezero.git
A new scheduler replaces the old one.
- There are no more sched_xxx_notify() calls that ask the scheduler to change a task's state.
- Tasks now have priorities and different timeslices.
- A one-second interval is distributed among processes (sketched below).
- There are just runnable and expired queues.
- SCHED_GRANULARITY determines the maximum uninterrupted running time for a task.
- The scheduler can now detect a safe point and suspend a task; interruptible blocking is implemented.
- Mutexes, waitqueues and IPC are modified to be interruptible.
- Sleep information (which waitqueue a task sleeps on, etc.) is stored in the ktcb.
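
A minimal sketch of the timeslice distribution described above, under stated assumptions: SCHED_TICKS, the prio field and total_prio are illustrative names rather than verified codezero identifiers, and the tick-handling comment describes one plausible policy given the runnable/expired queues, not confirmed behavior. Only ticks_left and SCHED_GRANULARITY appear in the source below.

#define SCHED_TICKS	100	/* assumed: timer ticks in the one-second interval */

/*
 * Sketch: give each runnable task a share of the one-second interval
 * in proportion to its priority. "prio" and "total_prio" are assumed
 * names; "ticks_left" is the real ktcb field (the fork path below
 * splits it between parent and child).
 */
static void timeslice_distribute(struct ktcb *tasks[], int ntasks, int total_prio)
{
	for (int i = 0; i < ntasks; i++)
		tasks[i]->ticks_left = SCHED_TICKS * tasks[i]->prio / total_prio;
}

/*
 * Plausibly, each timer tick decrements the running task's ticks_left;
 * a task is preempted after at most SCHED_GRANULARITY consecutive
 * ticks, moves to the expired queue when ticks_left reaches zero, and
 * the runnable and expired queues are swapped once the runnable queue
 * is empty.
 */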
/*
 * Thread related system calls.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/scheduler.h>
#include <l4/api/thread.h>
#include <l4/api/syscall.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/lib/mutex.h>
#include <l4/generic/pgalloc.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)

/* Voluntarily yields the CPU: invokes the scheduler to pick the next task */
int sys_thread_switch(syscall_context_t *regs)
{
	schedule();
	return 0;
}

int thread_suspend(struct task_ids *ids)
{
	/* Not implemented yet; currently a no-op that reports success */
	return 0;
}

int thread_resume(struct task_ids *ids)
{
	struct ktcb *task;

	if (!(task = find_task(ids->tid)))
		return -ESRCH;

	if (!mutex_trylock(&task->thread_control_lock))
		return -EAGAIN;

	/* Put task into runqueue as runnable */
	sched_resume_async(task);

	/* Release lock and return */
	mutex_unlock(&task->thread_control_lock);
	return 0;
}

/* Runs a thread for the first time */
int thread_start(struct task_ids *ids)
{
	struct ktcb *task;

	if (!(task = find_task(ids->tid)))
		return -ESRCH;

	if (!mutex_trylock(&task->thread_control_lock))
		return -EAGAIN;

	/* Notify scheduler of task resume */
	sched_resume_async(task);

	/* Release lock and return */
	mutex_unlock(&task->thread_control_lock);
	return 0;
}

int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig, unsigned int flags)
{
	/* New threads just need their mode set up */
	if (flags == THREAD_NEW_SPACE) {
		BUG_ON(orig);
		new->context.spsr = ARM_MODE_USR;
		return 0;
	}

	/*
	 * For duplicated threads, the pre-syscall context is saved on
	 * the kernel stack. We copy the original's saved context into
	 * the duplicate thread's current context structure.
	 *
	 * We don't lock for context modification because the
	 * thread is not known to the system yet.
	 */
	new->context.spsr = orig->syscall_regs->spsr;	/* User mode */
	new->context.r0 = orig->syscall_regs->r0;
	new->context.r1 = orig->syscall_regs->r1;
	new->context.r2 = orig->syscall_regs->r2;
	new->context.r3 = orig->syscall_regs->r3;
	new->context.r4 = orig->syscall_regs->r4;
	new->context.r5 = orig->syscall_regs->r5;
	new->context.r6 = orig->syscall_regs->r6;
	new->context.r7 = orig->syscall_regs->r7;
	new->context.r8 = orig->syscall_regs->r8;
	new->context.r9 = orig->syscall_regs->r9;
	new->context.r10 = orig->syscall_regs->r10;
	new->context.r11 = orig->syscall_regs->r11;
	new->context.r12 = orig->syscall_regs->r12;
	new->context.sp = orig->syscall_regs->sp_usr;
	/* Skip lr_svc since it's not going to be used */
	new->context.pc = orig->syscall_regs->lr_usr;

	/* Copy other relevant fields from the original ktcb */
	new->pagerid = orig->pagerid;

	/* Distribute the original thread's ticks between the two threads */
	new->ticks_left = orig->ticks_left / 2;
	orig->ticks_left /= 2;

	return 0;
}

extern unsigned int return_from_syscall;

/*
 * Copies the pre-syscall context of the original thread into the kernel
 * stack of the new thread, and modifies the new thread's context
 * registers so that, when it is next scheduled, it executes as if it
 * were returning from a syscall, i.e. it takes the syscall return path
 * where the context previously copied to its stack is restored. It also
 * modifies r0 to ensure POSIX child return semantics.
 */
int arch_setup_new_thread_orig(struct ktcb *new, struct ktcb *orig)
{
	/*
	 * The pre-syscall context is saved on the kernel stack upon
	 * a system call exception. We need the location where it is
	 * saved relative to the start of the ktcb.
	 */
	unsigned long syscall_context_offset =
		((unsigned long)(orig->syscall_regs) - (unsigned long)orig);

	/*
	 * Copy the saved context from the original thread's
	 * stack to the new thread's stack.
	 */
	memcpy((void *)((unsigned long)new + syscall_context_offset),
	       (void *)((unsigned long)orig + syscall_context_offset),
	       sizeof(syscall_context_t));

	/*
	 * Set the new thread's syscall_regs pointer, since it's
	 * normally set during syscall entry.
	 */
	new->syscall_regs = (syscall_context_t *)
		((unsigned long)new + syscall_context_offset);

	/*
	 * Set the return register to 0 so that the new thread returns
	 * with that value. This is a POSIX requirement and enforces
	 * policy on the microkernel, but it is currently the best
	 * solution.
	 *
	 * A cleaner but slower way would be for the pager to set the
	 * child's registers via exchange_registers() and start the
	 * child thread afterwards.
	 */
	KTCB_REF_MR0(new)[MR_RETURN] = 0;

	/*
	 * Set up the stack pointer, saved program status register and
	 * the program counter so that, the next time the new thread is
	 * scheduled, it executes the tail of the system call exception
	 * where the previous context is restored.
	 */
	new->context.sp = (unsigned long)new->syscall_regs;
	new->context.pc = (unsigned long)&return_from_syscall;
	new->context.spsr = (unsigned long)orig->context.spsr;

	/* Copy other relevant fields from the original ktcb */
	new->pagerid = orig->pagerid;

	/* Distribute the original thread's ticks between the two threads */
	new->ticks_left = orig->ticks_left / 2;
	orig->ticks_left /= 2;

	return 0;
}

int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
			 struct ktcb *new, struct ktcb *orig)
{
	/* For the tid, allocate the requested id if it's available, else a new one */
	if ((ids->tid = id_get(thread_id_pool, ids->tid)) < 0)
		ids->tid = id_new(thread_id_pool);

	/*
	 * If the thread's space is new or copied,
	 * allocate a new space id and tgid.
	 */
	if (flags == THREAD_NEW_SPACE || flags == THREAD_COPY_SPACE) {
		/* Allocate the requested id if it's available, else a new one */
		if ((ids->spid = id_get(space_id_pool, ids->spid)) < 0)
			ids->spid = id_new(space_id_pool);

		/* It also gets a thread group id */
		if ((ids->tgid = id_get(tgroup_id_pool, ids->tgid)) < 0)
			ids->tgid = id_new(tgroup_id_pool);
	}

	/* If the thread's space is the same, the tgid is either a new or an existing one */
	if (flags == THREAD_SAME_SPACE) {
		/* Check whether a different tgid than the original's is expected */
		if (ids->tgid != orig->tgid) {
			if ((ids->tgid = id_get(tgroup_id_pool, ids->tgid)) < 0)
				ids->tgid = id_new(tgroup_id_pool);
		}
	}

	/* Set all ids */
	set_task_ids(new, ids);

	return 0;
}

/*
 * Creates a thread with a new thread id and, depending on the flags,
 * either creates a new address space, uses the same space as another
 * thread, or creates a new space copying the space of another thread.
 * These are used, respectively, when creating a brand new task,
 * creating a new thread in an existing address space, or forking a task.
 */
int thread_create(struct task_ids *ids, unsigned int flags)
{
	struct ktcb *task = 0, *new = (struct ktcb *)zalloc_page();

	flags &= THREAD_CREATE_MASK;

	if (flags == THREAD_NEW_SPACE) {
		/* Allocate a new pgd and copy all kernel areas */
		new->pgd = alloc_pgd();
		copy_pgd_kern_all(new->pgd);
	} else {
		/* An existing space will be used; find it among all tasks */
		list_for_each_entry(task, &global_task_list, task_list) {
			/* Space ids match, so the existing space can be used */
			if (task->spid == ids->spid) {
				if (flags == THREAD_SAME_SPACE)
					new->pgd = task->pgd;
				else
					new->pgd = copy_page_tables(task->pgd);
				goto out;
			}
		}
		printk("Could not find given space, is "
		       "SAMESPC/COPYSPC the right flag?\n");
		BUG();
	}
out:
	/* Set up the new thread's tid, spid and tgid according to flags */
	thread_setup_new_ids(ids, flags, new, task);

	/* Initialise the task's scheduling state and parameters */
	sched_init_task(new, TASK_PRIO_NORMAL);

	/* Initialise ipc waitqueues */
	waitqueue_head_init(&new->wqh_send);
	waitqueue_head_init(&new->wqh_recv);

	arch_setup_new_thread(new, task, flags);

	/* Add the task to the global hlist of tasks */
	add_task_global(new);

	return 0;
}

/*
 * Creates, destroys and modifies threads. Also implicitly creates an
 * address space for a thread that doesn't already have one, or
 * destroys it if the last thread that uses it is destroyed.
 */
int sys_thread_control(syscall_context_t *regs)
{
	int ret = 0;
	unsigned int flags = regs->r0;
	struct task_ids *ids = (struct task_ids *)regs->r1;

	switch (flags & THREAD_ACTION_MASK) {
	case THREAD_CREATE:
		ret = thread_create(ids, flags);
		break;
	case THREAD_RUN:
		ret = thread_start(ids);
		break;
	case THREAD_SUSPEND:
		ret = thread_suspend(ids);
		break;
	case THREAD_RESUME:
		ret = thread_resume(ids);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
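
For context, a minimal sketch of how a pager might drive this syscall to fork a task. This is illustrative only: l4_thread_control() is an assumed userspace stub that traps into sys_thread_control() above, the parent_* values are placeholders, and it assumes the action and create-flag bits occupy separate mask fields, as the masking in the code suggests. The id fallback behavior follows thread_setup_new_ids() above.

/*
 * Sketch: fork-style thread creation from a pager.
 * l4_thread_control() is an assumed userspace syscall stub;
 * parent_tid/parent_spid/parent_tgid are placeholders.
 */
struct task_ids ids = {
	.tid = parent_tid,
	.spid = parent_spid,	/* selects which space thread_create() copies */
	.tgid = parent_tgid,
};

/* Copy the parent's address space; fresh ids are allocated as needed */
l4_thread_control(THREAD_CREATE | THREAD_COPY_SPACE, &ids);

/* ids now holds the child's tid/spid/tgid; start the child */
l4_thread_control(THREAD_RUN, &ids);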