Towards finishing exchange_registers()

- Added mutex_trylock()
- Implemented most of exchange_registers()
- thread_control() now needs a lock for operations that can modify thread context.
- thread_start() no longer initialises scheduler flags; this is now done in thread_create().

TODO:
- Fork/clone'ed threads should retain their context in tcb, not syscall stack.
- exchange_registers() calls in userspace need cleaning up.
This commit is contained in:
Bahadir Balban
2008-09-13 18:07:00 +03:00
parent 0b3ab05a98
commit 4fb5277123
23 changed files with 460 additions and 98 deletions

View File

@@ -144,6 +144,9 @@
#define EOWNERDEAD 130 /* Owner died */
#define ENOTRECOVERABLE 131 /* State not recoverable */
/* Codezero specific error codes */
#define EACTIVE 132 /* Task active */
#ifdef __KERNEL__
/* Should never be seen by user programs */

22
include/l4/api/exregs.h Normal file
View File

@@ -0,0 +1,22 @@
/*
* Exchange registers system call data.
*
* Copyright (C) 2008 Bahadir Balban
*/
#ifndef __EXREGS_H__
#define __EXREGS_H__
#include <l4/macros.h>
#include INC_GLUE(syscall.h)
#include INC_GLUE(context.h)
#include <l4/types.h>
/* Structure passed by userspace pagers for exchanging registers */
struct exregs_data {
exregs_context_t context;
u32 valid_vect;
};
#endif /* __EXREGS_H__ */

View File

@@ -1,16 +1,18 @@
#ifndef __THREAD_H__
#define __THREAD_H__
#define THREAD_FLAGS_MASK 0x00F0
/* Create new thread, copy given space */
#define THREAD_CREATE_COPYSPC 0x0010
#define THREAD_CREATE_MASK 0x00F0
/* Create new thread and new space */
#define THREAD_CREATE_NEWSPC 0x0020
#define THREAD_NEW_SPACE 0x0010
/* Create new thread, copy given space */
#define THREAD_COPY_SPACE 0x0020
/* Create new thread, use given space */
#define THREAD_CREATE_SAMESPC 0x0030
#define THREAD_SAME_SPACE 0x0030
#define THREAD_ACTION_MASK 0x000F

View File

@@ -75,6 +75,17 @@ static inline void irq_local_disable()
/* This is filled on entry to irq handler, only if a process was interrupted.*/
extern unsigned int preempted_psr;
#include <l4/generic/tcb.h>
/*
 * Returns 1 if the task's saved context shows it was executing in
 * SVC (kernel) mode when it was interrupted, 0 otherwise. The mode
 * is taken from the mode bits of the saved spsr.
 *
 * NOTE(review): the kernel tcb elsewhere in this commit is
 * `struct ktcb' — confirm `struct tcb' is the intended type here.
 */
static inline int task_in_kernel(struct tcb *t)
{
	/* The == comparison already yields 0/1; no ternary needed. */
	return (t->context.spsr & ARM_MODE_MASK) == ARM_MODE_SVC;
}
/* A task is in user mode exactly when it is not in kernel mode. */
static inline int task_in_user(struct tcb *t)
{
	if (task_in_kernel(t))
		return 0;
	return 1;
}
static inline int in_kernel()
{
return (((preempted_psr & ARM_MODE_MASK) == ARM_MODE_SVC)) ? 1 : 0;

View File

@@ -34,10 +34,6 @@ static inline struct ktcb *current_task(void)
#define SCHED_FL_MASK (SCHED_FL_SLEEP | SCHED_FL_RESUME \
| SCHED_FL_SUSPEND)
#define __IPC_FL_WAIT 4
#define IPC_FL_WAIT (1 << __IPC_FL_WAIT)
#define IPC_FL_MASK IPC_FL_WAIT
void sched_runqueue_init(void);
void sched_start_task(struct ktcb *task);
void sched_resume_task(struct ktcb *task);

View File

@@ -13,6 +13,7 @@
#include INC_GLUE(memory.h)
#include INC_GLUE(syscall.h)
#include INC_GLUE(message.h)
#include INC_GLUE(context.h)
#include INC_SUBARCH(mm.h)
enum task_state {
@@ -21,32 +22,6 @@ enum task_state {
TASK_RUNNABLE = 2,
};
/*
* This describes the user space register context of each task. Simply set them
* as regular structure fields, and they'll be copied onto real registers upon
* a context switch. In the ARM case, they're copied from memory to userspace
* registers using the LDM instruction with ^, no-pc flavor. See ARMARM.
*/
typedef struct arm_context {
u32 spsr; /* 0x0 */
u32 r0; /* 0x4 */
u32 r1; /* 0x8 */
u32 r2; /* 0xC */
u32 r3; /* 0x10 */
u32 r4; /* 0x14 */
u32 r5; /* 0x18 */
u32 r6; /* 0x1C */
u32 r7; /* 0x20 */
u32 r8; /* 0x24 */
u32 r9; /* 0x28 */
u32 r10; /* 0x2C */
u32 r11; /* 0x30 */
u32 r12; /* 0x34 */
u32 sp; /* 0x38 */
u32 lr; /* 0x3C */
u32 pc; /* 0x40 */
} __attribute__((__packed__)) task_context_t;
#define TASK_ID_INVALID -1
struct task_ids {
l4id_t tid;
@@ -75,7 +50,9 @@ struct ktcb {
/* Flags to hint scheduler on future task state */
unsigned int schedfl;
unsigned int flags;
/* Lock for blocking thread state modifications via a syscall */
struct mutex thread_control_lock;
/* Other related threads */
l4id_t pagerid;

View File

@@ -0,0 +1,53 @@
#ifndef __ARM_CONTEXT_H__
#define __ARM_CONTEXT_H__
#include <l4/types.h>
/*
* This describes the register context of each task. Simply set
* them as regular structure fields, and they'll be copied onto
* real registers upon a context switch to that task. Normally
* exchange_registers() system call is designed for this, whose
* input structure is defined further below.
*/
/*
 * Saved register file of a task. The packed layout and the byte
 * offsets noted on each field form an ABI relied upon by assembly
 * context-switch code — do not reorder or insert fields.
 */
typedef struct arm_context {
u32 spsr; /* 0x0 */
u32 r0; /* 0x4 */
u32 r1; /* 0x8 */
u32 r2; /* 0xC */
u32 r3; /* 0x10 */
u32 r4; /* 0x14 */
u32 r5; /* 0x18 */
u32 r6; /* 0x1C */
u32 r7; /* 0x20 */
u32 r8; /* 0x24 */
u32 r9; /* 0x28 */
u32 r10; /* 0x2C */
u32 r11; /* 0x30 */
u32 r12; /* 0x34 */
u32 sp; /* 0x38 */
u32 lr; /* 0x3C */
u32 pc; /* 0x40 */
} __attribute__((__packed__)) task_context_t;
/*
 * Register context passed by pagers through exchange_registers().
 * Same as task_context_t but without the privileged spsr field, so
 * every field sits one word earlier; offsets below reflect that.
 * (The original offset comments were copied from task_context_t
 * and started at 0x4 — they were wrong for this layout.)
 *
 * NOTE(review): valid_vect bit positions computed from
 * offsetof(task_context_t, ...) do NOT match word indices into this
 * struct — see the kernel's do_exchange_registers() loop. Confirm
 * which layout the validity vector is defined against.
 */
typedef struct arm_exregs_context {
u32 r0; /* 0x0 */
u32 r1; /* 0x4 */
u32 r2; /* 0x8 */
u32 r3; /* 0xC */
u32 r4; /* 0x10 */
u32 r5; /* 0x14 */
u32 r6; /* 0x18 */
u32 r7; /* 0x1C */
u32 r8; /* 0x20 */
u32 r9; /* 0x24 */
u32 r10; /* 0x28 */
u32 r11; /* 0x2C */
u32 r12; /* 0x30 */
u32 sp; /* 0x34 */
u32 lr; /* 0x38 */
u32 pc; /* 0x3C */
} __attribute__((__packed__)) exregs_context_t;
#endif /* __ARM_CONTEXT_H__ */

View File

@@ -68,4 +68,7 @@
#define KERN_ADDR(x) ((x >= KERNEL_AREA_START) && (x < KERNEL_AREA_END))
#define USER_ADDR(x) ((x >= USER_AREA_START) && (x < USER_AREA_END))
#define PRIVILEGED_ADDR(x) (KERN_ADDR(x) || (x >= ARM_HIGH_VECTOR) || \
(x >= IO_AREA_START && x < IO_AREA_END))
#endif /* __MEMLAYOUT_H__ */

View File

@@ -2,12 +2,13 @@
* ARM-specific system call details.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#ifndef __ARM_GLUE_SYSCALL_H__
#define __ARM_GLUE_SYSCALL_H__
#include <l4/types.h>
/* Only specific call is the trap that gives back the kip address
* from which other system calls can be discovered. */
#define L4_TRAP_KIP 0xB4
@@ -52,7 +53,8 @@ typedef struct msg_regs {
/* NOTE:
* These references are valid only when they have been explicitly set
* by a kernel entry point, e.g. a system call, a data abort handler.
* by a kernel entry point, e.g. a system call, a data abort handler
* that imitates a page fault ipc etc.
*/
#define KTCB_REF_ARG0(ktcb) (&(ktcb)->syscall_regs->r0)
#define KTCB_REF_MR0(ktcb) (&(ktcb)->syscall_regs->r3)

View File

@@ -28,6 +28,7 @@ static inline void mutex_init(struct mutex *mutex)
INIT_LIST_HEAD(&mutex->wq.task_list);
}
int mutex_trylock(struct mutex *mutex);
void mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);

View File

@@ -20,7 +20,7 @@ static inline void spin_lock_init(struct spinlock *s)
*/
static inline void spin_lock(struct spinlock *s)
{
preempt_disable();
preempt_disable(); /* This must disable local preempt */
#if defined(CONFIG_SMP)
__spin_lock(&s->lock);
#endif

View File

@@ -75,6 +75,9 @@
#define printk printf
#endif
/* Converts an int-sized field offset in a struct into a bit offset in a word */
#define FIELD_TO_BIT(type, field) (1 << (offsetof(type, field) >> 2))
/* Functions who may either return a pointer or an error code can use these: */
#define PTR_ERR(x) ((void *)(x))
/* checks up to -1000, the rest might be valid pointers!!! E.g. 0xE0000000 */

View File

@@ -14,6 +14,7 @@
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/api/exregs.h>
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
@@ -27,28 +28,236 @@ void print_syscall_context(struct ktcb *t)
r->r5, r->r6, r->r7, r->r8, r->sp_usr, r->lr_usr);
}
int sys_exchange_registers(syscall_context_t *regs)
/*
* Bigger, slower but typed, i.e. if task_context_t or syscall_context_t
* fields are reordered in the future, this would not break.
*/
void do_exchange_registers_bigslow(struct tcb *task, struct exregs_data *exregs)
{
/*
 * NOTE(review): this function will not compile as-is; issues to fix:
 * - `struct ktcb *task' below shadows the `task' parameter, and the
 *   lines reading `regs->r0'..`regs->r3', calling find_task() and
 *   jumping to `found' reference a `regs' that is not in scope here.
 *   They look like stale remnants of the old sys_exchange_registers()
 *   body left over in this diff (the `found:' label now sits in
 *   do_exchange_registers()).
 * - `exregs.valid_vect' is used throughout although `exregs' is a
 *   pointer; presumably `exregs->valid_vect' is intended.
 * - register copies assign through `syscall_regs', but the local
 *   declared above is `sysregs' (typed syscall_context_t, while
 *   FIELD_TO_BIT is applied to `syscall_regs_t') — reconcile names.
 * - `sp_lr' is tested/assigned, but print_syscall_context() shows the
 *   syscall frame fields as `sp_usr'/`lr_usr' — confirm field name.
 * - parameter type is `struct tcb *' while the kernel tcb elsewhere
 *   is `struct ktcb' — confirm which is intended.
 */
struct ktcb *task;
unsigned int pc = regs->r0;
unsigned int sp = regs->r1;
unsigned int pagerid = regs->r2;
l4id_t tid = regs->r3;
unsigned int create_flags = task->flags;
task_context_t *context = &task->context;
syscall_context_t *sysregs = task->syscall_regs;
/* Find tcb from its hash list */
if ((task = find_task(tid)))
goto found;
/* FIXME: What if not found??? Recover gracefully. */
BUG();
/*
 * NOTE:
 * We don't care if register values point at invalid addresses
 * since memory protection would prevent any kernel corruption.
 * We do however, make sure spsr is not modified, and pc is
 * modified only for the userspace.
 */
/*
 * If the thread is returning from a syscall,
 * we modify the register state pushed to syscall stack.
 */
if ((create_flags == THREAD_COPY_SPACE) ||
(create_flags == THREAD_SAME_SPACE)) {
/* Check register valid bit and copy registers */
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r0))
syscall_regs->r0 = exregs->context.r0;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r1))
syscall_regs->r1 = exregs->context.r1;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r2))
syscall_regs->r2 = exregs->context.r2;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r3))
syscall_regs->r3 = exregs->context.r3;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r4))
syscall_regs->r4 = exregs->context.r4;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r5))
syscall_regs->r5 = exregs->context.r5;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r6))
syscall_regs->r6 = exregs->context.r6;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r7))
syscall_regs->r7 = exregs->context.r7;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r8))
syscall_regs->r8 = exregs->context.r8;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r9))
syscall_regs->r9 = exregs->context.r9;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r10))
syscall_regs->r10 = exregs->context.r10;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r11))
syscall_regs->r11 = exregs->context.r11;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, r12))
syscall_regs->r12 = exregs->context.r12;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, sp_usr))
syscall_regs->sp_usr = exregs->context.sp;
if (exregs.valid_vect & FIELD_TO_BIT(syscall_regs_t, sp_lr))
syscall_regs->sp_lr = exregs->context.lr;
/* Cannot modify program counter of a thread in kernel */
/* If it's a new thread or it's in userspace, modify actual context */
} else if ((create_flags == THREAD_NEW_SPACE) ||
(!create_flags && task_in_user(task))) {
/* Copy registers */
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r0))
context->r0 = exregs->context.r0;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r1))
context->r1 = exregs->context.r1;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r2))
context->r2 = exregs->context.r2;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r3))
context->r3 = exregs->context.r3;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r4))
context->r4 = exregs->context.r4;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r5))
context->r5 = exregs->context.r5;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r6))
context->r6 = exregs->context.r6;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r7))
context->r7 = exregs->context.r7;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r8))
context->r8 = exregs->context.r8;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r9))
context->r9 = exregs->context.r9;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r10))
context->r10 = exregs->context.r10;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r11))
context->r11 = exregs->context.r11;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, r12))
context->r12 = exregs->context.r12;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, sp))
context->sp = exregs->context.sp;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, lr))
context->lr = exregs->context.lr;
if (exregs.valid_vect & FIELD_TO_BIT(task_context_t, pc))
context->pc = exregs->context.pc;
/* Set spsr as user mode if thread is new */
if (create_flags == THREAD_NEW_SPACE)
task->context.spsr = ARM_MODE_USR;
} else
BUG();
}
/*
* This is smaller and faster but would break if task_context_t or
* syscall_regs_t types change, i.e. if their fields are reordered.
*/
void do_exchange_registers(struct tcb *task, struct exregs_data *exregs)
{
/*
 * NOTE(review): issues to fix before this compiles/works:
 * - `&exregs.context' and `exregs.valid_vect' use `.' although
 *   `exregs' is a pointer — must be `->'.
 * - the loop index `i' (signed int) is compared against an unsigned
 *   sizeof expression (-Wsign-compare).
 * - valid_vect bit `i' indexes exregs_context_t words, but the
 *   userspace setters compute bits from offsetof(task_context_t, ...),
 *   whose layout has a leading spsr — the two are off by one word.
 *   Confirm which layout valid_vect is defined against.
 * - the tail (`found:' label, `pc'/`sp'/`pagerid' assignments) uses
 *   locals that do not exist here; these are stale remnants of the
 *   old sys_exchange_registers() body left over in this diff. The
 *   trailing `if (create_flags == THREAD_NEW_SPACE)' also guards only
 *   the label, not the statements below it.
 */
unsigned int create_flags = task->flags;
u32 *context_ptr, *exregs_ptr = (u32 *)&exregs.context;
/*
 * NOTE:
 * We don't care if register values point at invalid addresses
 * since memory protection would prevent any kernel corruption.
 */
/*
 * If the thread is returning from a syscall,
 * we modify the register state pushed to syscall stack.
 */
if ((create_flags == THREAD_COPY_SPACE) ||
(create_flags == THREAD_SAME_SPACE)) {
context_ptr = (u32 *)&task->syscall_regs->r0;
} else if (create_flags == THREAD_NEW_SPACE) {
context_ptr = (u32 *)&task->context.r0;
task->context.spsr = ARM_MODE_USR;
} else
BUG();
/* Traverse the validity bit vector and copy exregs to task context */
for (int i = 0; i < (sizeof(exregs->context) / sizeof(u32)); i++) {
if (exregs.valid_vect & (1 << i)) {
/* NOTE: If structures change, this may break. */
context_ptr[i] = exregs_ptr[i];
}
}
if (create_flags == THREAD_NEW_SPACE)
found:
/* Set its registers */
task->context.pc = pc;
task->context.sp = sp;
task->context.spsr = ARM_MODE_USR;
/* Set its pager */
task->pagerid = pagerid;
}
/*
* exchange_registers()
*
* This call is used by the pagers to set (and in the future read)
* the register context of a thread. The thread's registers can be
* set in 2 thread states:
*
* 1) The thread is executing in userspace:
* i. A newly created thread with a new address space.
* ii. An existing thread that is in userspace.
*
* 2) The thread is executing in the kernel, but suspended when it
* is about to execute "return_from_syscall":
* i. A thread that is just created in an existing address space.
* ii. A thread that is just created copying an existing address space.
*
* These conditions are detected and accordingly the task context is
* modified. A thread executing in the kernel cannot be modified
* since this would compromise the kernel. Also the thread must be
* in suspended condition so that it does not start to execute as we
* modify its context.
*
* TODO: This is an arch-specific call, can move it to ARM
*
*/
int sys_exchange_registers(syscall_context_t *regs)
{
/*
 * NOTE(review): several defects in this entry point:
 * - `regs->r0' (an integer register) is stored into a pointer with
 *   no cast — needs `(struct exregs_data *)'. The user-supplied
 *   pointer is also dereferenced without validation/copy-in; TODO
 *   confirm pagers are fully trusted here.
 * - `create_flags' dereferences `task' BEFORE find_task() assigns
 *   it — an uninitialized read; compute it after the lookup. Also
 *   `TASK_CREATE_FLAGS' vs `THREAD_CREATE_FLAGS' used in thread.c —
 *   confirm which macro exists.
 * - thread_control_lock is leaked: after a successful trylock, only
 *   the -EACTIVE path unlocks. Both -EPERM returns and the final
 *   `return 0' exit with the mutex held.
 * - `task->context.pc != &return_from_syscall' compares a u32 with
 *   a function address — needs a cast.
 * - `err' is declared but never used.
 * - `THREAD_ID_INVALID' — tcb.h defines `TASK_ID_INVALID'; confirm.
 */
struct ktcb *task;
struct exregs_data *exregs = regs->r0;
unsigned int pagerid = regs->r1;
l4id_t tid = regs->r2;
unsigned int create_flags = task->flags & TASK_CREATE_FLAGS;
int err;
/* Find tcb from its list */
if (!(task = find_task(tid)))
return -ESRCH;
/*
 * This lock ensures task is not
 * inadvertently resumed by a syscall
 */
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
/* Now check that the task is suspended */
if (task->state != TASK_INACTIVE) {
mutex_unlock(&task->thread_control_lock);
return -EACTIVE;
}
/*
 * Check that it is legitimate to modify
 * the task registers state
 */
if (!create_flags) {
/*
 * Task is not new. We only allow such tasks
 * to be modified in userspace.
 */
if (!task_in_user(task))
return -EPERM;
} else { /* TODO: Simplify it here. */
/* New threads with new address space */
if (create_flags == THREAD_NEW_SPACE)
do_exchange_registers_bigslow(task, exregs);
else if ((create_flags == THREAD_COPY_SPACE) ||
(create_flags == THREAD_SAME_SPACE)) {
/*
 * Further check that the task is in
 * the kernel but about to exit.
 */
if (task->context.pc != &return_from_syscall ||
!task_in_kernel(task)) {
/* Actually it's a bug if not true */
BUG();
return -EPERM;
}
do_exchange_registers_bigslow(task, exregs);
}
}
/* Set its pager if one is supplied */
if (pagerid != THREAD_ID_INVALID)
task->pagerid = pagerid;
return 0;
}

View File

@@ -4,12 +4,12 @@
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include INC_API(syscall.h)
#include <l4/api/thread.h>
#include <l4/api/syscall.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/lib/mutex.h>
#include <l4/generic/pgalloc.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)
@@ -24,42 +24,58 @@ int thread_suspend(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_suspend_task(task);
return 0;
}
if (!(task = find_task(ids->tid)))
return -ESRCH;
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
/*
* The thread_control_lock is protecting from
* indirect modification of thread context, this
* does not cause any such operation so we don't
* need to acquire that lock here.
*/
sched_suspend_task(task);
return 0;
}
int thread_resume(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_resume_task(task);
return 0;
}
if (!(task = find_task(ids->tid)))
return -ESRCH;
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
/* Notify scheduler of task resume */
sched_notify_resume(task);
/* Release lock and return */
mutex_unlock(&task->thread_control_lock);
return 0;
}
int thread_start(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_start_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
BUG();
return -EINVAL;
if (!(task = find_task(ids->tid)))
return -ESRCH;
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
/* Clear creation flags if thread is new */
if (task->flags & THREAD_CREATE_FLAGS)
task->flags &= ~THREAD_CREATE_FLAGS;
/* Notify scheduler of task resume */
sched_notify_resume(task);
/* Release lock and return */
mutex_unlock(&task->thread_control_lock);
return 0;
}
@@ -139,8 +155,8 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
* If thread space is new or copied,
* allocate a new space id and tgid
*/
if (flags == THREAD_CREATE_NEWSPC ||
flags == THREAD_CREATE_COPYSPC) {
if (flags == THREAD_NEW_SPACE ||
flags == THREAD_COPY_SPACE) {
/*
* Allocate requested id if
* it's available, else a new one
@@ -156,7 +172,7 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
}
/* If thread space is the same, tgid is either new or existing one */
if (flags == THREAD_CREATE_SAMESPC) {
if (flags == THREAD_SAME_SPACE) {
/* Check if same tgid is expected */
if (ids->tgid != orig->tgid) {
if ((ids->tgid = id_get(tgroup_id_pool,
@@ -181,9 +197,9 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
int thread_create(struct task_ids *ids, unsigned int flags)
{
struct ktcb *task = 0, *new = (struct ktcb *)zalloc_page();
flags &= THREAD_FLAGS_MASK;
flags &= THREAD_CREATE_MASK;
if (flags == THREAD_CREATE_NEWSPC) {
if (flags == THREAD_NEW_SPACE) {
/* Allocate new pgd and copy all kernel areas */
new->pgd = alloc_pgd();
copy_pgd_kern_all(new->pgd);
@@ -192,7 +208,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)
list_for_each_entry(task, &global_task_list, task_list) {
/* Space ids match, can use existing space */
if (task->spid == ids->spid) {
if (flags == THREAD_CREATE_SAMESPC)
if (flags == THREAD_SAME_SPACE)
new->pgd = task->pgd;
else
new->pgd = copy_page_tables(task->pgd);
@@ -207,8 +223,8 @@ out:
/* Set up new thread's tid, spid, tgid according to flags */
thread_setup_new_ids(ids, flags, new, task);
/* Set task state. */
new->state = TASK_INACTIVE;
/* Initialise task's scheduling state and parameters. */
sched_init_task(new);
/* Initialise ipc waitqueues */
waitqueue_head_init(&new->wqh_send);
@@ -219,10 +235,16 @@ out:
* system call return environment so that it can safely
* return as a copy of its original thread.
*/
if (flags == THREAD_CREATE_COPYSPC ||
flags == THREAD_CREATE_SAMESPC)
if (flags == THREAD_COPY_SPACE ||
flags == THREAD_SAME_SPACE)
arch_setup_new_thread(new, task);
/*
* Set thread's creation flags. They will clear
* when the thread is run for the first time
*/
new->flags = THREAD_CREATE_MASK & flags;
/* Add task to global hlist of tasks */
add_task_global(new);
@@ -253,7 +275,7 @@ int sys_thread_control(syscall_context_t *regs)
case THREAD_RESUME:
ret = thread_resume(ids);
break;
/* TODO: THREAD_DESTROY! */
/* TODO: Add THREAD_DESTROY! */
default:
ret = -EINVAL;
}

View File

@@ -226,7 +226,7 @@ void sched_notify_resume(struct ktcb *task)
/* NOTE: Might as well just set need_resched instead of full yield.
* This would work on irq context as well. */
/* Same as resume, but also yields. */
void sched_resume_task(struct ktcb *task)
int sched_resume_task(struct ktcb *task)
{
sched_notify_resume(task);
sched_yield();

View File

@@ -93,6 +93,18 @@ void sem_down(struct mutex *mutex)
}
}
/*
 * Non-blocking attempt to lock mutex.
 * Takes the mutex's internal spinlock, attempts the lock once via
 * __mutex_lock(), and returns its result without ever sleeping.
 * Callers (e.g. sys_exchange_registers) treat a zero return as
 * "not acquired" and back off with -EAGAIN.
 * NOTE(review): assumes __mutex_lock() returns nonzero on success
 * and does not block — confirm against its definition.
 */
int mutex_trylock(struct mutex *mutex)
{
int success;
spin_lock(&mutex->slock);
success = __mutex_lock(&mutex->lock);
spin_unlock(&mutex->slock);
return success;
}
void mutex_lock(struct mutex *mutex)
{
/* NOTE:

View File

@@ -59,10 +59,10 @@ typedef int (*__l4_ipc_control_t)(unsigned int action, l4id_t blocked_sender,
extern __l4_ipc_control_t __l4_ipc_control;
int l4_ipc_control(unsigned int, l4id_t blocked_sender, u32 blocked_tag);
typedef int (*__l4_exchange_registers_t)(unsigned int pc, unsigned int sp,
typedef int (*__l4_exchange_registers_t)(void *exregs_struct,
l4id_t pager, l4id_t tid);
extern __l4_exchange_registers_t __l4_exchange_registers;
int l4_exchange_registers(unsigned int pc, unsigned int sp, int pager, l4id_t tid);
int l4_exchange_registers(void *exregs_struct, l4id_t pager, l4id_t tid);
typedef int (*__l4_kmem_control_t)(unsigned long pfn, int npages, int grant);
extern __l4_kmem_control_t __l4_kmem_control;

View File

@@ -186,7 +186,7 @@ END_PROC(l4_space_control)
/*
* Sets registers of a thread and its pager.
* @r0 = pc to set, @r1 = sp to set @r2 = pager id, @r3 = tid of thread.
* @r0 = ptr to exchange_registers structure, @r1 = pager id, @r2 = tid of thread.
*/
BEGIN_PROC(l4_exchange_registers)
stmfd sp!, {lr}

View File

@@ -0,0 +1,22 @@
#ifndef __MM0_EXREGS_H__
#define __MM0_EXREGS_H__
#include <l4/api/exregs.h>
void exregs_set_stack(struct exregs_data *s, unsigned long sp);
void exregs_set_mr_return(struct exregs_data *s, unsigned long retreg);
void exregs_set_pc(struct exregs_data *s, unsigned long pc);
/*
exregs_set_stack(unsigned long sp)
exregs_set_pc(unsigned long pc)
exregs_set_return(unsigned long retreg)
exregs_set_arg0(unsigned long arg0)
exregs_set_mr0(unsigned long mr0)
exregs_set_mr_sender(unsigned long sender)
exregs_set_mr_return(unsigned long retreg)
exregs_set_all(unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3,
unsigned long sp, unsigned long pc, u32 valid_vector, l4id_t pager);
*/
#endif /* __MM0_EXREGS_H__ */

View File

@@ -168,7 +168,7 @@ int self_spawn(void)
ids.tgid = self->tgid;
/* Create a new L4 thread in current thread's address space. */
self_child = task_create(self, &ids, THREAD_CREATE_SAMESPC,
self_child = task_create(self, &ids, THREAD_SAME_SPACE,
TCB_SHARED_VM | TCB_SHARED_FILES);
if (IS_ERR(self_child = tcb_alloc_init(TCB_SHARED_VM

View File

@@ -0,0 +1,21 @@
/*
* Generic to arch-specific interface for
* exchange_registers()
*
* Copyright (C) 2008 Bahadir Balban
*/
#include <exregs.h>
/*
 * Records a new stack pointer in the exregs request and marks the
 * sp field valid in the request's validity bit vector.
 * NOTE(review): the bit index is derived from task_context_t (whose
 * layout begins with spsr), but s->context is an exregs_context_t
 * with no spsr field. The kernel fast path (do_exchange_registers)
 * indexes exregs_context_t words directly, so this bit appears off
 * by one — confirm which layout valid_vect is defined against.
 */
void exregs_set_stack(struct exregs_data *s, unsigned long sp)
{
s->context.sp = sp;
s->valid_vect |= 1 << (offsetof(task_context_t, sp) >> 2);
}
/*
 * Records a new program counter in the exregs request and marks the
 * pc field valid in the request's validity bit vector.
 * NOTE(review): same offset-base concern as exregs_set_stack — the
 * bit index comes from task_context_t, not exregs_context_t; the two
 * layouts differ by one leading word (spsr). Confirm which layout
 * valid_vect is defined against before relying on this.
 */
void exregs_set_pc(struct exregs_data *s, unsigned long pc)
{
s->context.pc = pc;
s->valid_vect |= 1 << (offsetof(task_context_t, pc) >> 2);
}

View File

@@ -64,7 +64,7 @@ int do_fork(struct tcb *parent)
* Create a new L4 thread with parent's page tables
* kernel stack and kernel-side tcb copied
*/
if (IS_ERR(child = task_create(parent, &ids, THREAD_CREATE_COPYSPC,
if (IS_ERR(child = task_create(parent, &ids, THREAD_COPY_SPACE,
TCB_NO_SHARING))) {
l4_ipc_return((int)child);
return 0;
@@ -121,7 +121,6 @@ int sys_clone(l4id_t sender, void *child_stack, unsigned int flags)
struct task_ids ids;
struct vm_file *utcb_shm;
struct tcb *parent, *child;
unsigned long stack, stack_size;
BUG_ON(!(parent = find_task(sender)));
@@ -129,7 +128,7 @@ int sys_clone(l4id_t sender, void *child_stack, unsigned int flags)
ids.spid = parent->spid;
ids.tgid = parent->tgid;
if (IS_ERR(child = task_create(parent, &ids, THREAD_CREATE_SAMESPC,
if (IS_ERR(child = task_create(parent, &ids, THREAD_SAME_SPACE,
TCB_SHARED_VM | TCB_SHARED_FILES))) {
l4_ipc_return((int)child);
return 0;

View File

@@ -26,6 +26,7 @@
#include <task.h>
#include <shm.h>
#include <mmap.h>
#include <exregs.h>
struct tcb_head {
struct list_head list;
@@ -303,6 +304,7 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
unsigned int sp, l4id_t pager)
{
int err;
struct exregs_data regs;
/* Set up task's registers to default. */
if (!sp)
@@ -313,7 +315,9 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
pager = self_tid();
/* Set up the task's thread details, (pc, sp, pager etc.) */
if ((err = l4_exchange_registers(pc, sp, pager, task->tid) < 0)) {
exregs_set_stack(&regs, sp);
exregs_set_pc(&regs, pc);
if ((err = l4_exchange_registers(&regs, pager, task->tid) < 0)) {
printf("l4_exchange_registers failed with %d.\n", err);
return err;
}
@@ -367,7 +371,7 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
struct tcb *task;
int err;
if (IS_ERR(task = task_create(0, ids, THREAD_CREATE_NEWSPC,
if (IS_ERR(task = task_create(0, ids, THREAD_NEW_SPACE,
TCB_NO_SHARING)))
return (int)task;