Kernel updates since December 2009

commit 74b5963fcb (parent 16818191b3)
Bahadir Balban, 2010-03-25 01:12:40 +02:00
487 changed files with 22477 additions and 3857 deletions

@@ -3,7 +3,7 @@ Import('env')
 Import('symbols')
 # The set of source files associated with this SConscript file.
-src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c', 'mutex.c', 'cap.c', 'exregs.c', 'irq.c']
+src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c', 'mutex.c', 'cap.c', 'exregs.c', 'irq.c', 'cache.c']
 obj = env.Object(src_local)

src/api/cache.c (new file)

@@ -0,0 +1,50 @@
+/*
+ * Low level cache control functions.
+ *
+ * Copyright (C) 2009 - 2010 B Labs Ltd.
+ *
+ * Author: Bahadir Balban
+ */
+#include <l4/lib/printk.h>
+#include <l4/api/errno.h>
+#include <l4/generic/tcb.h>
+#include <l4/api/cache.h>
+#include <l4/generic/capability.h>
+#include INC_GLUE(cache.h)
+
+int sys_cache_control(unsigned long start, unsigned long end,
+		      unsigned int flags)
+{
+	int ret = 0;
+
+	if ((ret = cap_cache_check(start, end, flags)) < 0)
+		return ret;
+
+	switch (flags) {
+	case L4_INVALIDATE_ICACHE:
+		arch_invalidate_icache(start, end);
+		break;
+	case L4_INVALIDATE_DCACHE:
+		arch_invalidate_dcache(start, end);
+		break;
+	case L4_CLEAN_DCACHE:
+		arch_clean_dcache(start, end);
+		break;
+	case L4_CLEAN_INVALIDATE_DCACHE:
+		arch_clean_invalidate_dcache(start, end);
+		break;
+	case L4_INVALIDATE_TLB:
+		arch_invalidate_tlb(start, end);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
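A note on intended use: the syscall takes exactly one operation per call (the switch is an exact match, not a bitmask test), so a pager that has just written code into a task's pages would issue two calls, a dcache clean followed by an icache invalidate. A minimal user-side sketch, assuming a hypothetical l4_cache_control() stub and illustrative flag values (neither is defined by this hunk):

/* Hypothetical user-side stub and flag values, for illustration only. */
extern int l4_cache_control(unsigned long start, unsigned long end,
			    unsigned int flags);
#define L4_CLEAN_DCACHE		1	/* assumed value */
#define L4_INVALIDATE_ICACHE	2	/* assumed value */

/* Make freshly written code visible to instruction fetch:
 * push dirty data cache lines out, then drop stale icache lines. */
static int sync_icache_range(unsigned long start, unsigned long end)
{
	int err;

	if ((err = l4_cache_control(start, end, L4_CLEAN_DCACHE)) < 0)
		return err;
	return l4_cache_control(start, end, L4_INVALIDATE_ICACHE);
}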

(next file)

@@ -673,14 +673,14 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
 	case CAP_CONTROL_NCAPS:
 		if ((err = check_access((unsigned long)userbuf,
 					sizeof(int),
-					MAP_USR_RW_FLAGS, 1)) < 0)
+					MAP_USR_RW, 1)) < 0)
 			return err;
 		break;
 	case CAP_CONTROL_READ:
 		if ((err = check_access((unsigned long)userbuf,
 					cap_count(current) *
 					sizeof(struct capability),
-					MAP_USR_RW_FLAGS, 1)) < 0)
+					MAP_USR_RW, 1)) < 0)
 			return err;
 		break;
 	case CAP_CONTROL_SHARE:
@@ -694,7 +694,7 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
 	case CAP_CONTROL_DESTROY:
 		if ((err = check_access((unsigned long)userbuf,
 					sizeof(struct capability),
-					MAP_USR_RW_FLAGS, 1)) < 0)
+					MAP_USR_RW, 1)) < 0)
 			return err;
 		break;
 	default:

(next file)

@@ -71,7 +71,7 @@ flags:
 	/*
 	 * If task is the one currently runnable,
-	 * update kip utcb reference
+	 * update utcb reference
 	 */
 	if (task == current)
 		task_update_utcb(task);
@@ -161,7 +161,7 @@ int sys_exchange_registers(struct exregs_data *exregs, l4id_t tid)
 	if ((err = check_access((unsigned long)exregs,
 				sizeof(*exregs),
-				MAP_USR_RW_FLAGS, 1)) < 0)
+				MAP_USR_RW, 1)) < 0)
 		return err;

 	/* Find tcb from its list */

(next file)

@@ -208,7 +208,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
 	struct waitqueue_head *wqhs, *wqhr;
 	int ret = 0;

-	if (!(receiver = tcb_find(recv_tid)))
+	if (!(receiver = tcb_find_lock(recv_tid)))
 		return -ESRCH;

 	wqhs = &receiver->wqh_send;
@@ -240,8 +240,11 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
 		// printk("%s: (%d) Waking up (%d)\n", __FUNCTION__,
 		//        current->tid, receiver->tid);

-		/* Wake it up, we can yield here. */
-		sched_resume_sync(receiver);
+		/* Wake it up async */
+		sched_resume_async(receiver);
+
+		/* Release thread lock (protects for delete) */
+		spin_unlock(&receiver->thread_lock);
 		return ret;
 	}
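The tcb_find() to tcb_find_lock() switch is the heart of this hunk: the receiver is returned with its thread_lock already held, so it cannot be deleted between lookup and wakeup, and the sender drops the lock only once it is done with the tcb. A self-contained model of that lookup idiom, using pthreads in place of kernel spinlocks (names and layout invented for illustration):

#include <pthread.h>
#include <stddef.h>

struct tcb {
	int tid;
	pthread_mutex_t thread_lock;	/* pins the tcb against deletion */
	struct tcb *next;
};

static pthread_mutex_t tcb_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tcb *tcb_list;

/* Find a tcb and return it with thread_lock held; a destroy path
 * that takes thread_lock before freeing must therefore wait. */
struct tcb *tcb_find_lock(int tid)
{
	struct tcb *t;

	pthread_mutex_lock(&tcb_list_lock);
	for (t = tcb_list; t; t = t->next) {
		if (t->tid == tid) {
			pthread_mutex_lock(&t->thread_lock);
			break;
		}
	}
	pthread_mutex_unlock(&tcb_list_lock);
	return t;	/* NULL if not found, otherwise locked */
}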
@@ -253,6 +256,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
 	sched_prepare_sleep();
 	spin_unlock(&wqhr->slock);
 	spin_unlock(&wqhs->slock);
+	spin_unlock(&receiver->thread_lock);
 	// printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
 	//        current->tid, recv_tid);
 	schedule();
@@ -405,7 +409,7 @@ int ipc_recv_extended(l4id_t sendertid, unsigned int flags)
 	/* Page fault user pages if needed */
 	if ((err = check_access(ipc_address, size,
-				MAP_USR_RW_FLAGS, 1)) < 0)
+				MAP_USR_RW, 1)) < 0)
 		return err;

 	/*
@@ -455,7 +459,7 @@ int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
 	/* Page fault those pages on the current task if needed */
 	if ((err = check_access(ipc_address, size,
-				MAP_USR_RW_FLAGS, 1)) < 0)
+				MAP_USR_RW, 1)) < 0)
 		return err;

 	/*

(next file)

@@ -5,5 +5,5 @@
  */
 #include INC_API(kip.h)

-SECTION(".data.kip") struct kip kip;
+SECTION(".data.kip") ALIGN(SZ_4K) struct kip kip;
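Page-aligning the KIP matters because it is mapped into user address spaces, and mappings are page-granular: an unaligned KIP would expose whatever kernel data shares its page. The SECTION/ALIGN macros are presumably thin wrappers over compiler attributes, along these lines (a sketch of likely definitions, not the verbatim Codezero headers):

#define SECTION(name)	__attribute__((section(name)))
#define ALIGN(bytes)	__attribute__((aligned(bytes)))
#define SZ_4K		0x1000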

(next file)

@@ -8,9 +8,29 @@
 #include INC_SUBARCH(mm.h)
 #include <l4/api/errno.h>
 #include <l4/api/space.h>
+#include INC_GLUE(mapping.h)

-int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
-	    unsigned int flags, l4id_t tid)
+/*
+ * Userspace syscall requests can only map
+ * using read/write/exec userspace flags.
+ */
+int user_map_flags_validate(unsigned int flags)
+{
+	switch (flags) {
+	case MAP_USR_RO:
+	case MAP_USR_RW:
+	case MAP_USR_RWX:
+	case MAP_USR_RX:
+	case MAP_USR_IO:
+		return 1;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+int sys_map(unsigned long phys, unsigned long virt,
+	    unsigned long npages, unsigned int flags, l4id_t tid)
 {
 	struct ktcb *target;
 	int err;
@@ -18,10 +38,18 @@ int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
 	if (!(target = tcb_find(tid)))
 		return -ESRCH;

+	/* Check flags validity */
+	if (!user_map_flags_validate(flags))
+		return -EINVAL;
+
+	if (!npages || !phys || !virt)
+		return -EINVAL;
+
 	if ((err = cap_map_check(target, phys, virt, npages, flags)) < 0)
 		return err;

-	add_mapping_pgd(phys, virt, npages << PAGE_BITS, flags, TASK_PGD(target));
+	add_mapping_pgd(phys, virt, npages << PAGE_BITS,
+			flags, TASK_PGD(target));

 	return 0;
 }
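Note that user_map_flags_validate() is an exact-match whitelist rather than a bitmask test: anything that is not one of the five user permission encodings, including kernel-only or combined values, falls through to default and is rejected before the capability check runs. A standalone harness for the same idea (flag values invented for illustration, not Codezero's real encodings):

#include <assert.h>

enum {
	MAP_USR_RO = 1, MAP_USR_RW, MAP_USR_RX, MAP_USR_RWX, MAP_USR_IO,
	MAP_KERN_RW,			/* kernel-only: must be refused */
};

static int user_map_flags_validate(unsigned int flags)
{
	switch (flags) {
	case MAP_USR_RO:
	case MAP_USR_RW:
	case MAP_USR_RWX:
	case MAP_USR_RX:
	case MAP_USR_IO:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	assert(user_map_flags_validate(MAP_USR_RW));	/* allowed */
	assert(!user_map_flags_validate(MAP_KERN_RW));	/* rejected */
	assert(!user_map_flags_validate(0));		/* rejected */
	return 0;
}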
@@ -36,17 +64,22 @@ int sys_unmap(unsigned long virtual, unsigned long npages, unsigned int tid)
 	struct ktcb *target;
 	int ret = 0, retval = 0;

-	if (tid == current->tid)
-		target = current;
-	else if (!(target = tcb_find(tid)))
+	if (!(target = tcb_find(tid)))
 		return -ESRCH;

+	if (!npages || !virtual)
+		return -EINVAL;
+
 	if ((ret = cap_unmap_check(target, virtual, npages)) < 0)
 		return ret;

 	for (int i = 0; i < npages; i++) {
-		ret = remove_mapping_pgd(virtual + i * PAGE_SIZE, TASK_PGD(target));
+		ret = remove_mapping_pgd(TASK_PGD(target),
+					 virtual + i * PAGE_SIZE);
 		if (ret)
 			retval = ret;
 	}

-	return retval;
+	return ret;
 }

(next file)

@@ -15,6 +15,7 @@
 #include INC_API(syscall.h)
 #include INC_ARCH(exception.h)
 #include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)

 void init_mutex_queue_head(struct mutex_queue_head *mqhead)
 {
@@ -205,6 +206,22 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
 		return wait_on_prepared_wait();
 	}

+	/*
+	 * Note, the mutex in userspace was left free before the
+	 * syscall was entered.
+	 *
+	 * It is possible that a thread has acquired it, another
+	 * contended on it and the holder made it to the kernel
+	 * quicker than us. We detect this situation here.
+	 */
+	if (mutex_queue->wqh_holders.sleepers) {
+		/*
+		 * Let the first holder do all the waking up
+		 */
+		mutex_queue_head_unlock(mqhead);
+		return 0;
+	}
+
 	/*
 	 * Found it, if it exists, there are contenders,
 	 * now wake all of them up in FIFO order.
@@ -226,6 +243,8 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
 	unsigned long mutex_physical;
 	int ret = 0;

 	// printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);

+	/* Check valid operation */
 	if (mutex_op != MUTEX_CONTROL_LOCK &&
 	    mutex_op != MUTEX_CONTROL_UNLOCK) {
@@ -249,8 +268,7 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
 	 * capabilities of current task.
 	 */
 	if (!(mutex_physical =
-	      virt_to_phys_by_pgd(mutex_address,
-				  TASK_PGD(current))))
+	      virt_to_phys_by_pgd(TASK_PGD(current), mutex_address)))
 		return -EINVAL;

 	switch (mutex_op) {
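The new wqh_holders check exists because of the user/kernel split of the mutex protocol: the word is released in userspace first, and only then does the unlocker trap in to wake contenders, so a new holder (with a contender behind it) can already be queued in the kernel when the original unlocker arrives. A compressed sketch of the user side that creates this window, in C11 atomics (the real Codezero userspace library differs in detail; the stub name is assumed):

#include <stdatomic.h>

extern int l4_mutex_control(void *word, int op);	/* assumed stub */
#define MUTEX_CONTROL_LOCK	0	/* illustrative values */
#define MUTEX_CONTROL_UNLOCK	1

static void mutex_lock(atomic_int *word)	/* 0 = free, 1 = held */
{
	int expected = 0;

	/* Fast path: uncontended acquire entirely in userspace. */
	while (!atomic_compare_exchange_weak(word, &expected, 1)) {
		expected = 0;
		/* Contended: sleep in the kernel on this word. */
		l4_mutex_control(word, MUTEX_CONTROL_LOCK);
	}
}

static void mutex_unlock(atomic_int *word)
{
	/* Release in userspace first... */
	atomic_store(word, 0);
	/*
	 * ...then trap in to wake contenders. Between the store and
	 * the syscall another thread may acquire the word and even
	 * reach the kernel's unlock path first; the wqh_holders
	 * check above detects that and defers the wakeups to it.
	 */
	l4_mutex_control(word, MUTEX_CONTROL_UNLOCK);
}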

(next file)

@@ -39,6 +39,12 @@ int sys_schedule(void)
 int sys_getid(struct task_ids *ids)
 {
 	struct ktcb *this = current;
+	int err;
+
+	if ((err = check_access((unsigned long)ids,
+				sizeof(struct task_ids),
+				MAP_USR_RW, 1)) < 0)
+		return err;

 	ids->tid = this->tid;
 	ids->spid = this->space->spid;
(next file)

@@ -16,6 +16,7 @@
 #include <l4/generic/capability.h>
 #include INC_ARCH(asm.h)
 #include INC_SUBARCH(mm.h)
+#include INC_GLUE(mapping.h)

 int sys_thread_switch(void)
 {
@@ -56,8 +57,7 @@ int thread_suspend(struct ktcb *task)
 int thread_exit(struct ktcb *task)
 {
-	return thread_signal(task, TASK_EXITING, TASK_DEAD);
+	return thread_signal(task, TASK_SUSPENDING, TASK_INACTIVE);
 }

 static inline int task_is_child(struct ktcb *task)
@@ -68,18 +68,26 @@ static inline int task_is_child(struct ktcb *task)
 int thread_destroy_child(struct ktcb *task)
 {
 	/* Wait until thread exits */
 	thread_exit(task);

+	/* Hint scheduler that an exit occurred */
+	current->flags |= TASK_EXITED;
+
 	/* Now remove it atomically */
 	tcb_remove(task);

-	/* Wake up waiters */
+	/* Wake up waiters that arrived before removing */
 	wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
 	wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);

 	BUG_ON(task->wqh_pager.sleepers > 0);
-	BUG_ON(task->state != TASK_DEAD);
+	BUG_ON(task->state != TASK_INACTIVE);

+	/* Place the task on the zombie queue for its cpu */
+	ktcb_list_add(task, &per_cpu_byid(kernel_resources.zombie_list,
+					  task->affinity));

-	tcb_delete(task);

 	return 0;
 }
@@ -104,14 +112,37 @@ int thread_destroy_children(void)
 void thread_destroy_self(unsigned int exit_code)
 {
 	/* Destroy all children first */
 	thread_destroy_children();

-	/* Wake up waiters */
-	wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
-	wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);
+	/* If self-paged, finish everything except deletion */
+	if (current->tid == current->pagerid) {
+		/* Remove self safe against ipc */
+		tcb_remove(current);
+
+		/* Wake up any waiters queued up before removal */
+		wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
+		wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);
+
+		/* Move capabilities to current cpu idle task */
+		cap_list_move(&per_cpu(scheduler).idle_task->cap_list,
+			      &current->cap_list);
+
+		/* Place self on the per-cpu zombie queue */
+		ktcb_list_add(current, &per_cpu(kernel_resources.zombie_list));
+	}
+
+	/*
+	 * Both a child and a self-paging thread set the exit
+	 * code and quit the scheduler
+	 */
 	current->exit_code = exit_code;
-	sched_exit_sync();
+
+	/*
+	 * Hint scheduler that an exit has occurred
+	 */
+	current->flags |= TASK_EXITED;
+	sched_suspend_sync();
 }

 int thread_wait(struct ktcb *task)
@@ -119,27 +150,50 @@ int thread_wait(struct ktcb *task)
 	unsigned int exit_code;
 	int ret;

 	// printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);

 	/* Wait until task switches to desired state */
 	WAIT_EVENT(&task->wqh_pager,
-		   task->state == TASK_DEAD, ret);
+		   task->state == TASK_INACTIVE, ret);

 	/* Return if interrupted by async event */
 	if (ret < 0)
 		return ret;
-	else {
-		exit_code = (int)task->exit_code;
-		tcb_remove(task);
-		tcb_delete(task);
-		return exit_code;
-	}
+
+	/* Now remove it safe against ipc */
+	tcb_remove(task);
+
+	/* Wake up waiters that arrived before removing */
+	wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
+	wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);
+
+	BUG_ON(task->wqh_pager.sleepers > 0);
+	BUG_ON(task->state != TASK_INACTIVE);
+
+	/* Obtain exit code */
+	exit_code = (int)task->exit_code;
+
+	/* Place it on the zombie queue */
+	ktcb_list_add(task,
+		      &per_cpu_byid(kernel_resources.zombie_list,
+				    task->affinity));
+
+	return exit_code;
 }

 int thread_destroy(struct ktcb *task, unsigned int exit_code)
 {
 	// printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);

 	exit_code &= THREAD_EXIT_MASK;

 	if (task_is_child(task))
 		return thread_destroy_child(task);
 	else if (task == current)
 		thread_destroy_self(exit_code);
 	else
 		BUG();

 	return 0;
 }
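The common thread in thread_destroy_child(), thread_destroy_self() and thread_wait() is that tcb_delete() is no longer called inline: dead tasks are parked on a per-cpu zombie list and freed later, when nothing can still be running on their kernel stack and no other cpu holds a stale reference (the TASK_EXITED flag hints the scheduler that reaping is due). A self-contained sketch of that deferred-reclaim shape, with pthreads standing in for kernel spinlocks and invented names:

#include <pthread.h>
#include <stdlib.h>

struct tcb { struct tcb *next; /* ... */ };

/* Per-cpu zombie queue: destroy paths only ever enqueue... */
struct zombie_list {
	pthread_mutex_t lock;
	struct tcb *head;
};

static struct zombie_list zombies = { PTHREAD_MUTEX_INITIALIZER, NULL };

static void zombie_add(struct zombie_list *zl, struct tcb *t)
{
	pthread_mutex_lock(&zl->lock);
	t->next = zl->head;
	zl->head = t;
	pthread_mutex_unlock(&zl->lock);
}

/* ...and a reaper (the idle task, say) frees them later, when the
 * dying thread can no longer be executing on its own stack. */
static void zombie_reap(struct zombie_list *zl)
{
	struct tcb *t, *next;

	pthread_mutex_lock(&zl->lock);
	t = zl->head;
	zl->head = NULL;
	pthread_mutex_unlock(&zl->lock);

	for (; t; t = next) {
		next = t->next;
		free(t);	/* the real kernel's tcb_delete() */
	}
}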
@@ -208,13 +262,12 @@ int thread_start(struct ktcb *task)
 	if (!mutex_trylock(&task->thread_control_lock))
 		return -EAGAIN;

-	/* FIXME: Refuse to run dead tasks */
+	/* Notify scheduler of task resume */
 	sched_resume_async(task);

 	/* Release lock and return */
 	mutex_unlock(&task->thread_control_lock);
 	return 0;
 }
@@ -229,10 +282,15 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
 	}

 	BUG_ON(!orig);

+	/* If original has no syscall context yet, don't copy */
+	if (!orig->syscall_regs)
+		return 0;
+
 	/*
 	 * For duplicated threads pre-syscall context is saved on
 	 * the kernel stack. We copy this context of original
-	 * into the duplicate thread's current context structure
+	 * into the duplicate thread's current context structure,
+	 *
+	 * No locks needed as the thread is not known to the system yet.
 	 */
@@ -262,6 +320,24 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
 	return 0;
 }

+static DECLARE_SPINLOCK(task_select_affinity_lock);
+static unsigned int cpu_rr_affinity;
+
+/* Select which cpu to place the new task in round-robin fashion */
+void thread_setup_affinity(struct ktcb *task)
+{
+	spin_lock(&task_select_affinity_lock);
+
+	task->affinity = cpu_rr_affinity;
+
+	//printk("Set up thread %d affinity=%d\n",
+	//       task->tid, task->affinity);
+
+	cpu_rr_affinity++;
+	if (cpu_rr_affinity >= CONFIG_NCPU)
+		cpu_rr_affinity = 0;
+
+	spin_unlock(&task_select_affinity_lock);
+}

 static inline void
 thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
 		     struct ktcb *new, struct ktcb *orig)
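Placement policy in this commit is decided once, at creation time: thread_setup_affinity() is just a locked counter that wraps at CONFIG_NCPU, dealing new tasks to cpus in round-robin order (a later exchange_registers call may override it). A standalone rendering of the same arithmetic (illustrative only, locking elided):

#include <stdio.h>

#define CONFIG_NCPU 4	/* assumed for the example */

static unsigned int cpu_rr_affinity;

static unsigned int next_affinity(void)
{
	unsigned int cpu = cpu_rr_affinity;

	if (++cpu_rr_affinity >= CONFIG_NCPU)
		cpu_rr_affinity = 0;
	return cpu;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("%u ", next_affinity());	/* prints: 0 1 2 3 0 1 */
	return 0;
}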
@@ -344,8 +420,8 @@ int thread_create(struct task_ids *ids, unsigned int flags)
 			   & TC_COPY_SPACE & TC_NEW_SPACE) || !flags)
 		return -EINVAL;

-	/* Can't have multiple pager specifiers */
-	if (flags & TC_SHARE_PAGER & TC_AS_PAGER)
+	/* Must have one space flag */
+	if ((flags & THREAD_SPACE_MASK) == 0)
 		return -EINVAL;

 	/* Can't request shared utcb or tgid without shared space */
@@ -371,29 +447,23 @@ int thread_create(struct task_ids *ids, unsigned int flags)
 		}
 	}

-	/*
-	 * Note this is a kernel-level relationship
-	 * between the creator and the new thread.
-	 *
-	 * Any higher layer may define parent/child
-	 * relationships between orig and new separately.
-	 */
-	if (flags & TC_AS_PAGER)
-		new->pagerid = current->tid;
-	else if (flags & TC_SHARE_PAGER)
-		new->pagerid = current->pagerid;
-	else
-		new->pagerid = new->tid;
+	/* Set creator as pager */
+	new->pagerid = current->tid;
+	//printk("Thread (%d) pager set as (%d)\n", new->tid, new->pagerid);

-	/*
-	 * Setup container-generic fields from current task
-	 */
+	/* Setup container-generic fields from current task */
 	new->container = current->container;

+	/*
+	 * Set up cpu affinity.
+	 *
+	 * This is the default setting, it may be changed
+	 * by a subsequent exchange_registers call
+	 */
+	thread_setup_affinity(new);
+
 	/* Set up new thread context by using parent ids and flags */
 	thread_setup_new_ids(ids, flags, new, orig);
 	arch_setup_new_thread(new, orig, flags);

 	tcb_add(new);
@@ -406,7 +476,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)
 out_err:
 	/* Premature tcb needs freeing by free_ktcb */
-	free_ktcb(new);
+	free_ktcb(new, current);
 	return err;
 }
@@ -421,13 +491,25 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
 	int err, ret = 0;

 	if ((err = check_access((unsigned long)ids, sizeof(*ids),
-				MAP_USR_RW_FLAGS, 1)) < 0)
+				MAP_USR_RW, 1)) < 0)
 		return err;

-	if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE)
+	if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) {
 		if (!(task = tcb_find(ids->tid)))
 			return -ESRCH;

+		/*
+		 * Tasks may only operate on their children. They may
+		 * also destroy themselves or any children.
+		 */
+		if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
+		    !task_is_child(task) && task != current)
+			return -EPERM;
+
+		if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY
+		    && !task_is_child(task))
+			return -EPERM;
+	}

 	if ((err = cap_thread_check(task, flags, ids)) < 0)
 		return err;
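The net policy of the new guard block: THREAD_DESTROY is permitted on a child or on the caller itself; every other non-create operation is permitted on children only. Restated as a hypothetical helper (not the kernel's code):

/* Hypothetical restatement of the permission policy above. */
enum { ALLOW = 0, DENY = -1 };	/* DENY maps to -EPERM in the kernel */

static int thread_op_permitted(int op_is_destroy, int target_is_child,
			       int target_is_self)
{
	if (op_is_destroy)
		return (target_is_child || target_is_self) ? ALLOW : DENY;
	return target_is_child ? ALLOW : DENY;
}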