Initial commit

This commit is contained in:
Bahadir Balban
2008-01-13 13:53:52 +00:00
commit e2b791a3d8
789 changed files with 95825 additions and 0 deletions

10
src/api/SConscript Normal file
View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# Build-configuration symbols exported by the parent build scripts.
Import('config_symbols')
# The set of source files associated with this SConscript file.
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'space.c']
# Compile each source file into an object using the inherited environment.
obj = env.Object(src_local)
# Hand the object list back to the caller for linking.
Return('obj')

341
src/api/ipc.c Normal file
View File

@@ -0,0 +1,341 @@
/*
* Inter-process communication
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/tcb.h>
#include <l4/lib/mutex.h>
#include <l4/api/ipc.h>
#include <l4/api/thread.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/lib/bit.h>
#include <l4/generic/kmalloc.h>
#include INC_API(syscall.h)
/*
 * IPC operation type. Encoded as a two-bit mask by sys_ipc():
 * bit 0 set = send phase requested, bit 1 set = receive phase requested.
 */
enum IPC_TYPE {
	IPC_INVALID = 0,	/* Neither phase requested - rejected. */
	IPC_SEND = 1,		/* Send-only ipc. */
	IPC_RECV = 2,		/* Receive-only ipc. */
	IPC_SENDRECV = 3	/* Combined send + receive (call/return pattern). */
};
/*
 * Transfers the message registers saved on one thread's ktcb stack into
 * another's. On return from the system call the registers are restored
 * from the stack; a fast ipc path would avoid stacking them at all and
 * thereby skip this copy.
 */
int ipc_msg_copy(struct ktcb *to, struct ktcb *from)
{
	unsigned int *src = KTCB_REF_MR0(from);
	unsigned int *dst = KTCB_REF_MR0(to);

	/* NOTE: MR_TOTAL must match the number of registers saved on stack. */
	memcpy(dst, src, MR_TOTAL * sizeof(*src));

	return 0;
}
/*
 * Means this sender cannot contact this receiver with this type of tag.
 * IOW not accepting a particular type of message from a sender.
 */
struct ipc_block_data {
	l4id_t blocked_sender;	/* Thread id of the sender being refused. */
	u32 blocked_tag;	/* The ipc tag refused from that sender. */
	struct list_head list;	/* Link in the receiver's ipc_block_list. */
};
/*
 * These flags are used on ipc_control call in order to block and unblock a thread
 * from doing ipc with another thread.
 */
enum ipc_control_flag {
	IPC_CONTROL_BLOCK = 0,	/* Add a sender/tag pair to the block list. */
	IPC_CONTROL_UNBLOCK	/* Remove a previously blocked pair. */
};
/*
 * This checks if any of the parties are not allowed to talk to each other.
 * Returns 1 when the sender's current ipc tag is on the receiver's block
 * list for this sender, 0 otherwise. Walks the list under the receiver's
 * ipc_block_lock.
 */
int ipc_blocked(struct ktcb *receiver, struct ktcb *sender)
{
	/* The ipc tag is the first message register on the sender's stack. */
	u32 ipc_tag = *((u32 *)KTCB_REF_MR0(sender));
	struct ipc_block_data *bdata;
	spin_lock(&receiver->ipc_block_lock);
	list_for_each_entry(bdata, &receiver->ipc_block_list, list)
		if (bdata->blocked_sender == sender->tid &&
		    ipc_tag == bdata->blocked_tag) {
			/* Matching pair found: ipc is refused. */
			spin_unlock(&receiver->ipc_block_lock);
			return 1;
		}
	spin_unlock(&receiver->ipc_block_lock);
	return 0;
}
/*
* Adds and removes task/ipc_tag pairs to/from a task's receive block list.
* The pairs on this list are prevented to have ipc rendezvous with the task.
*/
int sys_ipc_control(struct syscall_args *regs)
{
enum ipc_control_flag flag = (enum ipc_control_flag)regs->r0;
struct ipc_block_data *bdata;
struct ktcb *blocked_sender;
l4id_t blocked_tid = (l4id_t)regs->r1;
u32 blocked_tag = (u32)regs->r2;
int unblocked = 0;
switch (flag) {
case IPC_CONTROL_BLOCK:
bdata = kmalloc(sizeof(struct ipc_block_data));
bdata->blocked_sender = blocked_tid;
bdata->blocked_tag = blocked_tag;
INIT_LIST_HEAD(&bdata->list);
BUG_ON(!(blocked_sender = find_task(blocked_tid)));
BUG_ON(ipc_blocked(current, blocked_sender));
spin_lock(&current->ipc_block_lock);
list_add(&bdata->list, &current->ipc_block_list);
spin_unlock(&current->ipc_block_lock);
break;
case IPC_CONTROL_UNBLOCK:
spin_lock(&current->ipc_block_lock);
list_for_each_entry(bdata, &current->ipc_block_list, list)
if (bdata->blocked_sender == blocked_tid &&
bdata->blocked_tag == blocked_tag) {
unblocked = 1;
list_del(&bdata->list);
kfree(bdata);
break;
}
spin_unlock(&current->ipc_block_lock);
BUG_ON(!unblocked);
break;
default:
printk("%s: Unsupported request.\n", __FUNCTION__);
}
return 0;
}
/*
 * Send phase of ipc. If the receiver is already sleeping on its receive
 * queue waiting for us (or for L4_ANYTHREAD) and has not blocked us, the
 * message registers are copied over and the receiver is woken. Otherwise
 * the caller is queued on the receiver's send waitqueue and put to sleep
 * until a matching ipc_recv().
 */
int ipc_send(l4id_t recv_tid)
{
	struct ktcb *receiver = find_task(recv_tid);
	struct waitqueue_head *wqhs;
	struct waitqueue_head *wqhr;

	/*
	 * Fix: the original dereferenced the tcb unconditionally; a
	 * nonexistent thread id caused a NULL dereference in the kernel.
	 */
	if (!receiver)
		return -EINVAL;

	wqhs = &receiver->wqh_send;
	wqhr = &receiver->wqh_recv;

	/* Lock order: send queue first, then receive queue. */
	spin_lock(&wqhs->slock);
	spin_lock(&wqhr->slock);

	/* Is my receiver waiting and accepting ipc from me? */
	if (wqhr->sleepers > 0 && !ipc_blocked(receiver, current)) {
		struct waitqueue *wq, *n;
		struct ktcb *sleeper;

		list_for_each_entry_safe(wq, n, &wqhr->task_list, task_list) {
			sleeper = wq->task;
			/* Found the receiver. Does it sleep for this sender? */
			BUG_ON(sleeper->tid != recv_tid);
			if ((sleeper->senderid == current->tid) ||
			    (sleeper->senderid == L4_ANYTHREAD)) {
				list_del_init(&wq->task_list);
				spin_unlock(&wqhr->slock);
				spin_unlock(&wqhs->slock);
				/* Do the work */
				ipc_msg_copy(sleeper, current);
				/* Wake it up, we can yield here. */
				sched_resume_task(sleeper);
				return 0;
			}
		}
	}

	/* Could not find a receiver that's waiting; sleep on its send queue. */
	DECLARE_WAITQUEUE(wq, current);
	wqhs->sleepers++;
	list_add_tail(&wq.task_list, &wqhs->task_list);
	sched_notify_sleep(current);
	need_resched = 1;
	spin_unlock(&wqhr->slock);
	spin_unlock(&wqhs->slock);
	return 0;
}
/*
 * Receive phase of ipc. Records who the caller is willing to receive from
 * (a specific tid or L4_ANYTHREAD). If a matching, unblocked sender is
 * already asleep on our send waitqueue, its message registers are copied
 * in and it is woken; otherwise the caller sleeps on its own receive
 * waitqueue until a sender arrives.
 */
int ipc_recv(l4id_t senderid)
{
	struct waitqueue_head *wqhs = &current->wqh_send;
	struct waitqueue_head *wqhr = &current->wqh_recv;
	/* Specify who to receiver from, so senders know. */
	current->senderid = senderid;
	/* Lock order: send queue first, then receive queue (as in ipc_send). */
	spin_lock(&wqhs->slock);
	spin_lock(&wqhr->slock);
	/* Is my sender waiting? */
	if (wqhs->sleepers > 0) {
		struct waitqueue *wq, *n;
		struct ktcb *sleeper;
		list_for_each_entry_safe(wq, n, &wqhs->task_list, task_list) {
			sleeper = wq->task;
			/* Found a sender, is it unblocked for rendezvous? */
			if ((sleeper->tid == current->senderid) ||
			    ((current->senderid == L4_ANYTHREAD) &&
			     !ipc_blocked(current, sleeper))) {
				/* Check for bug */
				BUG_ON(sleeper->tid == current->senderid &&
				       ipc_blocked(current, sleeper));
				/* Dequeue before dropping the locks. */
				list_del_init(&wq->task_list);
				spin_unlock(&wqhr->slock);
				spin_unlock(&wqhs->slock);
				/* Do the work */
				ipc_msg_copy(current, sleeper);
				//printk("(%d) Waking up (%d)\n", current->tid,
				//	sleeper->tid);
				/* Wake it up */
				sched_resume_task(sleeper);
				return 0;
			}
		}
	}
	/* Could not find a sender that's waiting */
	DECLARE_WAITQUEUE(wq, current);
	wqhr->sleepers++;
	list_add_tail(&wq.task_list, &wqhr->task_list);
	sched_notify_sleep(current);
	need_resched = 1;
	// printk("(%d) waiting for (%d) \n", current->tid, current->senderid);
	spin_unlock(&wqhr->slock);
	spin_unlock(&wqhs->slock);
	return 0;
}
/* FIXME: REMOVE: remove this completely and replace by ipc_sendrecv() */
/*
 * Sends the actual message to @to, then a second message tagged
 * L4_IPC_TAG_WAIT to the same destination. Fix: errors from ipc_send()
 * are now propagated instead of being silently dropped.
 */
int ipc_sendwait(l4id_t to)
{
	unsigned int *mregs = KTCB_REF_MR0(current);
	int ret;

	/* Send actual message */
	if ((ret = ipc_send(to)) < 0)
		return ret;

	/* Send wait message */
	mregs[L4_IPC_TAG_MR_OFFSET] = L4_IPC_TAG_WAIT;
	return ipc_send(to);
}
/*
 * We currently only support send-receiving from the same task. The receive
 * stage is initiated with the special L4_IPC_TAG_IPCRETURN. This tag is used by
 * client tasks for receiving returned ipc results back. This is by far the most
 * common ipc pattern between client tasks and servers since every such ipc
 * request expects a result.
 *
 * Fix: errors from the send and receive stages are now propagated to the
 * caller instead of being discarded.
 */
int ipc_sendrecv(l4id_t to, l4id_t from)
{
	int ret;

	if (to == from) {
		/* IPC send request stage; bail out if it fails. */
		if ((ret = ipc_send(to)) < 0)
			return ret;
		/*
		 * IPC result return stage.
		 *
		 * If the receiving task is scheduled here, (likely to be a
		 * server which shouldn't block too long) it would only block
		 * for a fixed amount of time between these send and receive
		 * calls.
		 */
		ret = ipc_recv(from);
	} else {
		printk("%s: Unsupported ipc operation.\n", __FUNCTION__);
		ret = -ENOSYS;
	}
	return ret;
}
/*
 * Dispatches a validated ipc request to the matching send, receive or
 * send+receive primitive. Unknown or IPC_INVALID types are rejected.
 */
static inline int __sys_ipc(l4id_t to, l4id_t from, unsigned int ipc_type)
{
	if (ipc_type == IPC_SEND)
		return ipc_send(to);
	if (ipc_type == IPC_RECV)
		return ipc_recv(from);
	if (ipc_type == IPC_SENDRECV)
		return ipc_sendrecv(to, from);

	/* IPC_INVALID and anything unrecognised ends up here. */
	printk("Unsupported ipc operation.\n");
	return -ENOSYS;
}
/*
 * sys_ipc has multiple functions. In a nutshell:
 * - Copies message registers from one thread to another.
 * - Sends notification bits from one thread to another.
 * - Synchronises the threads involved in ipc. (i.e. a blocking rendez-vous)
 * - Can propagate messages from third party threads.
 * - A thread can both send and receive on the same call.
 *
 * r0 = destination tid (L4_NILTHREAD for receive-only),
 * r1 = source tid (L4_NILTHREAD for send-only).
 */
int sys_ipc(struct syscall_args *regs)
{
	l4id_t to = (l4id_t)regs->r0;
	l4id_t from = (l4id_t)regs->r1;
	unsigned int ipc_type;
	int ret;

	/*
	 * Argument sanity: both ids must lie in [L4_ANYTHREAD,
	 * MAX_PREDEFINED_TID], and a thread may not ipc with itself.
	 */
	if (from < L4_ANYTHREAD || from > MAX_PREDEFINED_TID ||
	    to < L4_ANYTHREAD || to > MAX_PREDEFINED_TID ||
	    from == current->tid || to == current->tid) {
		ret = -EINVAL;
		goto error;
	}

	/* Bit 0 encodes a send phase, bit 1 a receive phase. */
	ipc_type = (unsigned int)(to != L4_NILTHREAD) |
		   ((unsigned int)(from != L4_NILTHREAD) << 1);
	if (ipc_type == IPC_INVALID) {
		ret = -EINVAL;
		goto error;
	}

	if ((ret = __sys_ipc(to, from, ipc_type)) < 0)
		goto error;
	return ret;

error:
	printk("Erroneous ipc by: %d\n", current->tid);
	ipc_type = IPC_INVALID;
	return ret;
}

78
src/api/kip.c Normal file
View File

@@ -0,0 +1,78 @@
/*
* Kernel Interface Page and sys_kdata_read()
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/generic/tcb.h>
#include <l4/generic/physmem.h>
#include INC_API(kip.h)
#include INC_API(syscall.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)
/* FIXME: Change the unit name */
/*
 * The kernel interface page. NOTE(review): UNIT() presumably places this
 * object in a dedicated linker section named "kip" - confirm against the
 * macro's definition.
 */
UNIT("kip") struct kip kip;
/* Error-checked kernel data request call */
/*
 * Copies the requested kernel data structure into the caller-supplied
 * buffer. @rd selects the request (KDATA_*), @dest is the destination
 * buffer. Returns 0 on success, -1 on an unknown request descriptor.
 * NOTE(review): @dest is not validated against the caller's mappings or
 * buffer size yet - see the FIXMEs below.
 */
int __sys_kread(int rd, void *dest)
{
	int err = 0;
	switch(rd) {
	case KDATA_PAGE_MAP:
		/*
		 * FIXME:FIXME: Check if address is mapped here first!!!
		 * Also check if process has enough buffer for physmem to fit!!!
		 */
		printk("Handling KDATA_PAGE_MAP request.\n");
		/* Copies the whole physical page map structure. */
		memcpy(dest, &page_map, sizeof(page_map));
		break;
	case KDATA_BOOTDESC:
		printk("Handling KDATA_BOOTDESC request.\n");
		/*
		 * FIXME:FIXME: Check if address is mapped here first!!!
		 * Also check if process has enough buffer for physmem to fit!!!
		 */
		/* Variable-sized copy: length comes from the descriptor itself. */
		memcpy(dest, bootdesc, bootdesc->desc_size);
		break;
	case KDATA_BOOTDESC_SIZE:
		printk("Handling KDATA_BOOTDESC_SIZE request.\n");
		/*
		 * FIXME:FIXME: Check if address is mapped here first!!!
		 * Also check if process has enough buffer for physmem to fit!!!
		 */
		*(unsigned int *)dest = bootdesc->desc_size;
		break;
	default:
		printk("Unsupported kernel data request.\n");
		err = -1;
	}
	return err;
}
/*
 * Privileged tasks use this call to request data about the system during their
 * initialisation. This read-like call is only available during system startup.
 * It is much more flexible to use this method rather than advertise a customly
 * forged KIP to all tasks throughout the system lifetime. Note, this does not
 * support file positions; any such features aren't supported since this call
 * is discarded after startup.
 */
int sys_kread(struct syscall_args *a)
{
	unsigned int *arg = KTCB_REF_ARG0(current);
	void *addr = (void *)arg[1];	/* Buffer address */
	int rd = (int)arg[0];		/* Request descriptor */

	/*
	 * Error checking. Fix: the original tested the pointer with a
	 * relational comparison (addr <= 0), which is not valid C for
	 * pointers; a plain null check is the correct, portable test.
	 */
	if ((rd < 0) || !addr) {
		printk("%s: Invalid arguments.\n", __FUNCTION__);
		return -1;
	}
	return __sys_kread(rd, addr);
}

65
src/api/space.c Normal file
View File

@@ -0,0 +1,65 @@
/*
* Space-related system calls.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/tcb.h>
#include INC_API(syscall.h)
#include INC_SUBARCH(mm.h)
#include <l4/api/errno.h>
/* NOTE:
* For lazy mm switching, a list of newly created mappings that are common to
* all tasks (e.g. any mapping done in the kernel) can be kept here so that when
* a new task is scheduled, the same mappings are copied to its page tables as
* well. struct list_head new_mappings;
*/
/*
 * Maps a physical region into a task's address space.
 * r0 = phys, r1 = virt, r2 = page count, r3 = map flags, r4 = target tid.
 */
int sys_map(struct syscall_args *regs)
{
	unsigned long phys = regs->r0;
	unsigned long virt = regs->r1;
	unsigned long npages = regs->r2;
	unsigned long flags = regs->r3;
	unsigned int tid = regs->r4;
	struct ktcb *target;

	/* Common case: a task maps into its own space; otherwise hash lookup. */
	target = (tid == current->tid) ? current : find_task(tid);
	if (!target) {
		BUG();
		return -EINVAL;
	}

	add_mapping_pgd(phys, virt, npages << PAGE_BITS, flags, target->pgd);
	return 0;
}
int sys_unmap(struct syscall_args *regs)
{
unsigned long virt = regs->r0;
unsigned long npages = regs->r1;
unsigned int tid = regs->r2;
struct ktcb *target;
if (tid == current->tid) { /* The easiest case */
target = current;
goto found;
} else /* else search the tcb from its hash list */
if ((target = find_task(tid)))
goto found;
BUG();
return -EINVAL;
found:
for (int i = 0; i < npages; i++)
remove_mapping_pgd(virt, target->pgd);
return 0;
}

184
src/api/syscall.c Normal file
View File

@@ -0,0 +1,184 @@
/*
* System Calls
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/tcb.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/api/space.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
int sys_exchange_registers(struct syscall_args *regs)
{
struct ktcb *task;
unsigned int pc = regs->r0;
unsigned int sp = regs->r1;
unsigned int pagerid = regs->r2;
l4id_t tid = regs->r3;
/* Find tcb from its hash list */
if ((task = find_task(tid)))
goto found;
/* FIXME: Whatif not found??? Recover gracefully. */
BUG();
found:
/* Set its registers */
task->context.pc = pc;
task->context.sp = sp;
task->context.spsr = ARM_MODE_USR;
/* Set its pager */
task->pagerid = pagerid;
return 0;
}
/* Placeholder scheduling syscall: currently only logs the caller's tid. */
int sys_schedule(struct syscall_args *regs)
{
	printk("(SVC) %s called. Tid (%d)\n", __FUNCTION__, current->tid);
	return 0;
}
/*
 * NOTE(review): the block below is disabled with #if 0 - an unfinished
 * draft of shared-memory setup for space_control. It is not compiled;
 * consider deleting or reviving it rather than letting it rot.
 */
#if 0
/*
 * THIS CODE IS TO BE USED WHEN MODIFYING PAGE TABLES FOR SHARED MEMORY!!!
 */
int do_shm_setup(struct shm_kdata *kdata)
{
	struct ktcb *sender, *receiver;
	unsigned long sndphys, sndvirt, rcvvirt;
	if (!(sender = find_task(kdata->sender)))
		return -1;
	if (!(receiver = find_task(kdata->receiver)))
		return -1;
	/*
	 * There's no guarantee that shared pages are contiguous in physical,
	 * therefore every virtual page in the sharer shall be converted for
	 * its physical address, and each of those addresses are mapped.
	 */
	for (int i = 0; i < kdata->npages; i++) {
		/* The sender virtual address for each shared page */
		sndvirt = __pfn_to_addr(kdata->send_pfn) + (i * PAGE_SIZE);
		/* The corresponding receiver virtual address */
		rcvvirt = __pfn_to_addr(kdata->recv_pfn) + (i * PAGE_SIZE);
		/* Converted to physical, through the sharer's page table. */
		sndphys = __pte_to_addr(virt_to_pte_from_pgd(sndvirt,
							     sender->pgd));
		/*
		 * Mapped to virtual in the sharee's address space. Note this
		 * is mapped as uncached, in order to avoid cache aliasing
		 * issues in ARM v5, which is VIVT. A possible optimisation for
		 * the future is to make it cached and restrict the shm
		 * address range.
		 */
		add_mapping_pgd(sndphys, rcvvirt, PAGE_SIZE, MAP_SVC_IO_FLAGS,
				receiver->pgd);
	}
	return 0;
}
/* Modifies an address space */
int sys_space_control(struct syscall_args *regs)
{
	unsigned int operation = regs->r0;
	int err = 0;
	if (current->tid != PAGER_TID) {
		printk("%s: Priveledged call, only task id %d can call it. (Current id: %d)\n",
		       __FUNCTION__, current->tid, PAGER_TID);
		return -EPERM;
	}
	switch (operation) {
	case SPCCTRL_SHM:
		/* FIXME: Add an access check for user space structure */
		if ((err = do_shm_setup((struct shm_kdata *)&regs->r1) < 0))
			printk("%s: Error setting up the shm area.\n", __FUNCTION__);
		break;
	default:
		printk("%s: Unsupported operation: %d\n", __FUNCTION__, operation);
		err = -ENOSYS;
	}
	printk("%s called. Tid (%d)\n", __FUNCTION__, current->tid);
	return err;
}
#endif
/* Address space modification is not implemented yet. */
int sys_space_control(struct syscall_args *regs)
{
	return -ENOSYS;
}
int sys_getid(struct syscall_args *regs)
{
struct task_ids *ids = (struct task_ids *)regs->r0;
struct ktcb *this = current;
ids->tid = this->tid;
ids->spid = this->spid;
return 0;
}
/*
 * Granted pages *must* be outside of the pages that are already owned and used
 * by the kernel, otherwise a hostile/buggy pager can attack kernel addresses by
 * fooling it to use them as freshly granted pages. Kernel owned pages are
 * defined as, "any page that has been used by the kernel prior to all free
 * physical memory is taken by a pager, and any other page that has been granted
 * so far by any such pager."
 */
/*
 * Stub: always accepts the grant for now. Returning 0 means "valid";
 * a real implementation must return negative for pages the kernel
 * already owns.
 */
int validate_granted_pages(unsigned long pfn, int npages)
{
	/* FIXME: Fill this in */
	return 0;
}
/*
 * Used by a pager to grant memory to kernel for its own use. Generally
 * this memory is used for thread creation and memory mapping, (e.g. new
 * page tables, page middle directories, per-task kernel stack etc.)
 *
 * r0 = first page frame number, r1 = number of pages.
 */
int sys_kmem_grant(struct syscall_args *regs)
{
	unsigned long pfn = (unsigned long)regs->r0;
	int npages = (int)regs->r1;

	/* Refuse any page range the kernel already owns. */
	if (validate_granted_pages(pfn, npages) < 0)
		return -EINVAL;

	/* Hand the granted pages to the page allocator; failure is fatal. */
	BUG_ON(pgalloc_add_new_grant(pfn, npages));

	return 0;
}
/* FIXME:
 * The pager reclaims memory from the kernel whenever it thinks this is just.
 */
/* Unimplemented: calling this currently panics the kernel via BUG(). */
int sys_kmem_reclaim(struct syscall_args *regs)
{
	BUG();
	return 0;
}

139
src/api/thread.c Normal file
View File

@@ -0,0 +1,139 @@
/*
* Thread related system calls.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include INC_API(syscall.h)
#include <l4/api/thread.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/generic/pgalloc.h>
/* Voluntarily yields the processor to the next runnable thread. */
int sys_thread_switch(struct syscall_args *regs)
{
	sched_yield();
	return 0;
}
int thread_suspend(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_suspend_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
}
int thread_resume(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_resume_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
}
/*
 * Starts the thread with the given id. Unlike suspend/resume, a missing
 * thread id is treated as fatal here (BUG) after logging the error.
 */
int thread_start(struct task_ids *ids)
{
	struct ktcb *task = find_task(ids->tid);

	if (!task) {
		printk("%s: Error: Could not find any thread with id %d to start.\n",
		       __FUNCTION__, ids->tid);
		BUG();
		return -EINVAL;
	}

	sched_start_task(task);
	return 0;
}
/*
* Creates a thread, with a new thread id, and depending on whether the space
* id exists, either adds it to an existing space or creates a new space.
*/
int thread_create(struct task_ids *ids)
{
struct ktcb *task, *new = (struct ktcb *)zalloc_page();
/* Visit all tasks to see if space ids match. */
list_for_each_entry(task, &global_task_list, task_list) {
/* Space ids match, can use existing space */
if (task->spid == ids->spid) {
BUG(); /* This is untested yet. */
goto spc_found;
}
}
/* No existing space with such id. Creating a new address space */
new->pgd = alloc_pgd();
/* Copies all bits that are fixed for all tasks. */
copy_pgd_kern_all(new->pgd);
/* Get new space id */
ids->spid = id_new(space_id_pool);
spc_found:
/* Get a new thread id */
ids->tid = id_new(thread_id_pool);
/* Set all ids */
set_task_ids(new, ids);
/* Set task state. */
new->state = TASK_INACTIVE;
/* Initialise ipc waitqueues */
waitqueue_head_init(&new->wqh_send);
waitqueue_head_init(&new->wqh_recv);
/* Add task to global hlist of tasks */
add_task_global(new);
return 0;
}
/*
 * Creates, destroys and modifies threads. Also implicitly creates an address
 * space for a thread that doesn't already have one, or destroys it if the last
 * thread that uses it is destroyed.
 *
 * reg[0] = THREAD_* action code, reg[1] = pointer to the task_ids argument.
 */
int sys_thread_control(struct syscall_args *regs)
{
	u32 *reg = (u32 *)regs;
	unsigned int request = reg[0];
	struct task_ids *ids = (struct task_ids *)reg[1];

	switch (request) {
	case THREAD_CREATE:
		return thread_create(ids);
	case THREAD_RUN:
		return thread_start(ids);
	case THREAD_SUSPEND:
		return thread_suspend(ids);
	case THREAD_RESUME:
		return thread_resume(ids);
	default:
		return -EINVAL;
	}
}