Initial commit

Bahadir Balban
2008-01-13 13:53:52 +00:00
commit e2b791a3d8
789 changed files with 95825 additions and 0 deletions

10
src/api/SConscript Normal file

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
Import('config_symbols')
# The set of source files associated with this SConscript file.
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'space.c']
obj = env.Object(src_local)
Return('obj')

341
src/api/ipc.c Normal file

@@ -0,0 +1,341 @@
/*
* Inter-process communication
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/tcb.h>
#include <l4/lib/mutex.h>
#include <l4/api/ipc.h>
#include <l4/api/thread.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/lib/bit.h>
#include <l4/generic/kmalloc.h>
#include INC_API(syscall.h)
enum IPC_TYPE {
IPC_INVALID = 0,
IPC_SEND = 1,
IPC_RECV = 2,
IPC_SENDRECV = 3
};
/*
* Copies message registers from one ktcb stack to another. During the return
* from system call, the registers are popped from the stack. On fast ipc path
* they shouldn't even be pushed to the stack to avoid extra copying.
*/
int ipc_msg_copy(struct ktcb *to, struct ktcb *from)
{
unsigned int *mr0_src = KTCB_REF_MR0(from);
unsigned int *mr0_dst = KTCB_REF_MR0(to);
/* NOTE:
* Make sure MR_TOTAL matches the number of registers saved on stack.
*/
memcpy(mr0_dst, mr0_src, MR_TOTAL * sizeof(unsigned int));
return 0;
}
/*
* An entry here means the given sender cannot contact this receiver with this
* type of tag, i.e. the receiver is not accepting that particular type of
* message from that sender.
*/
struct ipc_block_data {
l4id_t blocked_sender;
u32 blocked_tag;
struct list_head list;
};
/*
* These flags are passed to the ipc_control call in order to block and
* unblock a thread from doing ipc with another thread.
*/
enum ipc_control_flag {
IPC_CONTROL_BLOCK = 0,
IPC_CONTROL_UNBLOCK
};
/*
* This checks if any of the parties are not allowed to talk to each other.
*/
int ipc_blocked(struct ktcb *receiver, struct ktcb *sender)
{
u32 ipc_tag = *((u32 *)KTCB_REF_MR0(sender));
struct ipc_block_data *bdata;
spin_lock(&receiver->ipc_block_lock);
list_for_each_entry(bdata, &receiver->ipc_block_list, list)
if (bdata->blocked_sender == sender->tid &&
ipc_tag == bdata->blocked_tag) {
spin_unlock(&receiver->ipc_block_lock);
return 1;
}
spin_unlock(&receiver->ipc_block_lock);
return 0;
}
/*
* Adds and removes task/ipc_tag pairs to/from a task's receive block list.
* The pairs on this list are prevented from having an ipc rendezvous with
* the task.
*/
int sys_ipc_control(struct syscall_args *regs)
{
enum ipc_control_flag flag = (enum ipc_control_flag)regs->r0;
struct ipc_block_data *bdata;
struct ktcb *blocked_sender;
l4id_t blocked_tid = (l4id_t)regs->r1;
u32 blocked_tag = (u32)regs->r2;
int unblocked = 0;
switch (flag) {
case IPC_CONTROL_BLOCK:
bdata = kmalloc(sizeof(struct ipc_block_data));
bdata->blocked_sender = blocked_tid;
bdata->blocked_tag = blocked_tag;
INIT_LIST_HEAD(&bdata->list);
BUG_ON(!(blocked_sender = find_task(blocked_tid)));
BUG_ON(ipc_blocked(current, blocked_sender));
spin_lock(&current->ipc_block_lock);
list_add(&bdata->list, &current->ipc_block_list);
spin_unlock(&current->ipc_block_lock);
break;
case IPC_CONTROL_UNBLOCK:
spin_lock(&current->ipc_block_lock);
list_for_each_entry(bdata, &current->ipc_block_list, list)
if (bdata->blocked_sender == blocked_tid &&
bdata->blocked_tag == blocked_tag) {
unblocked = 1;
list_del(&bdata->list);
kfree(bdata);
break;
}
spin_unlock(&current->ipc_block_lock);
BUG_ON(!unblocked);
break;
default:
printk("%s: Unsupported request.\n", __FUNCTION__);
}
return 0;
}
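/*
 * A minimal usage sketch for the call above (the tid and tag values are
 * made-up examples, not constants from this codebase): a receiver that wants
 * to stop sender 5 from sending it messages tagged 0x10 would issue
 * ipc_control with:
 *
 *	regs->r0 = IPC_CONTROL_BLOCK;	(flag)
 *	regs->r1 = 5;			(blocked sender tid)
 *	regs->r2 = 0x10;		(blocked ipc tag)
 *
 * and later remove the pair again with IPC_CONTROL_UNBLOCK and the same
 * tid/tag values.
 */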
int ipc_send(l4id_t recv_tid)
{
struct ktcb *receiver = find_task(recv_tid);
struct waitqueue_head *wqhs = &receiver->wqh_send;
struct waitqueue_head *wqhr = &receiver->wqh_recv;
spin_lock(&wqhs->slock);
spin_lock(&wqhr->slock);
/* Is my receiver waiting and accepting ipc from me? */
if (wqhr->sleepers > 0 && !ipc_blocked(receiver, current)) {
struct waitqueue *wq, *n;
struct ktcb *sleeper;
list_for_each_entry_safe(wq, n, &wqhr->task_list, task_list) {
sleeper = wq->task;
/* Found the receiver. Does it sleep for this sender? */
BUG_ON(sleeper->tid != recv_tid);
if ((sleeper->senderid == current->tid) ||
(sleeper->senderid == L4_ANYTHREAD)) {
list_del_init(&wq->task_list);
spin_unlock(&wqhr->slock);
spin_unlock(&wqhs->slock);
/* Do the work */
ipc_msg_copy(sleeper, current);
//printk("(%d) Waking up (%d)\n", current->tid,
// sleeper->tid);
/* Wake it up, we can yield here. */
sched_resume_task(sleeper);
return 0;
}
}
}
/* Could not find a receiver that's waiting */
DECLARE_WAITQUEUE(wq, current);
wqhs->sleepers++;
list_add_tail(&wq.task_list, &wqhs->task_list);
sched_notify_sleep(current);
need_resched = 1;
//printk("(%d) waiting for (%d)\n", current->tid, recv_tid);
spin_unlock(&wqhr->slock);
spin_unlock(&wqhs->slock);
return 0;
}
int ipc_recv(l4id_t senderid)
{
struct waitqueue_head *wqhs = &current->wqh_send;
struct waitqueue_head *wqhr = &current->wqh_recv;
/* Specify who to receive from, so senders know. */
current->senderid = senderid;
spin_lock(&wqhs->slock);
spin_lock(&wqhr->slock);
/* Is my sender waiting? */
if (wqhs->sleepers > 0) {
struct waitqueue *wq, *n;
struct ktcb *sleeper;
list_for_each_entry_safe(wq, n, &wqhs->task_list, task_list) {
sleeper = wq->task;
/* Found a sender, is it unblocked for rendezvous? */
if ((sleeper->tid == current->senderid) ||
((current->senderid == L4_ANYTHREAD) &&
!ipc_blocked(current, sleeper))) {
/* Check for bug */
BUG_ON(sleeper->tid == current->senderid &&
ipc_blocked(current, sleeper));
list_del_init(&wq->task_list);
spin_unlock(&wqhr->slock);
spin_unlock(&wqhs->slock);
/* Do the work */
ipc_msg_copy(current, sleeper);
//printk("(%d) Waking up (%d)\n", current->tid,
// sleeper->tid);
/* Wake it up */
sched_resume_task(sleeper);
return 0;
}
}
}
/* Could not find a sender that's waiting */
DECLARE_WAITQUEUE(wq, current);
wqhr->sleepers++;
list_add_tail(&wq.task_list, &wqhr->task_list);
sched_notify_sleep(current);
need_resched = 1;
// printk("(%d) waiting for (%d) \n", current->tid, current->senderid);
spin_unlock(&wqhr->slock);
spin_unlock(&wqhs->slock);
return 0;
}
/* FIXME: REMOVE: remove this completely and replace by ipc_sendrecv() */
int ipc_sendwait(l4id_t to)
{
unsigned int *mregs = KTCB_REF_MR0(current);
/* Send actual message */
ipc_send(to);
/* Send wait message */
mregs[L4_IPC_TAG_MR_OFFSET] = L4_IPC_TAG_WAIT;
ipc_send(to);
return 0;
}
/*
* We currently only support send-receiving from the same task. The receive
* stage is initiated with the special L4_IPC_TAG_IPCRETURN. This tag is used by
* client tasks for receiving returned ipc results back. This is by far the most
* common ipc pattern between client tasks and servers since every such ipc
* request expects a result.
*/
int ipc_sendrecv(l4id_t to, l4id_t from)
{
int ret = 0;
if (to == from) {
/* IPC send request stage */
ipc_send(to);
/*
* IPC result return stage.
*
* If the receiving task is scheduled here, (likely to be a
* server which shouldn't block too long) it would only block
* for a fixed amount of time between these send and receive
* calls.
*/
ipc_recv(from);
} else {
printk("%s: Unsupported ipc operation.\n", __FUNCTION__);
ret = -ENOSYS;
}
return ret;
}
static inline int __sys_ipc(l4id_t to, l4id_t from, unsigned int ipc_type)
{
int ret;
switch (ipc_type) {
case IPC_SEND:
ret = ipc_send(to);
break;
case IPC_RECV:
ret = ipc_recv(from);
break;
case IPC_SENDRECV:
ret = ipc_sendrecv(to, from);
break;
case IPC_INVALID:
default:
printk("Unsupported ipc operation.\n");
ret = -ENOSYS;
}
return ret;
}
/*
* sys_ipc has multiple functions. In a nutshell:
* - Copies message registers from one thread to another.
* - Sends notification bits from one thread to another.
* - Synchronises the threads involved in ipc. (i.e. a blocking rendezvous)
* - Can propagate messages from third party threads.
* - A thread can both send and receive on the same call.
*/
int sys_ipc(struct syscall_args *regs)
{
l4id_t to = (l4id_t)regs->r0;
l4id_t from = (l4id_t)regs->r1;
unsigned int ipc_type = 0;
int ret = 0;
/* Check arguments */
if (!((from >= L4_ANYTHREAD) && (from <= MAX_PREDEFINED_TID))) {
ret = -EINVAL;
goto error;
}
if (!((to >= L4_ANYTHREAD) && (to <= MAX_PREDEFINED_TID))) {
ret = -EINVAL;
goto error;
}
if (from == current->tid || to == current->tid) {
ret = -EINVAL;
goto error;
}
/* Bit [0] set for send */
ipc_type |= (to != L4_NILTHREAD);
/* Bit [1] set for receive, bits [1:0] for both */
ipc_type |= ((from != L4_NILTHREAD) << 1);
if (ipc_type == IPC_INVALID) {
ret = -EINVAL;
goto error;
}
if ((ret = __sys_ipc(to, from, ipc_type)) < 0)
goto error;
return ret;
error:
printk("Erroneous ipc by: %d\n", current->tid);
ipc_type = IPC_INVALID;
return ret;
}
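/*
 * The two flag computations above encode the operation in the low two bits
 * of ipc_type, matching enum IPC_TYPE:
 *
 *	to != NIL, from == NIL	->  01	IPC_SEND
 *	to == NIL, from != NIL	->  10	IPC_RECV
 *	to != NIL, from != NIL	->  11	IPC_SENDRECV
 *	to == NIL, from == NIL	->  00	IPC_INVALID (rejected)
 */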

78
src/api/kip.c Normal file

@@ -0,0 +1,78 @@
/*
* Kernel Interface Page and sys_kdata_read()
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/generic/tcb.h>
#include <l4/generic/physmem.h>
#include INC_API(kip.h)
#include INC_API(syscall.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)
/* FIXME: Change the unit name */
UNIT("kip") struct kip kip;
/* Error-checked kernel data request call */
int __sys_kread(int rd, void *dest)
{
int err = 0;
switch(rd) {
case KDATA_PAGE_MAP:
/*
* FIXME:FIXME: Check if address is mapped here first!!!
* Also check if process has enough buffer for physmem to fit!!!
*/
printk("Handling KDATA_PAGE_MAP request.\n");
memcpy(dest, &page_map, sizeof(page_map));
break;
case KDATA_BOOTDESC:
printk("Handling KDATA_BOOTDESC request.\n");
/*
* FIXME:FIXME: Check if address is mapped here first!!!
* Also check if process has enough buffer for physmem to fit!!!
*/
memcpy(dest, bootdesc, bootdesc->desc_size);
break;
case KDATA_BOOTDESC_SIZE:
printk("Handling KDATA_BOOTDESC_SIZE request.\n");
/*
* FIXME:FIXME: Check if address is mapped here first!!!
* Also check if process has enough buffer for physmem to fit!!!
*/
*(unsigned int *)dest = bootdesc->desc_size;
break;
default:
printk("Unsupported kernel data request.\n");
err = -1;
}
return err;
}
/*
* Privileged tasks use this call to request data about the system during their
* initialisation. This read-like call is only available during system startup.
* It is much more flexible to use this method than to advertise a custom-forged
* KIP to all tasks throughout the system lifetime. Note that file positions and
* similar features are not supported, since this call is discarded after
* startup.
*/
int sys_kread(struct syscall_args *a)
{
unsigned int *arg = KTCB_REF_ARG0(current);
void *addr = (void *)arg[1]; /* Buffer address */
int rd = (int)arg[0]; /* Request descriptor */
/* Error checking */
if ((rd < 0) || !addr) {
printk("%s: Invalid arguments.\n", __FUNCTION__);
return -1;
}
return __sys_kread(rd, addr);
}
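/*
 * A usage sketch from a privileged task during startup; l4_kread() is a
 * hypothetical userspace stub that traps into sys_kread() with arg[0] =
 * request descriptor and arg[1] = buffer address, as unpacked above:
 *
 *	unsigned int size;
 *	struct bootdesc *bd;
 *
 *	l4_kread(KDATA_BOOTDESC_SIZE, &size);
 *	bd = allocate(size);		(task-local allocator)
 *	l4_kread(KDATA_BOOTDESC, bd);
 */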

65
src/api/space.c Normal file

@@ -0,0 +1,65 @@
/*
* Space-related system calls.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/tcb.h>
#include INC_API(syscall.h)
#include INC_SUBARCH(mm.h)
#include <l4/api/errno.h>
/* NOTE:
* For lazy mm switching, a list of newly created mappings that are common to
* all tasks (e.g. any mapping done in the kernel) can be kept here so that when
* a new task is scheduled, the same mappings are copied to its page tables as
* well. struct list_head new_mappings;
*/
int sys_map(struct syscall_args *regs)
{
unsigned long phys = regs->r0;
unsigned long virt = regs->r1;
unsigned long npages = regs->r2;
unsigned long flags = regs->r3;
unsigned int tid = regs->r4;
struct ktcb *target;
if (tid == current->tid) { /* The easiest case */
target = current;
goto found;
} else /* else search the tcb from its hash list */
if ((target = find_task(tid)))
goto found;
BUG();
return -EINVAL;
found:
add_mapping_pgd(phys, virt, npages << PAGE_BITS, flags, target->pgd);
return 0;
}
int sys_unmap(struct syscall_args *regs)
{
unsigned long virt = regs->r0;
unsigned long npages = regs->r1;
unsigned int tid = regs->r2;
struct ktcb *target;
if (tid == current->tid) { /* The easiest case */
target = current;
goto found;
} else /* else search the tcb from its hash list */
if ((target = find_task(tid)))
goto found;
BUG();
return -EINVAL;
found:
for (int i = 0; i < npages; i++)
remove_mapping_pgd(virt + i * PAGE_SIZE, target->pgd);
return 0;
}
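/*
 * Argument sketch for sys_map() above, with illustrative values only:
 * mapping 4 pages of a device at physical 0x10100000 to virtual 0x20000000
 * in the caller's own space:
 *
 *	regs->r0 = 0x10100000;		(physical base)
 *	regs->r1 = 0x20000000;		(virtual base)
 *	regs->r2 = 4;			(number of pages)
 *	regs->r3 = MAP_USR_DEFAULT_FLAGS;
 *	regs->r4 = current->tid;
 */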

184
src/api/syscall.c Normal file

@@ -0,0 +1,184 @@
/*
* System Calls
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/tcb.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/api/space.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
int sys_exchange_registers(struct syscall_args *regs)
{
struct ktcb *task;
unsigned int pc = regs->r0;
unsigned int sp = regs->r1;
unsigned int pagerid = regs->r2;
l4id_t tid = regs->r3;
/* Find tcb from its hash list */
if ((task = find_task(tid)))
goto found;
/* FIXME: What if not found??? Recover gracefully. */
BUG();
found:
/* Set its registers */
task->context.pc = pc;
task->context.sp = sp;
task->context.spsr = ARM_MODE_USR;
/* Set its pager */
task->pagerid = pagerid;
return 0;
}
int sys_schedule(struct syscall_args *regs)
{
printk("(SVC) %s called. Tid (%d)\n", __FUNCTION__, current->tid);
return 0;
}
#if 0
/*
* THIS CODE IS TO BE USED WHEN MODIFYING PAGE TABLES FOR SHARED MEMORY!!!
*/
int do_shm_setup(struct shm_kdata *kdata)
{
struct ktcb *sender, *receiver;
unsigned long sndphys, sndvirt, rcvvirt;
if (!(sender = find_task(kdata->sender)))
return -1;
if (!(receiver = find_task(kdata->receiver)))
return -1;
/*
* There's no guarantee that shared pages are contiguous in physical,
* therefore every virtual page in the sharer shall be converted for
* its physical address, and each of those addresses are mapped.
*/
for (int i = 0; i < kdata->npages; i++) {
/* The sender virtual address for each shared page */
sndvirt = __pfn_to_addr(kdata->send_pfn) + (i * PAGE_SIZE);
/* The corresponding receiver virtual address */
rcvvirt = __pfn_to_addr(kdata->recv_pfn) + (i * PAGE_SIZE);
/* Converted to physical, through the sharer's page table. */
sndphys = __pte_to_addr(virt_to_pte_from_pgd(sndvirt,
sender->pgd));
/*
* Mapped to virtual in the sharee's address space. Note this
* is mapped as uncached, in order to avoid cache aliasing
* issues in ARM v5, which is VIVT. A possible optimisation for
* the future is to make it cached and restrict the shm
* address range.
*/
add_mapping_pgd(sndphys, rcvvirt, PAGE_SIZE, MAP_SVC_IO_FLAGS,
receiver->pgd);
}
return 0;
}
/* Modifies an address space */
int sys_space_control(struct syscall_args *regs)
{
unsigned int operation = regs->r0;
int err = 0;
if (current->tid != PAGER_TID) {
printk("%s: Priveledged call, only task id %d can call it. (Current id: %d)\n",
__FUNCTION__, current->tid, PAGER_TID);
return -EPERM;
}
switch (operation) {
case SPCCTRL_SHM:
/* FIXME: Add an access check for user space structure */
if ((err = do_shm_setup((struct shm_kdata *)&regs->r1)) < 0)
printk("%s: Error setting up the shm area.\n", __FUNCTION__);
break;
default:
printk("%s: Unsupported operation: %d\n", __FUNCTION__, operation);
err = -ENOSYS;
}
printk("%s called. Tid (%d)\n", __FUNCTION__, current->tid);
return err;
}
#endif
int sys_space_control(struct syscall_args *regs)
{
return -ENOSYS;
}
int sys_getid(struct syscall_args *regs)
{
struct task_ids *ids = (struct task_ids *)regs->r0;
struct ktcb *this = current;
ids->tid = this->tid;
ids->spid = this->spid;
return 0;
}
/*
* Granted pages *must* be outside of the pages that are already owned and used
* by the kernel, otherwise a hostile/buggy pager can attack kernel addresses by
* fooling it to use them as freshly granted pages. Kernel owned pages are
* defined as, "any page that has been used by the kernel prior to all free
* physical memory is taken by a pager, and any other page that has been granted
* so far by any such pager."
*/
int validate_granted_pages(unsigned long pfn, int npages)
{
/* FIXME: Fill this in */
return 0;
}
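/*
 * A possible shape for the check above, assuming a hypothetical helper
 * page_is_kernel_owned() that consults the kernel's record of used pages
 * (e.g. the page_map):
 *
 *	for (int i = 0; i < npages; i++)
 *		if (page_is_kernel_owned(pfn + i))
 *			return -EINVAL;
 *	return 0;
 */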
/*
* Used by a pager to grant memory to kernel for its own use. Generally
* this memory is used for thread creation and memory mapping, (e.g. new
* page tables, page middle directories, per-task kernel stack etc.)
*/
int sys_kmem_grant(struct syscall_args *regs)
{
unsigned long pfn = (unsigned long)regs->r0;
int npages = (int)regs->r1;
/*
* Check if given set of pages are outside the pages already
* owned by the kernel.
*/
if (validate_granted_pages(pfn, npages) < 0)
return -EINVAL;
/* Add the granted pages to the allocator */
if (pgalloc_add_new_grant(pfn, npages))
BUG();
return 0;
}
/* FIXME:
* The pager reclaims memory from the kernel whenever it thinks this is just.
*/
int sys_kmem_reclaim(struct syscall_args *regs)
{
BUG();
return 0;
}

139
src/api/thread.c Normal file

@@ -0,0 +1,139 @@
/*
* Thread related system calls.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include INC_API(syscall.h)
#include <l4/api/thread.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/generic/pgalloc.h>
int sys_thread_switch(struct syscall_args *regs)
{
sched_yield();
return 0;
}
int thread_suspend(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_suspend_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
}
int thread_resume(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_resume_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
return -EINVAL;
}
int thread_start(struct task_ids *ids)
{
struct ktcb *task;
if ((task = find_task(ids->tid))) {
sched_start_task(task);
return 0;
}
printk("%s: Error: Could not find any thread with id %d to start.\n",
__FUNCTION__, ids->tid);
BUG();
return -EINVAL;
}
/*
* Creates a thread, with a new thread id, and depending on whether the space
* id exists, either adds it to an existing space or creates a new space.
*/
int thread_create(struct task_ids *ids)
{
struct ktcb *task, *new = (struct ktcb *)zalloc_page();
/* Visit all tasks to see if space ids match. */
list_for_each_entry(task, &global_task_list, task_list) {
/* Space ids match, can use existing space */
if (task->spid == ids->spid) {
BUG(); /* This is untested yet. */
goto spc_found;
}
}
/* No existing space with such id. Creating a new address space */
new->pgd = alloc_pgd();
/* Copies all bits that are fixed for all tasks. */
copy_pgd_kern_all(new->pgd);
/* Get new space id */
ids->spid = id_new(space_id_pool);
spc_found:
/* Get a new thread id */
ids->tid = id_new(thread_id_pool);
/* Set all ids */
set_task_ids(new, ids);
/* Set task state. */
new->state = TASK_INACTIVE;
/* Initialise ipc waitqueues */
waitqueue_head_init(&new->wqh_send);
waitqueue_head_init(&new->wqh_recv);
/* Add task to global hlist of tasks */
add_task_global(new);
return 0;
}
/*
* Creates, destroys and modifies threads. Also implicitly creates an address
* space for a thread that doesn't already have one, or destroys it if the last
* thread that uses it is destroyed.
*/
int sys_thread_control(struct syscall_args *regs)
{
u32 *reg = (u32 *)regs;
unsigned int action = reg[0];
struct task_ids *ids = (struct task_ids *)reg[1];
int ret = 0;
switch (action) {
case THREAD_CREATE:
ret = thread_create(ids);
break;
case THREAD_RUN:
ret = thread_start(ids);
break;
case THREAD_SUSPEND:
ret = thread_suspend(ids);
break;
case THREAD_RESUME:
ret = thread_resume(ids);
break;
default:
ret = -EINVAL;
}
return ret;
}
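/*
 * A usage sketch; l4_thread_control() is a hypothetical userspace stub that
 * traps into sys_thread_control() with r0 = action and r1 = pointer to the
 * ids structure, as unpacked above (id values illustrative only):
 *
 *	struct task_ids ids = { .tid = 0, .spid = 0 };
 *
 *	l4_thread_control(THREAD_CREATE, &ids);	(new tid/spid filled in)
 *	l4_thread_control(THREAD_RUN, &ids);
 */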

10
src/arch/arm/SConscript Normal file

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception.c', 'bootdesc.c']
obj = env.Object(src_local)
Return('obj')

45
src/arch/arm/bootdesc.c Normal file

@@ -0,0 +1,45 @@
/*
* Reading of bootdesc forged at build time.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/space.h>
#include INC_ARCH(linker.h)
#include INC_ARCH(bootdesc.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)
struct bootdesc *bootdesc;
void copy_bootdesc(void)
{
struct bootdesc *new = kzalloc(bootdesc->desc_size);
memcpy(new, bootdesc, bootdesc->desc_size);
remove_mapping((unsigned long)bootdesc);
bootdesc = new;
}
void read_bootdesc(void)
{
/*
* End of the kernel image is where bootdesc resides. Note this is
* not added to the page_map because it's meant to be discarded.
*/
add_mapping(virt_to_phys(_end), (unsigned long)_end, PAGE_SIZE,
MAP_USR_DEFAULT_FLAGS);
/* Get original bootdesc */
bootdesc = (struct bootdesc *)_end;
/* Determine end of physical memory used by loaded images. */
for (int i = 0; i < bootdesc->total_images; i++)
if (bootdesc->images[i].phys_end > __svc_images_end)
__svc_images_end = bootdesc->images[i].phys_end;
}
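/*
 * The bootdesc layout assumed by this file (only the fields dereferenced
 * here; see INC_ARCH(bootdesc.h) for the authoritative definition):
 *
 *	struct bootdesc {
 *		int desc_size;			(total descriptor size)
 *		int total_images;
 *		struct bootimage {
 *			...
 *			unsigned long phys_end;
 *		} images[];
 *	};
 */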

218
src/arch/arm/exception.c Normal file

@@ -0,0 +1,218 @@
/*
* Debug print support for unexpected exceptions
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/lib/printk.h>
#include <l4/api/ipc.h>
#include <l4/api/errno.h>
#include INC_PLAT(printascii.h)
#include INC_ARCH(exception.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(utcb.h)
#include INC_SUBARCH(mm.h)
/*
* NOTE: These are defined in libl4 headers for userspace. Syslib uses
* these as conventional mr offsets to store ipc-related data commonly needed
* for all ipc parties.
*/
#define MR_TAG 0
#define MR_SENDERID 1
#define MR_UNUSED_START 2
/* Send data fault ipc to the faulty task's pager */
void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
{
/* mr[0] has the fault tag. The rest is the fault structure */
u32 mr[MR_TOTAL] = { [MR_TAG] = L4_IPC_TAG_PFAULT,
[MR_SENDERID] = current->tid };
fault_kdata_t *fault = (fault_kdata_t *)&mr[MR_UNUSED_START];
/* Fill in fault information to pass over during ipc */
fault->faulty_pc = faulty_pc;
fault->fsr = fsr;
fault->far = far;
/* Write pte of the abort address, which is different on pabt/dabt */
if (is_prefetch_abort(fsr))
fault->pte = virt_to_pte(faulty_pc);
else
fault->pte = virt_to_pte(far);
/*
* System calls save arguments (and message registers) on the kernel
* stack. They are then referenced from the caller's ktcb. Here, the
* same ktcb reference is set to the fault data so it gives the effect
* as if the ipc to the pager has the fault data in the message
* registers saved on the kernel stack during an ipc syscall. Also this
* way the fault path does not need to modify the actual utcb MRs in
* userspace.
*/
/* Assign fault such that it overlaps as the MR0 reference in ktcb. */
current->syscall_regs = (syscall_args_t *)
((unsigned long)&mr[0] -
offsetof(syscall_args_t, r3));
/* Send ipc to the task's pager */
ipc_sendwait(current->pagerid);
/*
* Pager is now notified and handling the fault. We now sleep on
* another queue.
*/
}
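/*
 * The pointer arithmetic above works because KTCB_REF_MR0() resolves MR0
 * through syscall_regs at offsetof(syscall_args_t, r3); in other words
 * (an inference from this file, not a separate definition):
 *
 *	KTCB_REF_MR0(tcb) == (u32 *)&tcb->syscall_regs->r3
 *
 * so pointing syscall_regs just below mr[0] makes the fault data appear
 * exactly where ipc_msg_copy() expects the message registers.
 */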
int check_aborts(u32 faulted_pc, u32 fsr, u32 far)
{
int ret = 0;
if (is_prefetch_abort(fsr)) {
dprintk("Prefetch abort @ ", faulted_pc);
return 0;
}
switch (fsr & ARM_FSR_MASK) {
/* Aborts that are expected on page faults: */
case DABT_PERM_PAGE:
dprintk("Page permission fault @ ", far);
ret = 0;
break;
case DABT_XLATE_PAGE:
dprintk("Page translation fault @ ", far);
ret = 0;
break;
case DABT_XLATE_SECT:
dprintk("Section translation fault @ ", far);
ret = 0;
break;
/* Aborts that can't be handled by a pager yet: */
case DABT_TERMINAL:
dprintk("Terminal fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_VECTOR:
dprintk("Vector abort (obsolete!) @ ", far);
ret = -EINVAL;
break;
case DABT_ALIGN:
dprintk("Alignment fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_XLATE_LEVEL1:
dprintk("External LVL1 translation fault @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_XLATE_LEVEL2:
dprintk("External LVL2 translation fault @ ", far);
ret = -EINVAL;
break;
case DABT_DOMAIN_SECT:
dprintk("Section domain fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_DOMAIN_PAGE:
dprintk("Page domain fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_PERM_SECT:
dprintk("Section permission fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_LFETCH_SECT:
dprintk("External section linefetch fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_LFETCH_PAGE:
dprintk("Page perm fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_NON_LFETCH_SECT:
dprintk("External section non-linefetch fault dabt @ ", far);
ret = -EINVAL;
break;
case DABT_EXT_NON_LFETCH_PAGE:
dprintk("External page non-linefetch fault dabt @ ", far);
ret = -EINVAL;
break;
default:
dprintk("FATAL: Unrecognised/Unknown data abort @ ", far);
dprintk("FATAL: FSR code: ", fsr);
ret = -EINVAL;
}
return ret;
}
/*
* @r0: The address where the program counter was during the fault.
* @r1: Contains the fault status register
* @r2: Contains the fault address register
*/
void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
set_abort_type(fsr, ARM_DABT);
dprintk("Data abort @ PC: ", faulted_pc);
if (check_aborts(faulted_pc, fsr, far) < 0) {
printascii("This abort can't be handled by any pager.\n");
goto error;
}
if (KERN_ADDR(faulted_pc))
goto error;
/* This notifies the pager */
fault_ipc_to_pager(faulted_pc, fsr, far);
return;
error:
disable_irqs();
dprintk("Unhandled data abort @ PC address: ", faulted_pc);
dprintk("FAR:", far);
dprintk("FSR:", fsr);
printascii("Kernel panic.\n");
printascii("Halting system...\n");
while (1)
;
}
void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
set_abort_type(fsr, ARM_PABT);
if (check_aborts(faulted_pc, fsr, far) < 0) {
printascii("This abort can't be handled by any pager.\n");
goto error;
}
fault_ipc_to_pager(faulted_pc, fsr, far);
return;
error:
disable_irqs();
while (1)
;
}
void dump_undef_abort(u32 undef_addr)
{
dprintk("Undefined instruction at address: ", undef_addr);
printascii("Halting system...\n");
}
extern int current_irq_nest_count;
/*
* This is called right where the nest count is increased in case the nesting
* is beyond the predefined max limit. It is another matter whether this
* limit is enough to guarantee the kernel stack is not overflown.
*/
void irq_overnest_error(void)
{
dprintk("Irqs nested beyond limit. Current count: ", current_irq_nest_count);
printascii("Halting system...\n");
while(1)
;
}

71
src/arch/arm/head.S Normal file

@@ -0,0 +1,71 @@
/*
* ARM Kernel entry point
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_ARCH(asm.h)
#define C15_C0_M 0x0001 /* MMU */
#define C15_C0_A 0x0002 /* Alignment */
#define C15_C0_C 0x0004 /* (D) Cache */
#define C15_C0_W 0x0008 /* Write buffer */
#define C15_C0_B 0x0080 /* Endianness */
#define C15_C0_S 0x0100 /* System */
#define C15_C0_R 0x0200 /* ROM */
#define C15_C0_Z 0x0800 /* Branch Prediction */
#define C15_C0_I 0x1000 /* I cache */
#define C15_C0_V 0x2000 /* High vectors */
/*
* This is the entry point of the L4 kernel on the ARM architecture.
* The boot loader must call _start with the processor in privileged
* mode and mmu disabled.
*/
.section .text.head
BEGIN_PROC(_start)
/* Setup status register for supervisor mode, interrupts disabled */
msr cpsr_fc, #ARM_MODE_SVC
/* Disable mmu if it is enabled */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #C15_C0_M @ Disable MMU
bic r0, r0, #C15_C0_C @ Disable (D) Cache
bic r0, r0, #C15_C0_I @ Disable I cache
bic r0, r0, #C15_C0_W @ Disable Write buffer
mcr p15, 0, r0, c1, c0, 0
/* Setup boot stack (physical address) */
ldr sp, _kernel_init_stack
/* Exception stacks are defined in vector page */
msr cpsr_fc, #ARM_NOIRQ_ABT
ldr sp, _kernel_abt_stack
msr cpsr_fc, #ARM_NOIRQ_IRQ
ldr sp, _kernel_irq_stack
msr cpsr_fc, #ARM_NOIRQ_FIQ
ldr sp, _kernel_fiq_stack
msr cpsr_fc, #ARM_NOIRQ_UND
ldr sp, _kernel_und_stack
msr cpsr_fc, #ARM_NOIRQ_SVC
/* Jump to start_kernel */
bl start_kernel
/* Never reached */
1:
b 1b
_kernel_init_stack:
.word _bootstack_physical
/* Exception stacks are defined in vector page */
_kernel_abt_stack:
.word __abt_stack_high
_kernel_irq_stack:
.word __irq_stack_high
_kernel_fiq_stack:
.word __fiq_stack_high
_kernel_und_stack:
.word __und_stack_high

12
src/arch/arm/linker.c Normal file

@@ -0,0 +1,12 @@
/*
* Any link-related marking variable that gets updated at runtime is listed here
*
* Copyright (C) 2007 Bahadir Balban
*/
/* The first free address after the last image loaded in physical memory */
unsigned long __svc_images_end;
/* The new boundaries of page tables after they're relocated */
unsigned long __pt_start;
unsigned long __pt_end;

34
src/arch/arm/syscall.S Normal file

@@ -0,0 +1,34 @@
/*
* The syscall page.
*
* Exported to userspace, used merely for entering the kernel.
* Actual handling happens elsewhere.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_ARCH(asm.h)
.balign 4096
.section .data.syscalls
.global __syscall_page_start;
__syscall_page_start:
/* LR_USR is inspected to find out which system call. */
BEGIN_PROC(arm_system_calls)
swi 0x14 @ ipc /* 0x0 */
swi 0x14 @ thread_switch /* 0x4 */
swi 0x14 @ thread_control /* 0x8 */
swi 0x14 @ exchange_registers /* 0xc */
swi 0x14 @ schedule /* 0x10 */
swi 0x14 @ unmap /* 0x14 */
swi 0x14 @ space_control /* 0x18 */
swi 0x14 @ processor_control /* 0x1c */
swi 0x14 @ memory_control /* 0x20 */
swi 0x14 @ getid /* 0x24 */
swi 0x14 @ kread /* 0x28 */
swi 0x14 @ kmem_grant /* 0x2C */
swi 0x14 @ kmem_reclaim /* 0x30 */
END_PROC(arm_system_calls)
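/*
 * Dispatch sketch: every entry above is one 4-byte swi, so the handler can
 * recover the system call number from the swi address it receives in LR
 * (an inference from the note above, not the actual dispatcher code):
 *
 *	nr = (swi_addr - (unsigned long)__syscall_page_start) >> 2;
 */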

10
src/arch/arm/v5/SConscript Normal file

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['mm.c', 'mmu_ops.S', 'mutex.S']
obj = env.Object(src_local)
Return('obj')

537
src/arch/arm/v5/mm.c Normal file

@@ -0,0 +1,537 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
/*
* These are indices into arrays with pgd_t- or pmd_t-sized elements,
* therefore the index must be divided by the appropriate element size.
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) & 0x3FFC) / sizeof(pgd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) & 0x3FC) / sizeof (pmd_t))
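/*
 * Worked example, assuming 4-byte pgd_t/pmd_t entries: for
 * vaddr = 0x40123000,
 *
 *	PGD_INDEX = ((0x40123000 >> 18) & 0x3FFC) / 4 = 0x401 (1MB section)
 *	PMD_INDEX = ((0x40123000 >> 10) & 0x3FC) / 4 = 0x23 (4K page in it)
 *
 * i.e. equivalent to (vaddr >> 20) and ((vaddr >> 12) & 0xFF).
 */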
/*
* Removes initial mappings needed for transition to virtual memory.
* Used one-time only.
*/
void remove_section_mapping(unsigned long vaddr)
{
pgd_table_t *pgd = current->pgd;
pgd_t pgd_i = PGD_INDEX(vaddr);
if (!((pgd->entry[pgd_i] & PGD_TYPE_MASK)
& PGD_TYPE_SECTION))
while(1);
pgd->entry[pgd_i] = 0;
pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
arm_invalidate_tlb();
}
/*
* Maps the given section-aligned @paddr to @vaddr, using as many section
* units as needed to cover @size sections. Note this overwrites an existing
* mapping if the same virtual address was already mapped.
*/
void __add_section_mapping_init(unsigned int paddr,
unsigned int vaddr,
unsigned int size,
unsigned int flags)
{
pte_t *ppte;
unsigned int l1_ptab;
unsigned int l1_offset;
/* 1st level page table address */
l1_ptab = virt_to_phys(&kspace);
/* Get the section offset for this vaddr */
l1_offset = (vaddr >> 18) & 0x3FFC;
/* The beginning entry for mapping */
ppte = (pte_t *)(l1_ptab + l1_offset);
for(int i = 0; i < size; i++) {
*ppte = 0; /* Clear out old value */
*ppte |= paddr; /* Assign physical address */
*ppte |= PGD_TYPE_SECTION; /* Assign translation type */
/* Domain is 0, therefore no writes. */
/* Only kernel access allowed */
*ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
/* Cacheability/Bufferability flags */
*ppte |= flags;
ppte++; /* Next section entry */
paddr += ARM_SECTION_SIZE; /* Next physical section */
}
return;
}
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags)
{
unsigned int psection;
unsigned int vsection;
/* Align each address to the section it resides in */
psection = paddr & ~ARM_SECTION_MASK;
vsection = vaddr & ~ARM_SECTION_MASK;
if(size == 0)
return;
__add_section_mapping_init(psection, vsection, size, flags);
return;
}
/* TODO: Make sure to flush tlb entry and caches */
void __add_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int flags, pmd_table_t *pmd)
{
unsigned int pmd_i = PMD_INDEX(vaddr);
pmd->entry[pmd_i] = paddr;
pmd->entry[pmd_i] |= PMD_TYPE_SMALL; /* Small page type */
pmd->entry[pmd_i] |= flags;
/* TODO: Are both required? Investigate */
/* TEST:
* I think cleaning or invalidating the cache is not required,
* because the entries in the cache aren't for the new mapping anyway.
* It's required if a mapping is removed, but not when newly added.
*/
arm_clean_invalidate_cache();
/* TEST: tlb must be flushed because a new mapping is present in page
* tables, and tlb is inconsistent with the page tables */
arm_invalidate_tlb();
}
/* Return whether a pmd associated with @vaddr is mapped on a pgd or not. */
pmd_table_t *pmd_exists(pgd_table_t *pgd, unsigned long vaddr)
{
unsigned int pgd_i = PGD_INDEX(vaddr);
/* Return true if non-zero pgd entry */
switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
case PGD_TYPE_COARSE:
return (pmd_table_t *)
phys_to_virt((pgd->entry[pgd_i] &
PGD_COARSE_ALIGN_MASK));
break;
case PGD_TYPE_FAULT:
return 0;
break;
case PGD_TYPE_SECTION:
dprintk("Warning, a section is already mapped "
"where a coarse page mapping is attempted:",
(u32)(pgd->entry[pgd_i]
& PGD_SECTION_ALIGN_MASK));
BUG();
break;
case PGD_TYPE_FINE:
dprintk("Warning, a fine page table is already mapped "
"where a coarse page mapping is attempted:",
(u32)(pgd->entry[pgd_i]
& PGD_FINE_ALIGN_MASK));
printk("Fine tables are unsupported. ");
printk("What is this doing here?");
BUG();
break;
default:
dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
BUG();
break;
}
return 0;
}
/* Convert a virtual address to a pte if it exists in the page tables. */
pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd)
{
pmd_table_t *pmd = pmd_exists(pgd, virtual);
if (pmd)
return (pte_t)pmd->entry[PMD_INDEX(virtual)];
else
return (pte_t)0;
}
/* Convert a virtual address to a pte if it exists in the page tables. */
pte_t virt_to_pte(unsigned long virtual)
{
pmd_table_t *pmd = pmd_exists(current->pgd, virtual);
if (pmd)
return (pte_t)pmd->entry[PMD_INDEX(virtual)];
else
return (pte_t)0;
}
void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
{
u32 pgd_i = PGD_INDEX(vaddr);
u32 pmd_phys = virt_to_phys(pmd);
/* Domain is 0, therefore no writes. */
pgd->entry[pgd_i] = (pgd_t)pmd_phys;
pgd->entry[pgd_i] |= PGD_TYPE_COARSE;
}
/*
* Maps @paddr to @vaddr, covering @size bytes, and allocates a new pmd if
* necessary. This flavour explicitly supplies the pgd to modify. This is useful
* when modifying the userspace of processes that are not currently running.
* (Only makes sense for userspace mappings since kernel mappings are common.)
*/
void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags,
pgd_table_t *pgd)
{
pmd_table_t *pmd;
unsigned int numpages = (size >> PAGE_BITS);
if (size < PAGE_SIZE) {
printascii("Error: Mapping size must be in bytes not pages.\n");
while(1);
}
if (size & PAGE_MASK)
numpages++;
/* Convert generic map flags to pagetable-specific */
BUG_ON(!(flags = space_flags_to_ptflags(flags)));
/* Map all consecutive pages that cover given size */
for (int i = 0; i < numpages; i++) {
/* Check if another mapping already has a pmd attached. */
pmd = pmd_exists(pgd, vaddr);
if (!pmd) {
/*
* If this is the first vaddr in
* this pmd, allocate new pmd
*/
pmd = alloc_pmd();
/* Attach pmd to its entry in pgd */
attach_pmd(pgd, pmd, vaddr);
}
/* Attach paddr to this pmd */
__add_mapping(page_align(paddr),
page_align(vaddr), flags, pmd);
/* Go to the next page to be mapped */
paddr += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
}
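/*
 * Size rounding above, worked through (assuming 4KB pages and PAGE_MASK
 * masking the low 12 bits here): size = 5000 gives numpages =
 * 5000 >> PAGE_BITS = 1, then 5000 & PAGE_MASK != 0 bumps it to 2, so both
 * pages touched by the range get mapped.
 */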
#if 0
/* Maps @paddr to @vaddr, covering @size bytes,
* also allocates new pmd if necessary. */
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags)
{
pmd_table_t *pmd;
unsigned int numpages = (size >> PAGE_BITS);
if (size < PAGE_SIZE) {
printascii("Error: Mapping size must be in bytes not pages.\n");
while(1);
}
if (size & PAGE_MASK)
numpages++;
/* Convert generic map flags to pagetable-specific */
BUG_ON(!(flags = space_flags_to_ptflags(flags)));
/* Map all consecutive pages that cover given size */
for (int i = 0; i < numpages; i++) {
/* Check if another vaddr in same pmd already
* has a pmd attached. */
pmd = pmd_exists(current->pgd, vaddr);
if (!pmd) {
/* If this is the first vaddr in
* this pmd, allocate new pmd */
pmd = alloc_boot_pmd();
/* Attach pmd to its entry in pgd */
attach_pmd(current->pgd, pmd, vaddr);
}
/* Attach paddr to this pmd */
__add_mapping(page_align(paddr),
page_align(vaddr), flags, pmd);
/* Go to the next page to be mapped */
paddr += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
}
#endif
#if 0
/* Maps @paddr to @vaddr, covering @size bytes,
* also allocates new pmd if necessary. */
void add_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags)
{
pmd_table_t *pmd;
unsigned int numpages = (size >> PAGE_BITS);
if (size < PAGE_SIZE) {
printascii("Error: Mapping size must be in bytes not pages.\n");
while(1);
}
if (size & PAGE_MASK)
numpages++;
/* Convert generic map flags to pagetable-specific */
BUG_ON(!(flags = space_flags_to_ptflags(flags)));
/* Map all consecutive pages that cover given size */
for (int i = 0; i < numpages; i++) {
/* Check if another vaddr in same pmd already
* has a pmd attached. */
pmd = pmd_exists(current->pgd, vaddr);
if (!pmd) {
/* If this is the first vaddr in
* this pmd, allocate new pmd */
pmd = alloc_pmd();
/* Attach pmd to its entry in pgd */
attach_pmd(current->pgd, pmd, vaddr);
}
/* Attach paddr to this pmd */
__add_mapping(page_align(paddr),
page_align(vaddr), flags, pmd);
/* Go to the next page to be mapped */
paddr += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
}
#endif
void add_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags)
{
add_mapping_pgd(paddr, vaddr, size, flags, current->pgd);
}
/* FIXME: Empty PMDs should be returned here !!! */
void __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
{
pmd_t pmd_i = PMD_INDEX(vaddr);
switch (pmd->entry[pmd_i] & PMD_TYPE_MASK) {
case PMD_TYPE_LARGE:
pmd->entry[pmd_i] = 0;
pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
break;
case PMD_TYPE_SMALL:
pmd->entry[pmd_i] = 0;
pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
break;
default:
printk("Unknown page mapping in pmd. Assuming bug.\n");
BUG();
}
return;
}
void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
pgd_t pgd_i = PGD_INDEX(vaddr);
pmd_table_t *pmd;
pmd_t pmd_i;
/*
* Clean the cache to main memory before removing the mapping. Otherwise
* entries in the cache for this mapping will cause translation faults
* if they're cleaned to main memory after the mapping is removed.
*/
arm_clean_invalidate_cache();
/* TEST:
* Can't think of a valid reason to flush tlbs here, but keeping it just
* to be safe. REMOVE: Remove it if it's unnecessary.
*/
arm_invalidate_tlb();
/* Act on the type of the pgd entry */
switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
case PGD_TYPE_COARSE:
// printk("Removing coarse mapping @ 0x%x\n", vaddr);
pmd = (pmd_table_t *)
phys_to_virt((pgd->entry[pgd_i]
& PGD_COARSE_ALIGN_MASK));
pmd_i = PMD_INDEX(vaddr);
__remove_mapping(pmd, vaddr);
break;
case PGD_TYPE_FAULT:
dprintk("Attempting to remove fault mapping. "
"Assuming bug.\n", vaddr);
BUG();
break;
case PGD_TYPE_SECTION:
printk("Removing section mapping for 0x%lx",
vaddr);
pgd->entry[pgd_i] = 0;
pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
break;
case PGD_TYPE_FINE:
printk("Table mapped is a fine page table.\n"
"Fine tables are unsupported. Assuming bug.\n");
BUG();
break;
default:
dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
printk("Assuming bug.\n");
BUG();
break;
}
/* The tlb must be invalidated here because it might have cached the
* old translation for this mapping. */
arm_invalidate_tlb();
}
void remove_mapping(unsigned long vaddr)
{
remove_mapping_pgd(vaddr, current->pgd);
}
extern pmd_table_t *pmd_array;
/*
* Moves the section-mapped kspace, which resides far away from the kernel
* image, as close to it as possible, and unmaps the old 1MB kspace section,
* which is largely unused.
*/
void relocate_page_tables(void)
{
/* Adjust the end of kernel address to page table alignment. */
unsigned long pt_new = align_up(_end_kernel, sizeof(pgd_table_t));
unsigned long reloc_offset = (unsigned long)_start_kspace - pt_new;
unsigned long pt_area_size = (unsigned long)_end_kspace -
(unsigned long)_start_kspace;
BUG_ON(reloc_offset & (SZ_1K - 1));
/* Map the new page table area into the current pgd table */
add_mapping(virt_to_phys(pt_new), pt_new, pt_area_size,
MAP_IO_DEFAULT_FLAGS);
/* Copy the entire kspace area, i.e. the pgd + static pmds. */
memcpy((void *)pt_new, _start_kspace, pt_area_size);
/* Update the only reference to current pgd table */
current->pgd = (pgd_table_t *)pt_new;
/*
* Since pmd's are also moved, update the pmd references in pgd by
* subtracting the relocation offset from each valid pmd entry.
* TODO: This would be best done within a helper function.
*/
for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
/* If there's a coarse 2nd level entry */
if ((current->pgd->entry[i] & PGD_TYPE_MASK)
== PGD_TYPE_COARSE)
current->pgd->entry[i] -= reloc_offset;
/* Update the pmd array pointer. */
pmd_array = (pmd_table_t *)((unsigned long)_start_pmd - reloc_offset);
/* Switch the virtual memory system into new area */
arm_clean_invalidate_cache();
arm_drain_writebuffer();
arm_invalidate_tlb();
arm_set_ttb(virt_to_phys(current->pgd));
arm_invalidate_tlb();
/* Unmap the old page table area */
remove_section_mapping((unsigned long)&kspace);
/* Update the page table markers to the new area. Any references would
* go to these markers. */
__pt_start = pt_new;
__pt_end = pt_new + pt_area_size;
printk("Initial page table area relocated from phys 0x%x to 0x%x\n",
virt_to_phys(&kspace), virt_to_phys(current->pgd));
}
/*
* Useful for upgrading to page-grained control over a section mapping:
* Remaps a section mapping in pages. It always allocates a pmd (there can't
* be a pre-existing pmd for a section mapping), fills in the page
* information, and replaces the direct section physical translation with the
* address of the pmd. Flushes the caches/tlbs.
*/
void remap_as_pages(void *vstart, void *vend)
{
unsigned long pstart = virt_to_phys(vstart);
unsigned long pend = virt_to_phys(vend);
unsigned long paddr = pstart;
pgd_t pgd_i = PGD_INDEX(vstart);
pmd_t pmd_i = PMD_INDEX(vstart);
pgd_table_t *pgd = (pgd_table_t *)current->pgd;
pmd_table_t *pmd = alloc_pmd();
u32 pmd_phys = virt_to_phys(pmd);
int numpages = __pfn(page_align_up(pend) - pstart);
BUG_ON((unsigned long)vstart & ARM_SECTION_MASK);
BUG_ON(pmd_i);
/* Fill in the pmd first */
while (pmd_i < numpages) {
pmd->entry[pmd_i] = paddr;
pmd->entry[pmd_i] |= PMD_TYPE_SMALL; /* Small page type */
pmd->entry[pmd_i] |= space_flags_to_ptflags(MAP_SVC_DEFAULT_FLAGS);
paddr += PAGE_SIZE;
pmd_i++;
}
/* Fill in the type to produce a complete pmd translator information */
pmd_phys |= PGD_TYPE_COARSE;
/* Make sure memory is coherent first. */
arm_clean_invalidate_cache();
arm_invalidate_tlb();
/* Replace the direct section physical address with pmd's address */
pgd->entry[pgd_i] = (pgd_t)pmd_phys;
printk("Kernel area 0x%lx - 0x%lx remapped as %d pages\n",
(unsigned long)vstart, (unsigned long)vend, numpages);
}
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end)
{
unsigned long start_i = PGD_INDEX(start);
unsigned long end_i = PGD_INDEX(end);
unsigned long irange = (end_i != 0) ? (end_i - start_i)
: (PGD_ENTRY_TOTAL - start_i);
memcpy(&to->entry[start_i], &from->entry[start_i],
irange * sizeof(pgd_t));
}

155
src/arch/arm/v5/mmu_ops.S Normal file

@@ -0,0 +1,155 @@
/*
* low-level mmu operations
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_ARCH(asm.h)
#define C15_id c0
#define C15_control c1
#define C15_ttb c2
#define C15_dom c3
#define C15_fsr c5
#define C15_far c6
#define C15_tlb c8
#define C15_C0_M 0x0001 /* MMU */
#define C15_C0_A 0x0002 /* Alignment */
#define C15_C0_C 0x0004 /* (D) Cache */
#define C15_C0_W 0x0008 /* Write buffer */
#define C15_C0_B 0x0080 /* Endianness */
#define C15_C0_S 0x0100 /* System */
#define C15_C0_R 0x0200 /* ROM */
#define C15_C0_Z 0x0800 /* Branch Prediction */
#define C15_C0_I 0x1000 /* I cache */
#define C15_C0_V 0x2000 /* High vectors */
/* FIXME: Make sure the ops that need r0 don't trash r0, or if they do,
* save it on the stack before these operations.
*/
/*
* In ARM terminology, flushing the cache means invalidating its contents.
* Cleaning the cache means, writing the contents of the cache back to
* main memory. In write-back caches the cache must be cleaned before
* flushing otherwise in-cache data is lost.
*/
BEGIN_PROC(arm_set_ttb)
mcr p15, 0, r0, C15_ttb, c0, 0
mov pc, lr
END_PROC(arm_set_ttb)
BEGIN_PROC(arm_get_domain)
mrc p15, 0, r0, C15_dom, c0, 0
mov pc, lr
END_PROC(arm_get_domain)
BEGIN_PROC(arm_set_domain)
mcr p15, 0, r0, C15_dom, c0, 0
mov pc, lr
END_PROC(arm_set_domain)
BEGIN_PROC(arm_enable_mmu)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_M
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_mmu)
BEGIN_PROC(arm_enable_icache)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_I
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_icache)
BEGIN_PROC(arm_enable_dcache)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_C
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_dcache)
BEGIN_PROC(arm_enable_wbuffer)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_W
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_wbuffer)
BEGIN_PROC(arm_enable_high_vectors)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_V
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_high_vectors)
BEGIN_PROC(arm_invalidate_cache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c7 @ Flush I cache and D cache
mov pc, lr
END_PROC(arm_invalidate_cache)
BEGIN_PROC(arm_invalidate_icache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c5, 0 @ Flush I cache
mov pc, lr
END_PROC(arm_invalidate_icache)
BEGIN_PROC(arm_invalidate_dcache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c6, 0 @ Flush D cache
mov pc, lr
END_PROC(arm_invalidate_dcache)
BEGIN_PROC(arm_clean_dcache)
1:
mrc p15, 0, pc, c7, c10, 3 @ Test/clean dcache line
bne 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
END_PROC(arm_clean_dcache)
BEGIN_PROC(arm_clean_invalidate_dcache)
1:
mrc p15, 0, pc, c7, c14, 3 @ Test/clean/flush dcache line
@ COMMENT: Why use PC?
bne 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
END_PROC(arm_clean_invalidate_dcache)
BEGIN_PROC(arm_clean_invalidate_cache)
1:
mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
@ COMMENT: Why use PC?
bne 1b
mcr p15, 0, ip, c7, c5, 0 @ Flush icache
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
END_PROC(arm_clean_invalidate_cache)
BEGIN_PROC(arm_drain_writebuffer)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c10, 4
mov pc, lr
END_PROC(arm_drain_writebuffer)
BEGIN_PROC(arm_invalidate_tlb)
mcr p15, 0, ip, c8, c7
mov pc, lr
END_PROC(arm_invalidate_tlb)
BEGIN_PROC(arm_invalidate_itlb)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c8, c5, 0
mov pc, lr
END_PROC(arm_invalidate_itlb)
BEGIN_PROC(arm_invalidate_dtlb)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c8, c6, 0
mov pc, lr
END_PROC(arm_invalidate_dtlb)

90
src/arch/arm/v5/mutex.S Normal file

@@ -0,0 +1,90 @@
/*
* ARM v5 Binary semaphore (mutex) implementation.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include INC_ARCH(asm.h)
/* Recap on swp:
* swp rx, ry, [rz]
* In one instruction:
* 1) Stores the value in ry into location pointed by rz.
* 2) Loads the value in the location of rz into rx.
* By doing so, in one instruction one can attempt to lock
* a word, and discover whether it was already locked.
*/
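/*
 * C-equivalent sketch of the primitive (for reference only; the real thing
 * is the single atomic swp instruction used below):
 *
 *	unsigned long swap(unsigned long new, unsigned long *addr)
 *	{
 *		unsigned long old = *addr;	(both steps happen
 *		*addr = new;			 atomically)
 *		return old;
 *	}
 */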
#define MUTEX_UNLOCKED 0
#define MUTEX_LOCKED 1
BEGIN_PROC(__spin_lock)
mov r1, #1
__spin:
swp r2, r1, [r0]
cmp r2, #0
bne __spin
mov pc, lr
END_PROC(__spin_lock)
BEGIN_PROC(__spin_unlock)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1 @ Debug check.
1:
bne 1b
mov pc, lr
END_PROC(__spin_unlock)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_lock)
mov r1, #1
swp r2, r1, [r0]
cmp r2, #0
movne r0, #0
moveq r0, #1
mov pc, lr
END_PROC(__mutex_lock)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_unlock)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1
1: @ Debug check.
bne 1b
mov pc, lr
END_PROC(__mutex_unlock)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_inc)
mov r1, #1
swp r2, r1, [r0]
cmp r2, #0
movne r0, #0
moveq r0, #1
mov pc, lr
END_PROC(__mutex_inc)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_dec)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1
1: @ Debug check.
bne 1b
mov pc, lr
END_PROC(__mutex_dec)

7
src/arch/arm/v6/mm.c Normal file

@@ -0,0 +1,7 @@
/*
*
* Copyright Bahadir Balban (C) 2005
*
*/


710
src/arch/arm/vectors.S Normal file

@@ -0,0 +1,710 @@
/*
* The vectors page. Includes all exception handlers.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_ARCH(asm.h)
.balign 4096
.section .data.vectors
__vector_vaddr:
BEGIN_PROC(arm_high_vector)
b arm_reset_exception
b arm_undef_exception
b arm_swi_exception
b arm_prefetch_abort_exception_reentrant
b arm_data_abort_exception_reentrant
nop
b arm_irq_exception_reentrant_with_schedule
b arm_fiq_exception
END_PROC(arm_high_vector)
.balign 4
/*
* vect_reset
*
* Upon Entry:
* - All registers are undefined and insignificant,
* - FIQ/IRQs are disabled.
* - PC: 0x00000000
*
*
* PURPOSE:
* CPU always starts executing from this vector
* upon a HW reset. It may also be used as a SW reset.
*/
BEGIN_PROC(arm_reset_exception)
END_PROC(arm_reset_exception)
/*
* vect_undef
*
* Upon Entry:
* - R14: Address of next instruction after undefined instruction
* - PC: 0x00000004
* - IRQs are disabled (CPSR[7] = 1)
*
*
* PURPOSE:
* A co-processor instruction not supported by the core can be
* emulated here. Also unrecognised/invalid instructions are handled.
*/
BEGIN_PROC(arm_undef_exception)
sub lr, lr, #4
mov r0, lr @ Get undefined abort address
mov r5, lr @ Save it in r5 in case r0 is trashed
mov lr, pc @ Save return address
ldr pc, =dump_undef_abort
1:
b 1b
END_PROC(arm_undef_exception)
.macro disable_irqs rx
mrs \rx, cpsr_fc
orr \rx, #ARM_IRQ_BIT
msr cpsr_fc, \rx
.endm
.macro enable_irqs rx
mrs \rx, cpsr_fc
bic \rx, #ARM_IRQ_BIT
msr cpsr_fc, \rx
.endm
/* Only works in SVC MODE. Know what you are doing! */
.macro get_current rx
bic \rx, sp, #0xFF0
bic \rx, \rx, #0xF
.endm
/* Saves the address of system call argument registers pushed to stack
* to the current task's ktcb. */
.macro ktcb_ref_saved_regs regs_addr, ktcb, regs_off
get_current \ktcb
ldr \regs_off, =syscall_regs_offset
ldr \regs_off, [\regs_off]
str \regs_addr, [\ktcb, \regs_off]
.endm
/*
* vect_swi
*
* Upon Entry:
* - R14: Address of next instruction after the SWI
* - PC: 0x00000008
* - R0-R12: Depending on the system call some of them contain
* indicators of what the exception means.
* - IRQs are disabled (CPSR[7] = 1)
* - SWI instruction's bits [7:0] may contain SWI indicator
*
* PURPOSE:
* Used for trapping into a debugger or OS kernel via system calls.
* Argument registers from R0 up to R12 and [7:0] of the causing SWI
* instruction contains hints of what to do with this exception. What
* R0-R12 contains depends on what userspace has put in them. Note this
* is the only exception that userspace can generate and thus has control
* on what it put into r0-rx.
*
* RECAP:
* Normally across a function call, only r0-r3 are used for passing parameters.
* Why r0-r3 only but not r4, r5...? See APCS (ARM procedure call standard)
* Short answer: r4-r12 must be preserved across procedures but r0-r3 can be
* trashed because they're set aside for argument passing. Arguments more than 4
* go on the stack. Note APCS is a *suggestion*, rather than enforcement. So if
* a userspace stub library is created that say, preserves and uses r0-r9 for a
* system call, and the system call handler (this) knows about it, it is a
* perfectly valid setup. In fact this is what we do here, we don't strictly use
* r0-r3. Depending on the system call, the set of input registers (and output
* registers to return results from the system call) may be redefined. These are
* documented for each system call in the reference manual.
* Another caveat to note in SWI usage is that we use the address offset of the
* SWI instruction to see which offset it has in the system call vector, to
* determine the correct system call, rather than [7:0] bits of the SWI.
*/
BEGIN_PROC(arm_swi_exception)
sub lr, lr, #4 @ Get address of swi instruction user executed.
stmfd sp, {r0-r8,sp,lr}^ @ Push arguments, LR_USR and SP_USR to stack.
nop
@ NOTE: SP_USR MUST be pushed here, otherwise a kernel preemption could
@ cause user mode of another process to overwrite SP_USR. The reason we
@ save it here is because the preemption path does not currently save it
@ if it is a kernel preemption. User SP can also be used here, as the
@ user might have pushed data to its stack to be used by system calls.
@ But we don't plan to pass data to the kernel in this way, so saving of
@ SP_USR can be done in preemption path as an optimisation.
/*
* The LR_usr is important here, because the user application uses a BL
* to jump to the system call SWI, so the LR_usr contains the return
* address, i.e. the next instruction after the *jumping* instruction to
* the system call SWI (not the one after the swi itself, which is in
* LR_svc).
*/
sub sp, sp, #44 @ stmfd on user registers can't writeback the SP. We do it manually.
mrs r0, spsr_fc @ psr also need saving in case this context is interrupted.
stmfd sp!, {r0}
enable_irqs r0
add r0, sp, #4 @ Pass sp address + 4 as a pointer to saved regs.
ktcb_ref_saved_regs r0, r1, r2 @ Save regs pointer in ktcb
mov r1, lr @ Pass swi instruction address in LR as arg1
mov lr, pc
ldr pc, =syscall
disable_irqs r1 @ Not disabling irqs at this point causes the SP_USR and spsr
@ to get corrupted, causing havoc.
ldmfd sp!, {r1}
msr spsr, r1
add sp, sp, #4 @ Skip, r0's location, since r0 already has returned result.
@ Note we're obliged to preserve at least r3-r8 because they're MRs.
ldmfd sp!, {r1-r8} @ Restore r1-r8 pushed to stack earlier. r0 already has return result.
ldmfd sp, {sp}^ @ Restore user stack pointer, which might have been corrupt on preemption
nop
add sp, sp, #4 @ Update sp.
ldmfd sp!, {lr} @ Load userspace return address
movs pc, lr
END_PROC(arm_swi_exception)
/* Minimal abort state saved on data abort stack right after abort vector enters: */
#define ABT_R0 0
#define ABT_SPSR -4
#define ABT_R14 -8
/* Minimal prefetch abort state saved on abort stack upon entry. */
#define ABT_R0 0
#define ABT_SPSR -4
#define ABT_R14 -8
/* Depending on the SPSR, decides whether irqs should be enabled during
* abort handling. If the abort occurred in userspace, it orders irqs to be
* enabled. If it came from kernel mode, it orders irqs enabled only if
* they were already enabled before the abort. */
.macro can_abort_enable_irqs temp1, r_spsr
and \temp1, \r_spsr, #ARM_MODE_MASK
cmp \temp1, #ARM_MODE_USR @ Usermode indicates irqs can be enabled.
beq 1f @ Z flag set. Which indicates "can enable"
and \temp1, \r_spsr, #ARM_IRQ_BIT @ Clear irq bit indicates irqs were enabled
cmp \temp1, #0 @ before the abort and can be safely enabled.
1: @ Z flag must be set for "can enable" here.
.endm
/* Pushes the user sp and lr to stack, updates the stack pointer */
.macro push_user_sp_lr sp
@ stack state: (Low) |..|..|->(Original)| (High)
stmfd \sp, {sp, lr}^ @ Push USR banked regs to stack.
nop @ Need a NOOP after push/popping user registers.
@ stack state: (Low) |SP_USR|LR_USR|->(Original)| (High)
sub \sp, \sp, #8 @ Adjust SP, since stack op on banked regs is no writeback.
@ stack state: (Low) |->SP_USR|LR_USR|(Original)| (High)
.endm
/*
* vect_pabt
*
* Upon Entry:
* - R14_svc: Address of next instruction after aborted instruction
* - R14_usr: Address of return instruction in last function call**
* - PC: 0x0000000c
* - IRQs are disabled (CPSR[7] = 1)
*
*
* PURPOSE:
* Used for handling instructions that caused *memory aborts* during
* the *prefetching* of the instruction. The instruction is also marked
* as invalid by the core. It handles the cause for the memory abort.
*
* (One reason why a memory abort would occur is when we were entering
* into a new page region that contained executable code and was not
* present in memory, or its physical-to-virtual translation was not
* present in the page tables. See other causes for memory aborts)
*
* **In case the abort occurred in userspace. This is useful if the abort
* was due to a null/invalid function pointer call. Since R14_abt
* includes the aborting instruction itself, R14_usr gives the clue to
* where this call came from.
*/
BEGIN_PROC(arm_prefetch_abort_exception_reentrant)
sub lr, lr, #4 @ lr-4 points at aborted instruction
str lr, [r13, #ABT_R14] @ Store abort address.
mrs lr, spsr @ Get SPSR
str lr, [r13, #ABT_SPSR] @ Store SPSR
str r0, [r13, #ABT_R0] @ Store R0 to use as temp register.
mov r0, r13 @ SP to R0
mrs lr, cpsr @ Change to SVC mode.
bic lr, #ARM_MODE_MASK
orr lr, lr, #ARM_MODE_SVC
msr cpsr_fc, r14
@ FIXME: Ensure 8-byte stack here.
str lr, [sp, #-8]! @ NOTE: Switched mode! Save LR_SVC 2 words down from SP_SVC.
transfer_pabt_state_to_svc: @ Move data saved on PABT stack to SVC stack.
ldr lr, [r0, #ABT_R14]
str lr, [sp, #4]
@ Stack state: |LR_SVC<-|LR_PABT|{original SP_SVC}|
ldr lr, [r0, #ABT_SPSR]
ldr r0, [r0, #ABT_R0]
stmfd sp!, {r0-r3,r12,lr}
@ Stack state: |R0<-|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
push_user_sp_lr sp
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
read_pabt_state:
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (tells why the fault occurred). FIXME: Do we need this in pabt?
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (contains the faulted data address). Do we need this in pabt?
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
ldr r3, [sp, #28] @ Load PABT_SPSR
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
bne 1f @ Branch here based on previous irq judgement.
enable_irqs r3
1:
ldr r0, [sp, #36] @ Load LR_PABT saved previously.
mov lr, pc
ldr pc, =prefetch_abort_handler @ Jump to function outside this page.
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
@ (i.e. an interrupt could overwrite spsr with current psr)
ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
nop @ User reg mod requires nop
add sp, sp, #8 @ Update SP.
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
msr spsr_cxsf, r14 @ Restore spsr register from lr.
@ Stack state: |LR_SVC<-|LR_PREV(PABT)|{original SP_SVC}|
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
@ and pc gets lr_pabt. Saved at #4 and #8 offsets
@ down from where the svc stack had left.
END_PROC(arm_prefetch_abort_exception_reentrant)
/*
* vect_dabt
*
* Upon Entry:
* - R14_abt: Address of next instruction after aborted instruction
* - PC: 0x00000010
* - IRQs are disabled (CPSR[7] = 1)
*
*
* PURPOSE:
* Used for handling instructions that caused *memory aborts* during
* the *execution* of the current instruction. This may happen if the
* instruction accessed a memory address (e.g LDR/STR) that is not
* defined as part of the currently executing process (aka illegal
* access). Another possibility is the address is within the address
* space of the process, but it is not mapped, i.e. does not have
* physical-to-virtual translation entry in the page tables.
*/
BEGIN_PROC(arm_data_abort_exception)
sub lr, lr, #8 @ lr-8 points at aborted instruction
mrc p15, 0, r2, c5, c0, 0 @ Read FSR
mrc p15, 0, r1, c6, c0, 0 @ Read FAR
mov r0, lr @ Get data abort address
mov r5, lr @ Save it in r5 in case r0 will get trashed
mov lr, pc @ Save return address
ldr pc, =data_abort_handler @ Jump to function outside this page.
1:
b 1b
END_PROC(arm_data_abort_exception)
/*
* The method of saving abort state to svc stack is identical with that of
* reentrant irq vector. Natural to this, Restoring of the previous state
* is also identical.
*/
BEGIN_PROC(arm_data_abort_exception_reentrant)
sub lr, lr, #8 @ Get abort address
str lr, [r13, #ABT_R14] @ Store abort address
mrs lr, spsr @ Get SPSR
str lr, [r13, #ABT_SPSR] @ Store SPSR
str r0, [r13, #ABT_R0] @ Store r0
@ NOTE: Can increase data abort nest here.
mov r0, r13 @ Keep current sp point in R0
mrs lr, cpsr @ Change to SVC mode.
bic lr, #ARM_MODE_MASK
orr lr, lr, #ARM_MODE_SVC
msr cpsr_fc, r14
@ FIXME: Ensure 8-byte stack here.
str lr, [sp, #-8]! @ Save lr_svc 2 words down from interrupted SP_SVC
transfer_dabt_state_to_svc:
ldr lr, [r0, #ABT_R14]
str lr, [sp, #4]
@ Stack state: |LR_SVC<-|LR_DABT|{original SP_SVC}|
ldr lr, [r0, #ABT_SPSR]
ldr r0, [r0, #ABT_R0]
stmfd sp!, {r0-r3,r12,lr}
@ Stack state: |R0<-|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
push_user_sp_lr sp
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
read_dabt_state:
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (tells why the fault occurred)
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (contains the faulted data address)
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
ldr r3, [sp, #28] @ Load DABT_SPSR
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
bne 1f @ Branch here based on previous irq judgement.
enable_irqs r3
1:
ldr r0, [sp, #36] @ Load LR_DABT saved previously.
mov lr, pc
ldr pc, =data_abort_handler @ Jump to function outside this page.
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
nop @ User reg mod requires nop
add sp, sp, #8 @ Update SP.
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
msr spsr_cxsf, r14 @ Restore spsr register from lr.
@ Stack state: |LR_SVC<-|LR_PREV(DABT)|{original SP_SVC}|
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
@ and pc gets lr_dabt. Saved at #4 and #8 offsets
@ down from where svc stack had left.
END_PROC(arm_data_abort_exception_reentrant)
/*
* vect_irq
*
* Upon Entry:
* - R14: Address of next instruction after interrupted instruction.
* - PC: 0x00000018
* - IRQs are disabled (CPSR[7] = 1)
* - A vectored interrupt controller would also provide where to jump in
* order to handle the interrupt, or an irq controller in general would
provide registers that indicate what kind of interrupt has occurred.
*
*
* PURPOSE:
* Used for handling IRQs. IRQs have lower priority compared to other
* types of exceptions.
*/
/* The most basic handler where neither context switching nor re-entry can occur. */
BEGIN_PROC(arm_irq_exception_basic)
sub lr, lr, #4
stmfd sp!, {r0-r3,lr}
mov lr, pc
ldr pc, =do_irq
ldmfd sp!, {r0-r3, pc}^
END_PROC(arm_irq_exception_basic)
/* Minimal IRQ state saved on irq stack right after irq vector enters: */
#define IRQ_R0 0
#define IRQ_SPSR -4
#define IRQ_R14 -8
/* A reentrant handler that uses svc mode stack to prevent banked lr_irq corruption. */
BEGIN_PROC(arm_irq_exception_reentrant)
sub lr, lr, #4
@ Save minimal state to irq stack:
str r14, [r13, #IRQ_R14] @ Save lr_irq
mrs r14, spsr @ Copy spsr
str r14, [r13, #IRQ_SPSR] @ Save spsr on irq stack
str r0, [r13, #IRQ_R0] @ Save r0.
mov r0, r13 @ Using r0 to keep banked sp_irq when mode is switched.
mrs r14, cpsr @ Get current psr (irq)
bic r14, #ARM_MODE_MASK @ Clear mode part from psr
orr r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
msr cpsr_fc, r14 @ Change to SVC mode.
str r14, [r13, #-8]! @ Save lr_svc 2 words down from where svc stack left.
@ Transfer minimal irq state saved to svc stack:
ldr r14, [r0, #IRQ_R14] @ Load lr_irq to lr using r0 that contains sp_irq.
str r14, [r13, #4] @ Save lr_irq 1 word down from where svc stack left.
ldr r14, [r0, #IRQ_SPSR] @ Load irq spsr.
ldr r0, [r0, #IRQ_R0] @ Restore r0.
stmfd sp!, {r0-r3,r12,lr} @ Save all of rest of irq context to svc stack.
bl do_irq @ Read irq number etc. Free to re-enable irqs here.
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
msr spsr_cxsf, lr @ Restore spsr register from lr.
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
@ and pc gets lr_irq. Saved at #4 and #8 offsets
@ down from where svc stack had left.
END_PROC(arm_irq_exception_reentrant)
.macro was_irq_mode rx
mrs \rx, spsr_fc
and \rx, \rx, #0x1F
cmp \rx, #ARM_MODE_IRQ
.endm
.macro need_resched rx, ry
get_current \rx
ldr \ry, =need_resched_offset
ldr \ry, [\ry]
ldr \ry, [\rx, \ry]
cmp \ry, #1
.endm
/*
* Keeps the PSR of the last pre-empted process. This helps to tell
* what mode the process was in when it was preempted.
*/
.global preempted_psr;
preempted_psr:
.word 0
/* Keeps track of how many nests of irqs have happened. */
.global current_irq_nest_count;
current_irq_nest_count:
.word 0
#define IRQ_NESTING_MAX 15
.macro inc_irq_cnt_with_overnest_check rx, ry
ldr \rx, =current_irq_nest_count @ Load the irq nest status word.
ldr \ry, [\rx]
add \ry, \ry, #1 @ No need for atomic inc since irqs are disabled.
str \ry, [\rx]
cmp \ry, #IRQ_NESTING_MAX @ Check no more than max nests, and die miserably if so.
ldrge pc, =irq_overnest_error
.endm
@ This decrement need not be atomic because if you are *decrementing* this, then it means
@ Preemption is already *disabled*. Ruling out preemption, only race could be against irqs.
@ If an irq preempts it during decrement and modifies it, it is still responsible to change
@ it back to the original value as it was when we read it, before it returns. So effectively
@ anything that runs during the decrement does not affect the value of the count.
.macro dec_irq_nest_cnt rx, ry
ldr \ry, =current_irq_nest_count
ldr \rx, [\ry]
sub \rx, \rx, #1
str \rx, [\ry]
.endm
.macro in_process_context rx
ldr \rx, =current_irq_nest_count
ldr \rx, [\rx]
cmp \rx, #0
.endm
/* If interrupted a process (as opposed to another irq), saves spsr value to preempted_psr */
.macro cmp_and_save_process_psr rx, process_psr
in_process_context \rx @ If nest count is 0, a running process is preempted.
ldreq \rx, =preempted_psr
streq \process_psr, [\rx]
.endm
.macro is_psr_usr rx
and \rx, \rx, #ARM_MODE_MASK
cmp \rx, #ARM_MODE_USR
.endm
#define CONTEXT_PSR 0
#define CONTEXT_R0 4
#define CONTEXT_R1 8
#define CONTEXT_R2 12
#define CONTEXT_R3 16
#define CONTEXT_R4 20
#define CONTEXT_R5 24
#define CONTEXT_R6 28
#define CONTEXT_R7 32
#define CONTEXT_R8 36
#define CONTEXT_R9 40
#define CONTEXT_R10 44
#define CONTEXT_R11 48
#define CONTEXT_R12 52
#define CONTEXT_R13 56
#define CONTEXT_R14 60
#define CONTEXT_PC 64
BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
sub lr, lr, #4
str lr, [r13, #IRQ_R14] @ Save lr_irq
mrs r14, spsr @ Copy spsr
str r14, [r13, #IRQ_SPSR] @ Save spsr on irq stack
str r0, [r13, #IRQ_R0] @ Save r0.
cmp_and_save_process_psr r0, r14 @ R14 should have spsr here.
inc_irq_cnt_with_overnest_check r0, r14
mov r0, r13 @ Using r0 to keep banked sp_irq when mode is switched.
mrs r14, cpsr @ Get current psr (irq)
bic r14, #ARM_MODE_MASK @ Clear mode part from psr
orr r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
msr cpsr_fc, r14 @ Change to SVC mode.
@ FIXME: Ensure 8-byte aligned stack here! Make sure to restore original state later!
str r14, [r13, #-8]! @ Save lr_svc 2 words down from where svc stack left. SP updated.
@ Transfer minimal irq state to svc stack:
ldr r14, [r0, #IRQ_R14] @ Load lr_irq to lr using r0 that contains sp_irq.
str r14, [r13, #4] @ Save lr_irq 1 word down from where svc stack left.
ldr r14, [r0, #IRQ_SPSR] @ Load irq spsr.
ldr r0, [r0, #IRQ_R0] @ Restore r0.
stmfd sp!, {r0-r3,r12,lr} @ Save all of rest of irq context to svc stack.
mov lr, pc
ldr pc, =do_irq @ Read irq number etc. Free to re-enable irqs here.
@ stack state: (Low) r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
ldr r0, =current_irq_nest_count
ldr r0, [r0]
cmp r0, #1 @ Expect 1 as lowest since each irq increases preempt cnt by 1.
bgt return_to_prev_context @ if (irq_nest > 1) return_to_prev_context();
need_resched r0, r1 @ Sets Z flag if need_resched == 1.
beq preemption_path @ if (irq_nest == 1 && need_resched) preemption_path();
@ Fall through: if (irq_nest == 1 && !need_resched) return_to_prev_context();
return_to_prev_context:
dec_irq_nest_cnt r0, r1
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
msr spsr_cxsf, r14 @ Restore spsr register from lr.
@ stack state: (Low) |LR_SVC<-|LR_PREV(IRQ)|{original SP_SVC}| (High)
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
@ and pc gets lr_irq. Saved at #4 and #8 offsets
@ down from where svc stack had left.
preemption_path:
disable_irqs r0 @ Interrupts can corrupt stack state.
get_current r0 @ Get the interrupted process
@ stack state: (Low) |->r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ(<return_address>)| (High)
save_interrupted_context:
add sp, sp, #4
@ stack state: (Low) |r0|->r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ(<return_address>)| (High)
ldmfd sp!, {r1-r3, r12, lr}
@ stack state: (Low) |r0|..|..|..|..|..|->LR_SVC|LR_IRQ(<return_address>)| (High)
str lr, [r0, #CONTEXT_PSR]
is_psr_usr lr
add r0, r0, #CONTEXT_R1 @ Points at register save location for #CONTEXT_R1
stmia r0!, {r1-r12}
ldmfd sp!, {r1-r2} @ At this point SP_SVC is at its original svc location.
@ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
@ register state: r0 = (register save loc for #CONTEXT_R13) r1 = LR_SVC, r2 = LR_IRQ
beq save_usr_context
save_svc_context:
stmib r0, {r1-r2} @ Save LR_SVC and LR_RETURN in advancing locations.
str sp, [r0] @ Current sp is where sp_svc has left, and r0 at #CONTEXT_SP loc.
sub r0, r0, #CONTEXT_R13 @ Go back to first word from SP position.
ldr r1, [sp, #-32] @ Load r0 from stack
str r1, [r0, #CONTEXT_R0] @ Save r0
b prepare_schedule @ All registers saved.
save_usr_context:
sub r0, r0, #CONTEXT_R13
str r2, [r0, #CONTEXT_PC] @ Save Program counter
@ LR_SVC need restoring because it won't be pushed to context frame. SP_SVC is already up-to-date.
mov lr, r1
stmfd sp, {sp, lr}^ @ Push USR banked regs to stack.
@ stack state: (Low) |r0|..|..|..|..|..|SP_USR|LR_USR|->(Original)| (High)
nop @ Need a NOP after twiddling with usr registers.
sub sp, sp, #8 @ Adjust SP, since stack op on banked regs is no writeback.
@ stack state: (Low) |r0|..|..|..|..|..|->SP_USR|LR_USR|(Original)| (High)
ldmfd sp!, {r1-r2} @ Pop USR Banked regs.
@ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
str r1, [r0, #CONTEXT_R13] @ Save SP_USR to context frame.
str r2, [r0, #CONTEXT_R14] @ Save LR_USR to context frame.
ldr r1, [sp, #-32]
str r1, [r0, #CONTEXT_R0]
@ stack state: (Low) |..|..|..|..|..|..|..|..|->(Original)| (High)
prepare_schedule:
ldr pc, =schedule
END_PROC(arm_irq_exception_reentrant_with_schedule)
/*
* Context switch implementation.
*
* Upon entry:
*
* - r0 = current ktcb ptr, r1 = next ktcb ptr. r2 and r3 = insignificant.
* - The current mode is always SVC, but the call may be coming from interrupt
* or process context.
* - If coming from interrupt, the interrupted context is already copied to current
* ktcb in the irq handler, before coming here. Interrupted context can be SVC or USR.
*
* PURPOSE: Handles all paths from irq exception, thread_switch system call,
* and sleeping in the kernel.
*
* NOTES:
* - If coming from interrupt, the interrupted context is already copied to current
* ktcb in the irq handler, before coming here. Interrupted context can be SVC or USR.
* - If coming from a process context, the current process context need saving here.
* - From irq contexts, preemption is disabled, i.e. preemption count is 1. This is because
* irqs naturally increase preemption count. From process context preemption count is 0.
* Process context disables preemption during schedule(), but re-enables before calling
* switch_to(). Irq and process contexts are distinguished by preemption_count.
* Furthermore, irqs are also disabled shortly before calling switch_to() from both contexts.
* This happens at points where stack state would be irrecoverable if an irq occurred.
*/
BEGIN_PROC(switch_to)
in_process_context r2 @ Note this depends on preempt count being 0.
beq save_process_context @ Voluntary switch needs explicit saving of current state.
dec_irq_nest_cnt r2, r3 @ Soon leaving irq context, so reduce preempt count here.
b load_next_context @ Interrupted context already saved by irq handler.
save_process_context: @ Voluntary process schedules enter here:
mrs r2, cpsr_fc
str r2, [r0]
stmib r0, {r0-r14} @ Voluntary scheduling always in SVC mode, so using svc regs.
str r14, [r0, #CONTEXT_PC] @ Store R14 as the saved PC; R14 has switch_to()'s return address.
load_next_context:
@ stack state: (Low) |..|..|..|..|..|..|..|..|..|->(Original)| (High)
mov sp, r1
ldr r0, [sp, #CONTEXT_PSR] @ Load r0 with SPSR
bic r0, r0, #ARM_IRQ_BIT @ Enable irqs on will-be-restored context.
msr spsr_cxsf, r0 @ Restore spsr from r0.
is_psr_usr r0
bne load_next_context_svc @ Loading user context is different than svc.
load_next_context_usr:
ldmib sp, {r0-r14}^ @ Load all including banked user regs.
ldr lr, [sp, #CONTEXT_PC] @ Load value of PC to r14
orr sp, sp, #0xFF0 @ Reset SP_SVC to near the top of the ktcb page,
orr sp, sp, #0x8 @ 8-byte aligned.
movs pc, lr @ Jump to user changing modes.
load_next_context_svc:
ldmib sp, {r0-r15}^ @ Switch to svc context and jump, loading R13 and R14 from stack.
@ This is OK since the jump is to current context.
END_PROC(switch_to)
/*
* vect_fiq
*
* Upon Entry:
* - R14: Address of next instruction after interrupted instruction.
* - PC: 0x00000014
* - FIQs are disabled (CPSR[6] = 1)
* - IRQs are disabled (CPSR[7] = 1)
* - As in IRQ, the irq controller would provide registers that indicate
* what kind of interrupt has occurred.
*
* PURPOSE:
* Handling of high-priority interrupts. FIQs have highest priority after
* reset and data abort exceptions. They're mainly used for achieving
* low-latency interrupts, e.g. for DMA.
*/
BEGIN_PROC(arm_fiq_exception)
END_PROC(arm_fiq_exception)
/* * * * * * * * * * * * * * * * * * * * * * * *
* External functions with absolute addresses *
* * * * * * * * * * * * * * * * * * * * * * * */
/*
* NOTE: Notes on relative and absolute symbols on this file:
*
* Note that branches (B and BL) are *RELATIVE* on ARM. So no need to take any
* special action to access symbols within this file, even though this page
* (in virtual memory) is relocated to another address at run-time (high or low
* vectors) - this is an address other than where it is linked at, at
* compile-time.
*
* To access external symbols from this file, (e.g. calling some function in the
* kernel) one needs to use the `LDR pc, =external_symbol' pseudo-instruction,
* (note the "=") and use absolute addressing. This automatically generates an
* inline data word within the current module and indirectly loads the value in
* that word to resolve the undefined reference. All other methods, (LDR, B
* instructions, or ADR pseudoinstruction) generate relative addresses, and they
* will complain for external symbols because a relative offset cannot be
* calculated for an unknown distance. In conclusion, relative branches are
* useful for accessing symbols on this page, but they mean nothing outside this
* page, because the page is relocated at run-time. So, wherever you access
* *relatively* outside this page, would be *relative* to where this page is at
* that moment.
*/
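/*
 * A minimal sketch of the distinction, using do_irq (an external kernel
 * function already called above) as the example symbol:
 *
 *	ldr pc, =do_irq		@ OK: absolute address loaded from an
 *				@ inline literal word within this page.
 *	bl do_irq		@ WRONG from this page: the relative offset
 *				@ is computed at link time and is meaningless
 *				@ once the page is relocated at run-time.
 */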
/* * * * * * * * * * * * * * * * *
* Stacks for Exception Vectors *
* * * * * * * * * * * * * * * * */
.global __stacks_end;
.global __abt_stack_high;
.global __irq_stack_high;
.global __fiq_stack_high;
.global __und_stack_high;
/*
* These are also linked at high vectors, just as any other symbol
* on this page.
*/
.balign 4
.equ __abt_stack_high, (__abt_stack - __vector_vaddr + 0xFFFF0000);
.equ __irq_stack_high, (__irq_stack - __vector_vaddr + 0xFFFF0000);
.equ __fiq_stack_high, (__fiq_stack - __vector_vaddr + 0xFFFF0000);
.equ __und_stack_high, (__und_stack - __vector_vaddr + 0xFFFF0000);
/*
* NOTE: This could be cache line aligned.
* (use a macro, e.g. ____arm_asm_cache_aligned)
*/
.balign 4
__stacks_end: .space 256
__abt_stack: .space 256
__irq_stack: .space 256
__fiq_stack: .space 256
__und_stack: .space 256
.balign 4096

View File

@@ -0,0 +1,9 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['linker.c']
obj = env.Object(src_local)
Return('obj')

8
src/arch/tests/linker.c Normal file
View File

@@ -0,0 +1,8 @@
#include <macros.h>
#include <config.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(offsets.h)
unsigned int kernel_mapping_end = 0;
unsigned int _end = 0;

View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['pl190_vic.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,104 @@
/*
* PL190 Vectored irq controller support.
*
* This is more pb926-specific as it also touches the SIC, a partial irq
* controller. Normally, an irq controller must be independent and singular.
* Later, other generic code should make these work in a cascaded setup.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/bit.h>
#include <l4/drivers/irq/pl190/pl190_vic.h>
/* FIXME: Fix the stupid uart driver and change to single definition of this! */
#if defined(read)
#undef read
#endif
#if defined(write)
#undef write
#endif
#define read(a) *((volatile unsigned int *)(a))
#define write(v, a) (*((volatile unsigned int *)(a)) = v)
#define setbit(bitvect, a) write(read(a) | (bitvect), a)
#define clrbit(bitvect, a) write(read(a) & ~(bitvect), a)
#define devio(base, reg, bitvect, setclr) \
((setclr) ? setbit(bitvect, (base + reg)) \
: clrbit(bitvect, (base + reg)))
/* Returns the irq number on this chip converting the irq bitvector */
int pl190_read_irq(void)
{
/* This also correctly returns a negative value for a spurious irq. */
return 31 - __clz(read(PL190_VIC_IRQSTATUS));
}
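/*
 * A worked example (sketch): if only irq line 5 is pending,
 * IRQSTATUS = 0x20 and __clz(0x20) = 26, so 31 - 26 = 5 is returned.
 * For a spurious irq, IRQSTATUS = 0, __clz(0) = 32, and -1 is returned.
 */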
void pl190_mask_irq(int irq)
{
/* Reading WO registers blows QEMU/PB926.
* setbit((1 << irq), PL190_VIC_INTENCLEAR); */
write(1 << irq, PL190_VIC_INTENCLEAR);
}
/* Ack is same as mask */
void pl190_ack_irq(int irq)
{
pl190_mask_irq(irq);
}
void pl190_unmask_irq(int irq)
{
setbit(1 << irq, PL190_VIC_INTENABLE);
}
int pl190_sic_read_irq(void)
{
/* As in pl190_read_irq, returns the 0-based bit index; negative if spurious. */
return 31 - __clz(read(PL190_SIC_STATUS));
}
void pl190_sic_mask_irq(int irq)
{
write(1 << irq, PL190_SIC_ENCLR);
}
void pl190_sic_ack_irq(int irq)
{
pl190_sic_mask_irq(irq);
}
void pl190_sic_unmask_irq(int irq)
{
setbit(1 << irq, PL190_SIC_ENSET);
}
/* Initialises the primary interrupt controller (the SIC is set up separately below) */
void pl190_vic_init(void)
{
/* Clear all interrupts */
write(0, PL190_VIC_INTENABLE);
write(0xFFFFFFFF, PL190_VIC_INTENCLEAR);
/* Set all irqs as normal IRQs (i.e. not FIQ) */
write(0, PL190_VIC_INTSELECT);
/* TODO: Is there a SIC_IRQ_SELECT for irq/fiq ??? */
/* Disable user-mode access to VIC registers */
write(1, PL190_VIC_PROTECTION);
/* Clear software interrupts */
write(0xFFFFFFFF, PL190_VIC_SOFTINTCLEAR);
/* At this point, all interrupts are cleared and disabled.
 * The controller is ready to receive interrupts once they are enabled. */
return;
}
void pl190_sic_init(void)
{
write(0, PL190_SIC_ENABLE);
write(0xFFFFFFFF, PL190_SIC_ENCLR);
/* Disable SIC-to-PIC direct routing of individual irq lines on SIC */
write(0xFFFFFFFF, PL190_SIC_PICENCLR);
}

View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['sp804_timer.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,107 @@
/*
* SP804 Primecell Timer driver
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/drivers/timer/sp804/sp804_timer.h>
/* FIXME: Fix the shameful uart driver and change to single definition of this! */
#if defined(read)
#undef read
#endif
#if defined(write)
#undef write
#endif
#define read(a) *((volatile unsigned int *)(a))
#define write(v, a) (*((volatile unsigned int *)(a)) = v)
#define setbit(bit, a) write(read(a) | bit, a)
#define clrbit(bit, a) write(read(a) & ~bit, a)
#define devio(base, reg, bit, setclr) \
((setclr) ? setbit(bit, base + reg) \
: clrbit(bit, base + reg))
void sp804_irq_handler(void)
{
/* A timer enabled in periodic/wrapping mode only needs irq clearing,
 * as it automatically reloads and wraps. */
write(1, SP804_TIMER1INTCLR);
}
static inline void sp804_control(int timer, int bit, int setclr)
{
unsigned long addr = SP804_TIMER1CONTROL + (timer ? SP804_TIMER2OFFSET : 0);
setclr ? setbit(bit, addr) : clrbit(bit, addr);
}
/* Sets timer's run mode:
* @periodic: periodic mode = 1, free-running = 0.
*/
#define SP804_PEREN (1 << 6)
static inline void sp804_set_runmode(int timer, int periodic)
{
sp804_control(timer, SP804_PEREN, periodic);
}
/* Sets timer's wrapping mode:
* @oneshot: oneshot = 1, wrapping = 0.
*/
#define SP804_ONESHOT (1 << 0)
static inline void sp804_set_wrapmode(int timer, int oneshot)
{
sp804_control(timer, SP804_ONESHOT, oneshot);
}
/* Sets the operational width of timers.
* In 16bit mode, top halfword is ignored.
* @width: 32bit mode = 1; 16bit mode = 0
*/
#define SP804_32BIT (1 << 1)
static inline void sp804_set_widthmode(int timer, int width)
{
sp804_control(timer, SP804_32BIT, width);
}
/* Enable/disable timer:
* @enable: enable = 1, disable = 0;
*/
#define SP804_ENABLE (1 << 7)
void sp804_enable(int timer, int enable)
{
sp804_control(timer, SP804_ENABLE, enable);
}
/* Enable/disable local irq register:
* @enable: enable = 1, disable = 0
*/
#define SP804_IRQEN (1 << 5)
void sp804_set_irq(int timer, int enable)
{
sp804_control(timer, SP804_IRQEN, enable);
}
/* Loads timer with value in val */
static inline void sp804_load_value(int timer, u32 val)
{
write(val, SP804_TIMER1LOAD + (timer ? SP804_TIMER2OFFSET : 0));
}
/* Returns current timer value */
static inline u32 sp804_read_value(int timer)
{
return read(SP804_TIMER1VALUE + (timer ? SP804_TIMER2OFFSET : 0));
}
/* TODO: These are default settings! The values must be passed as arguments */
void sp804_init(void)
{
/* 1 tick per usec */
const int duration = 250;
sp804_set_runmode(0, 1); /* Periodic */
sp804_set_wrapmode(0, 0); /* Wrapping */
sp804_set_widthmode(0, 1); /* 32 bit */
sp804_set_irq(0, 1); /* Enable */
sp804_load_value(0, duration);
}
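/*
 * A worked example (sketch, assuming the timer input clock runs at 1MHz,
 * i.e. 1 tick per usec as the comment above states): with a load value of
 * 250 in periodic mode, the timer wraps and raises its irq every 250 usec,
 * giving a 4kHz timer tick.
 */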

View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['pl011_uart.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,278 @@
/*
* PL011 Primecell UART driver
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/drivers/uart/pl011/pl011_uart.h>
#include <l4/lib/bit.h>
struct pl011_uart uart = {
.base = PL011_BASE,
.ops = {
.initialise = pl011_initialise_device,
.tx_char = pl011_tx_char,
.rx_char = pl011_rx_char,
.set_baudrate = pl011_set_baudrate,
.set_irq_mask = pl011_set_irq_mask,
.clr_irq_mask = pl011_clr_irq_mask,
},
.frame_errors = 0,
.parity_errors = 0,
.break_errors = 0,
.rx_timeout_errors = 0,
};
/* UART-specific internal error codes.
* TODO: Replace them when generic error codes are in place */
#define PL011_ERROR 1
#define PL011_EAGAIN 2
/* Error status bits in receive status register */
#define PL011_FE (1 << 0)
#define PL011_PE (1 << 1)
#define PL011_BE (1 << 2)
#define PL011_OE (1 << 3)
/* Status bits in flag register */
#define PL011_TXFE (1 << 7)
#define PL011_RXFF (1 << 6)
#define PL011_TXFF (1 << 5)
#define PL011_RXFE (1 << 4)
#define PL011_BUSY (1 << 3)
#define PL011_DCD (1 << 2)
#define PL011_DSR (1 << 1)
#define PL011_CTS (1 << 0)
int pl011_tx_char(char c)
{
unsigned int val;
val = 0;
read(val, PL011_UARTFR);
if(val & PL011_TXFF) { /* TX FIFO Full */
return -PL011_EAGAIN;
}
write(c, PL011_UARTDR);
return 0;
}
int pl011_rx_char(char * c)
{
unsigned int data;
unsigned int val;
val = 0;
read(val, PL011_UARTFR);
if(val & PL011_RXFE) { /* RX FIFO Empty */
return -PL011_EAGAIN;
}
read(data, PL011_UARTDR);
*c = (char) data;
if((data >> 8) & 0xF) { /* There were errors */
return -1; /* Signal error in xfer */
}
return 0; /* No error return */
}
/*
* Sets the baud rate in kbps. It is recommended to use
* standard rates such as: 1200, 2400, 3600, 4800, 7200,
* 9600, 14400, 19200, 28800, 38400, 57600, 76800, 115200.
*/
void pl011_set_baudrate(unsigned int baud, unsigned int clkrate)
{
const unsigned int uartclk = 24000000; /* 24MHz clock fixed on pb926 */
unsigned int val = 0;
unsigned int ipart, fpart;
unsigned int divisor;
/* Use default pb926 rate if no rate is supplied */
if(clkrate == 0)
clkrate = uartclk;
if(baud > 115200 || baud < 1200)
baud = 38400; /* Default rate. */
/* Integer part of clkrate / (16 * baud), plus the remainder scaled
 * to the 6-bit fractional UARTFBRD field, rounded to nearest. */
divisor = 16 * baud;
ipart = clkrate / divisor;
fpart = ((clkrate % divisor) * 64 + divisor / 2) / divisor;
write(ipart, PL011_UARTIBRD);
write(fpart, PL011_UARTFBRD);
/* For the IBRD and FBRD values to update, we need to
 * write to UARTLCR_H because the 3 registers are
 * actually part of a single register in hardware
 * which only updates by a write to UARTLCR_H */
read(val, PL011_UARTLCR_H);
write(val, PL011_UARTLCR_H);
return;
}
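/*
 * A worked example (sketch): with the fixed 24MHz pb926 clock at
 * 115200 baud, 24000000 / (16 * 115200) = 13.0208, so IBRD = 13
 * and FBRD = round(0.0208 * 64) = 1. At the default 38400 baud the
 * divisor is 39.0625, giving IBRD = 39 and FBRD = 4.
 */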
/* Masks the irqs given in the flags bitvector. */
void pl011_set_irq_mask(unsigned int flags)
{
unsigned int val;
val = 0;
if(flags > 0x3FF) { /* Invalid irqmask bitvector */
return;
}
read(val, PL011_UARTIMSC);
val |= flags;
write(val, PL011_UARTIMSC);
return;
}
/* Clears the irqs given in flags from masking */
void pl011_clr_irq_mask(unsigned int flags)
{
unsigned int val;
val = 0;
if(flags > 0x3FF) { /* Invalid irqmask bitvector */
return;
}
read(val, PL011_UARTIMSC);
val &= ~flags;
write(val, PL011_UARTIMSC);
return;
}
/* Produces 1 character from the data register and appends it to the
 * rx buffer. Keeps a record of timeout errors if one occurs. */
void pl011_rx_irq_handler(struct pl011_uart * uart, unsigned int flags)
{
/*
* Currently we do nothing for uart irqs, because there's no external
* client to send/receive data (e.g. userspace processes, kernel threads).
*/
return;
}
/* Consumes 1 character from tx buffer and attempts to transmit it */
void pl011_tx_irq_handler(struct pl011_uart * uart, unsigned int flags)
{
/*
* Currently we do nothing for uart irqs, because there's no external
* client to send/receive data (e.g. userspace processes, kernel threads).
*/
return;
}
/* Updates error counts and exits. Does nothing to recover errors */
void pl011_error_irq_handler(struct pl011_uart * uart, unsigned int flags)
{
if(flags & PL011_FEIRQ) {
uart->frame_errors++;
}
if(flags & PL011_PEIRQ) {
uart->parity_errors++;
}
if(flags & PL011_BEIRQ) {
uart->break_errors++;
}
if(flags & PL011_OEIRQ) {
uart->overrun_errors++;
}
return;
}
void (* pl011_handlers[]) (struct pl011_uart *, unsigned int) = {
0, /* Modem RI */
0, /* Modem CTS */
0, /* Modem DCD */
0, /* Modem DSR */
&pl011_rx_irq_handler, /* Rx */
&pl011_tx_irq_handler, /* Tx */
&pl011_rx_irq_handler, /* Rx timeout */
&pl011_error_irq_handler, /* Framing error */
&pl011_error_irq_handler, /* Parity error */
&pl011_error_irq_handler, /* Break error */
&pl011_error_irq_handler /* Overrun error */
};
/* UART main entry for irq handling. It redirects actual
* handling to handlers relevant to the irq that has occurred.
*/
void pl011_irq_handler(struct pl011_uart * uart)
{
unsigned int val;
int handler_index;
void (* handler)(struct pl011_uart *, unsigned int);
val = pl011_read_irqstat();
/* 0-based index of the highest pending irq bit; negative if none pending. */
handler_index = 31 - __clz(val);
if(handler_index < 0) { /* No irq */
return;
}
/* Jump to right handler */
handler = (void (*) (struct pl011_uart *, unsigned int))
pl011_handlers[handler_index];
if(handler) { /* If a handler is available */
(*handler)(uart, val); /* Call it */
}
return;
}
void pl011_initialise_driver(void)
{
uart.ops.initialise(&uart);
}
/* Initialises the uart class data structures, and the device.
* Terminal-like operation is assumed for default settings.
*/
int pl011_initialise_device(struct pl011_uart * uart)
{
uart->frame_errors = 0;
uart->parity_errors = 0;
uart->break_errors = 0;
uart->overrun_errors = 0;
/* Initialise data register for 8 bit data read/writes */
pl011_set_word_width(8);
/* Fifos are disabled because by default it is assumed the port
* will be used as a user terminal, and in that case the typed
* characters will only show up when fifos are flushed, rather than
* when each character is typed. We avoid this by not using fifos.
*/
pl011_disable_fifos();
/* Set default baud rate of 38400 */
pl011_set_baudrate(38400, 24000000);
/* Set default settings of 1 stop bit, no parity, no hw flow ctrl */
pl011_set_stopbits(1);
pl011_parity_disable();
/* Install the irq handler */
/* TODO: INSTALL IT HERE */
/* Enable all irqs */
pl011_clr_irq_mask(0x3FF);
/* Enable rx, tx, and uart chip */
pl011_tx_enable();
pl011_rx_enable();
pl011_uart_enable();
return 0;
}

10
src/generic/SConscript Normal file
View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c']
obj = env.Object(src_local)
Return('obj')

77
src/generic/irq.c Normal file
View File

@@ -0,0 +1,77 @@
/*
* Kernel irq handling (core irqs like timer). Also hope to add thread-level
* irq handling in the future.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/lib/mutex.h>
#include <l4/generic/scheduler.h>
#include <l4/lib/printk.h>
#include INC_PLAT(irq.h)
#include INC_ARCH(exception.h)
/* This enables the cascade irq line of the lower chip on the current chip, if such chaining exists. */
static inline void cascade_irq_chip(struct irq_chip *this_chip)
{
if (this_chip->cascade >= 0) {
BUG_ON(IRQ_CHIPS_MAX == 1);
this_chip->ops.unmask(this_chip->cascade);
}
}
void irq_controllers_init(void)
{
struct irq_chip *this_chip;
for (int i = 0; i < IRQ_CHIPS_MAX; i++) {
this_chip = irq_chip_array + i;
/* Initialise the irq chips (e.g. reset all registers) */
this_chip->ops.init();
/* Enable cascaded irqs if needed */
cascade_irq_chip(this_chip);
}
}
int global_irq_index(void)
{
struct irq_chip *this_chip;
int irq_index = 0;
/* Loop over irq chips from top to bottom until
* the actual irq on the lowest chip is found */
for (int i = 0; i < IRQ_CHIPS_MAX; i++) {
this_chip = irq_chip_array + i;
BUG_ON((irq_index = this_chip->ops.read_irq()) < 0);
if (irq_index != this_chip->cascade) {
irq_index += this_chip->offset;
/* Found the real irq, return */
break;
}
/* Hit the cascading irq. Continue on next irq chip. */
}
return irq_index;
}
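/*
 * An example walk (hypothetical chip values): say chip 0 is a VIC at
 * offset 0 whose cascade input is irq 31, and chip 1 is a SIC at offset
 * 32. For a device on SIC line 3, chip 0 reads 31 (== cascade), so the
 * loop continues; chip 1 reads 3, and 3 + 32 = 35 is returned as the
 * global irq index.
 */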
void do_irq(void)
{
int irq_index = global_irq_index();
struct irq_desc *this_irq = irq_desc_array + irq_index;
/* TODO: This can easily be done a few instructions quicker with some
 * immediate read/disable/enable_all(). We stick with this clear
 * implementation for now. */
irq_disable(irq_index);
enable_irqs();
/* TODO: Call irq_thread_notify(irq_index) for threaded irqs. */
BUG_ON(!this_irq->handler);
if (this_irq->handler() != IRQ_HANDLED) {
printk("Spurious or broken irq\n");
BUG();
}
irq_enable(irq_index);
}

101
src/generic/kmalloc.c Normal file
View File

@@ -0,0 +1,101 @@
/*
* Memory pool based kmalloc.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/list.h>
#include <l4/lib/memcache.h>
#include <l4/generic/pgalloc.h>
#include INC_GLUE(memory.h)
/* Supports this many different kmalloc sizes */
#define KMALLOC_POOLS_MAX 5
struct kmalloc_mempool {
int total;
struct list_head pool_head[KMALLOC_POOLS_MAX];
};
struct kmalloc_mempool km_pool;
void init_kmalloc()
{
for (int i = 0; i < KMALLOC_POOLS_MAX; i++)
INIT_LIST_HEAD(&km_pool.pool_head[i]);
}
/*
* Allocates memory from mem_caches that it generates on-the-fly,
* for up to KMALLOC_POOLS_MAX different sizes.
*/
void *kmalloc(int size)
{
struct mem_cache *cache, *n;
int right_sized_pool_idx = -1;
int index;
/* Search all existing pools for this size and allocate from one with free buffers */
for (int i = 0; i < km_pool.total; i++) {
list_for_each_entry_safe(cache, n, &km_pool.pool_head[i], list) {
if (cache->struct_size == size) {
right_sized_pool_idx = i;
if (cache->free)
return mem_cache_alloc(cache);
else
continue;
} else
break;
}
}
/*
* No such pool list already available at hand, and we don't have room
* for new pool lists.
*/
if ((right_sized_pool_idx < 0) &&
(km_pool.total == KMALLOC_POOLS_MAX)) {
printk("kmalloc: Too many types of pool sizes requested. "
"Giving up.\n");
BUG();
}
if (right_sized_pool_idx >= 0)
index = right_sized_pool_idx;
else
index = km_pool.total++;
/* Only allow up to page size */
BUG_ON(size >= PAGE_SIZE);
BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
size, 0)));
list_add(&cache->list, &km_pool.pool_head[index]);
return mem_cache_alloc(cache);
}
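/*
 * Usage sketch (struct foo is hypothetical; any size below PAGE_SIZE
 * works, and repeated requests for the same size are served from the
 * same pool):
 *
 *	struct foo *f = kmalloc(sizeof(struct foo));
 *	...
 *	kfree(f);
 */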
/* FIXME:
* Horrible complexity O(n^2) because we don't know which cache
* we're freeing from!!! But it's simple. ;-)
*/
int kfree(void *p)
{
struct mem_cache *cache, *tmp;
for (int i = 0; i < km_pool.total; i++)
list_for_each_entry_safe(cache, tmp, &km_pool.pool_head[i], list)
if (!mem_cache_free(cache, p)) {
if (mem_cache_is_empty(cache)) {
list_del(&cache->list);
free_page(cache);
/* Total remains the same. */
}
return 0;
}
return -1;
}
void *kzalloc(int size)
{
void *p = kmalloc(size);
memset(p, 0, size);
return p;
}

172
src/generic/pgalloc.c Normal file
View File

@@ -0,0 +1,172 @@
/*
* Simple kernel memory allocator built on top of memcache
* implementation.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/memcache.h>
#include <l4/lib/list.h>
#include <l4/generic/space.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/physmem.h>
#include INC_GLUE(memory.h)
#define PGALLOC_PGD_CACHE 0
#define PGALLOC_PMD_CACHE 1
#define PGALLOC_PG_CACHE 2
#define PGALLOC_CACHE_TOTAL 3
/* The initial chunk of physical memory allocated before any pagers. */
#define PGALLOC_INIT_GRANT SZ_1MB
/* Covers 3 main types of memory needed by the kernel. */
struct pgalloc {
struct list_head cache_list[3];
};
static struct pgalloc pgalloc;
void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
{
INIT_LIST_HEAD(&cache->list);
BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
list_add(&cache->list, &pgalloc.cache_list[cidx]);
}
void calc_kmem_usage_per_grant(kmem_usage_per_grant_t *params)
{
/* Pmds, pgds, pages in numbers, per grant */
int pmds_per_task_avg = params->task_size_avg / PMD_MAP_SIZE;
int pmds_per_kmem_grant = params->tasks_per_kmem_grant * pmds_per_task_avg;
int pgds_per_kmem_grant = params->tasks_per_kmem_grant * 1;
int pgs_per_kmem_grant = params->tasks_per_kmem_grant * 1;
/* Now the totals, in bytes */
params->pmd_total = pmds_per_kmem_grant * PMD_SIZE;
params->pgd_total = pgds_per_kmem_grant * PGD_SIZE;
params->pg_total = pgs_per_kmem_grant * PAGE_SIZE;
params->extra = params->grant_size -
(params->pgd_total + params->pmd_total +
params->pg_total);
}
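/*
 * A worked example (sketch, with assumed values: a 1MB grant,
 * task_size_avg of 16MB, PMD_MAP_SIZE of 1MB, PMD_SIZE of 1KB,
 * PGD_SIZE of 16KB, and 4 tasks per grant): each task averages 16 pmds,
 * so pmd_total = 64 * 1KB = 64KB, pgd_total = 4 * 16KB = 64KB,
 * pg_total = 4 * 4KB = 16KB, and the remaining ~880KB becomes the
 * extra handed to the page cache.
 */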
int pgalloc_add_new_grant(unsigned long pfn, int npages)
{
unsigned long physical = __pfn_to_addr(pfn);
void *virtual = (void *)phys_to_virt(physical);
struct mem_cache *pgd_cache, *pmd_cache, *pg_cache;
kmem_usage_per_grant_t params;
/* First map the whole grant */
add_mapping(physical, phys_to_virt(physical), __pfn_to_addr(npages),
MAP_SVC_RW_FLAGS);
/* Calculate how to divide buffer into different caches */
params.task_size_avg = TASK_AVERAGE_SIZE;
params.grant_size = npages * PAGE_SIZE;
/* Calculate pools for how many tasks from this much grant */
params.tasks_per_kmem_grant = (__pfn(SZ_1MB) * TASKS_PER_1MB_GRANT) /
__pfn(params.grant_size);
calc_kmem_usage_per_grant(&params);
/* Create the caches, ordered by their alignment needs. */
pmd_cache = mem_cache_init(virtual, params.pmd_total, PMD_SIZE, 1);
virtual += params.pmd_total;
pgd_cache = mem_cache_init(virtual, params.pgd_total, PGD_SIZE, 1);
virtual += params.pgd_total;
pg_cache = mem_cache_init(virtual, params.pg_total + params.extra,
PAGE_SIZE, 1);
/* Add the caches */
pgalloc_add_new_cache(pgd_cache, PGALLOC_PGD_CACHE);
pgalloc_add_new_cache(pmd_cache, PGALLOC_PMD_CACHE);
pgalloc_add_new_cache(pg_cache, PGALLOC_PG_CACHE);
return 0;
}
void init_pgalloc(void)
{
int initial_grant = PGALLOC_INIT_GRANT;
for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
INIT_LIST_HEAD(&pgalloc.cache_list[i]);
/* Grant ourselves with an initial chunk of physical memory */
physmem.free_cur = page_align_up(physmem.free_cur);
set_page_map(physmem.free_cur, __pfn(initial_grant), 1);
pgalloc_add_new_grant(__pfn(physmem.free_cur), __pfn(initial_grant));
physmem.free_cur += initial_grant;
/* Activate kmalloc */
init_kmalloc();
}
void pgalloc_remove_cache(struct mem_cache *cache)
{
list_del_init(&cache->list);
}
static inline void *pgalloc_from_cache(int cidx)
{
struct mem_cache *cache, *n;
list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_total_empty(cache))
return mem_cache_zalloc(cache);
return 0;
}
int kfree_to_cache(int cidx, void *virtual)
{
struct mem_cache *cache, *n;
list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
if (mem_cache_free(cache, virtual) == 0)
return 0;
return -1;
}
void *alloc_page(void)
{
return pgalloc_from_cache(PGALLOC_PG_CACHE);
}
void *alloc_pmd(void)
{
pmd_table_t *pmd;
if (!(pmd = alloc_boot_pmd()))
pmd = pgalloc_from_cache(PGALLOC_PMD_CACHE);
return pmd;
}
void *alloc_pgd(void)
{
return pgalloc_from_cache(PGALLOC_PGD_CACHE);
}
int free_page(void *v)
{
return kfree_to_cache(PGALLOC_PG_CACHE, v);
}
int free_pmd(void *v)
{
return kfree_to_cache(PGALLOC_PMD_CACHE, v);
}
int free_pgd(void *v)
{
return kfree_to_cache(PGALLOC_PGD_CACHE, v);
}
void *zalloc_page(void)
{
void *p = alloc_page();
memset(p, 0, PAGE_SIZE);
return p;
}

94
src/generic/physmem.c Normal file
View File

@@ -0,0 +1,94 @@
/*
* Global physical memory descriptions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/physmem.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/tcb.h>
#include <l4/lib/list.h>
#include <l4/lib/spinlock.h>
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(offsets.h)
#include INC_PLAT(printascii.h)
#include INC_ARCH(linker.h)
struct page_bitmap page_map;
static void init_page_map(unsigned long start, unsigned long end)
{
page_map.pfn_start = __pfn(start);
page_map.pfn_end = __pfn(end);
set_page_map(start, __pfn(end - start), 0);
}
/*
* Marks pages in the global page_map as used or unused.
*
* @start = start page address to set, inclusive.
* @numpages = number of pages to set.
*/
int set_page_map(unsigned long start, int numpages, int val)
{
unsigned long pfn_start = __pfn(start);
unsigned long pfn_end = __pfn(start) + numpages;
unsigned long pfn_err = 0;
if (page_map.pfn_start > pfn_start || page_map.pfn_end < pfn_start) {
pfn_err = pfn_start;
goto error;
}
if (page_map.pfn_end < pfn_end || page_map.pfn_start > pfn_end) {
pfn_err = pfn_end;
goto error;
}
if (val)
for (int i = pfn_start; i < pfn_end; i++)
page_map.map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
else
for (int i = pfn_start; i < pfn_end; i++)
page_map.map[BITWISE_GETWORD(i)] &= ~BITWISE_GETBIT(i);
return 0;
error:
BUG_MSG("Given page area is out of system page_map range: 0x%lx\n",
pfn_err << PAGE_BITS);
return -1;
}
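/*
 * Usage sketch: claim a 16-page physical region starting at phys, then
 * release it again (phys is a hypothetical page-aligned address inside
 * the system page_map range):
 *
 *	set_page_map(phys, 16, 1);
 *	...
 *	set_page_map(phys, 16, 0);
 */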
/* Describes physical memory boundaries of the system. */
struct memdesc physmem;
/* Fills in the physmem structure with free physical memory information */
void physmem_init()
{
unsigned long start = (unsigned long)_start_kernel;
unsigned long end = (unsigned long)_end_kernel;
/* Initialise page map */
init_page_map(PHYS_MEM_START, PHYS_MEM_END);
/* Mark kernel areas as used */
set_page_map(virt_to_phys(start), __pfn(end - start), 1);
/* Map initial pgd area as used */
start = (unsigned long)__pt_start;
end = (unsigned long)__pt_end;
set_page_map(virt_to_phys(current->pgd), __pfn(end - start), 1);
physmem.start = PHYS_MEM_START;
physmem.end = PHYS_MEM_END;
physmem.free_cur = __svc_images_end;
physmem.free_end = PHYS_MEM_END;
physmem.numpages = (PHYS_MEM_END - PHYS_MEM_START) / PAGE_SIZE;
}
void memory_init()
{
printascii("Initialising kernel memory allocator.\n");
init_pgalloc();
}

371
src/generic/scheduler.c Normal file
View File

@@ -0,0 +1,371 @@
/*
* A basic scheduler that does the job for now.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/list.h>
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/lib/mutex.h>
#include <l4/lib/bit.h>
#include <l4/lib/spinlock.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/preempt.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
/* A very basic runqueue */
struct runqueue {
struct spinlock lock;
struct list_head task_list;
unsigned int total;
};
static struct runqueue sched_rq[3];
static struct runqueue *rq_runnable, *rq_expired, *rq_pending;
/* This is incremented on each irq or voluntarily by preempt_disable() */
extern unsigned int current_irq_nest_count;
/* This ensures no scheduling occurs after voluntary preempt_disable() */
static int voluntary_preempt = 0;
int preemptive()
{
return current_irq_nest_count == 0;
}
int preempt_count()
{
return current_irq_nest_count;
}
void preempt_enable(void)
{
voluntary_preempt--;
current_irq_nest_count--;
/*
* Even if count increases after we check it, it will come back to zero.
* This test really is asking "is this the outermost explicit
* preempt_enable() that will really enable context switching?"
*/
if (current_irq_nest_count == 0) {
/* Then, give scheduler a chance to check need_resched == 1 */
schedule();
}
}
/* A positive irq nest count implies current context cannot be preempted. */
void preempt_disable(void)
{
current_irq_nest_count++;
voluntary_preempt++;
}
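/*
 * Usage sketch: a typical non-preemptive critical region. If the timer
 * irq sets need_resched meanwhile, the final preempt_enable() notices
 * the nest count dropping to zero and calls schedule():
 *
 *	preempt_disable();
 *	... touch scheduler/runqueue state ...
 *	preempt_enable();
 */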
void sched_runqueue_init(void)
{
for (int i = 0; i < 3; i++) {
memset(&sched_rq[i], 0, sizeof(struct runqueue));
INIT_LIST_HEAD(&sched_rq[i].task_list);
spin_lock_init(&sched_rq[i].lock);
}
rq_runnable = &sched_rq[0];
rq_expired = &sched_rq[1];
rq_pending = &sched_rq[2];
}
/* Lock scheduler. Should only be used when scheduling. */
static inline void sched_lock(void)
{
preempt_disable();
}
/* Sched unlock */
static inline void sched_unlock(void)
{
/*
* This is to make sure preempt_enable() does not
* try to schedule since we're already scheduling.
*/
need_resched = 0;
preempt_enable();
}
/* Swaps runnable and expired queues *if* runnable queue is empty. */
static void sched_rq_swap_expired_runnable(void)
{
struct runqueue *temp;
if (list_empty(&rq_runnable->task_list) &&
!list_empty(&rq_expired->task_list)) {
/* Queues are swapped and expired list becomes runnable */
temp = rq_runnable;
rq_runnable = rq_expired;
rq_expired = temp;
}
}
/* Helper for adding a new task to a runqueue */
static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
{
BUG_ON(task->rq);
/*
* If the task is erroneously already in a runqueue, this may still keep
* silent under a race condition, since its rq can't be locked in advance.
*/
BUG_ON(!list_empty(&task->rq_list));
if (front)
list_add(&task->rq_list, &rq->task_list);
else
list_add_tail(&task->rq_list, &rq->task_list);
rq->total++;
task->rq = rq;
}
static inline void
sched_rq_add_task_front(struct ktcb *task, struct runqueue *rq)
{
sched_rq_add_task(task, rq, 1);
}
static inline void
sched_rq_add_task_behind(struct ktcb *task, struct runqueue *rq)
{
sched_rq_add_task(task, rq, 0);
}
/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task)
{
list_del_init(&task->rq_list);
task->rq->total--;
task->rq = 0;
}
static inline void sched_init_task(struct ktcb *task)
{
INIT_LIST_HEAD(&task->rq_list);
task->ticks_left = TASK_TIMESLICE_DEFAULT;
task->state = TASK_INACTIVE;
task->ts_need_resched = 0;
}
void sched_tell(struct ktcb *task, unsigned int fl)
{
BUG_ON(!(SCHED_FL_MASK & fl));
/* The last flag overrides all existing flags. */
task->schedfl = fl;
}
void sched_yield()
{
need_resched = 1;
schedule();
}
/*
* Any task that wants the scheduler's attention but is not in any one of
* the currently runnable queues would call this, e.g. dormant tasks,
* sleeping tasks, newly created tasks - but not currently runnable tasks.
*/
void sched_add_pending_task(struct ktcb *task)
{
BUG_ON(task->rq);
spin_lock(&rq_pending->lock);
sched_rq_add_task_behind(task, rq_pending);
spin_unlock(&rq_pending->lock);
}
/* Tells scheduler to remove given runnable task from runqueues */
void sched_notify_sleep(struct ktcb *task)
{
sched_tell(task, SCHED_FL_SLEEP);
}
void sched_sleep_task(struct ktcb *task)
{
sched_notify_sleep(task);
if (task == current)
sched_yield();
}
/* Tells scheduler to remove given runnable task from runqueues */
void sched_notify_suspend(struct ktcb *task)
{
sched_tell(task, SCHED_FL_SUSPEND);
}
void sched_suspend_task(struct ktcb *task)
{
sched_notify_suspend(task);
if (task == current)
sched_yield();
}
/* Tells scheduler to add given task into runqueues whenever possible */
void sched_notify_resume(struct ktcb *task)
{
BUG_ON(current == task);
sched_tell(task, SCHED_FL_RESUME);
sched_add_pending_task(task);
}
/* NOTE: Might as well just set need_resched instead of full yield.
* This would work on irq context as well. */
/* Same as resume, but also yields. */
void sched_resume_task(struct ktcb *task)
{
sched_notify_resume(task);
sched_yield();
}
void sched_start_task(struct ktcb *task)
{
sched_init_task(task);
sched_resume_task(task);
}
/*
* Checks currently pending scheduling flags on the task and does two things:
* 1) Modify their state.
* 2) Modify their runqueues.
*
* An inactive/sleeping task that is pending-runnable would change state here.
* A runnable task that is pending-inactive would also change state here.
* Returns 1 if it has changed anything, e.g. task state, runqueues, and
* 0 otherwise.
*/
static int sched_next_state(struct ktcb *task)
{
unsigned int flags = task->schedfl;
int ret = 0;
switch(flags) {
case 0:
ret = 0;
break;
case SCHED_FL_SUSPEND:
task->state = TASK_INACTIVE;
ret = 1;
break;
case SCHED_FL_RESUME:
task->state = TASK_RUNNABLE;
ret = 1;
break;
case SCHED_FL_SLEEP:
task->state = TASK_SLEEPING;
ret = 1;
break;
default:
BUG();
}
task->schedfl = 0;
return ret;
}
extern void switch_to(struct ktcb *cur, struct ktcb *next);
static inline void context_switch(struct ktcb *next)
{
struct ktcb *cur = current;
// printk("(%d) to (%d)\n", cur->tid, next->tid);
/* Flush caches and everything */
arm_clean_invalidate_cache();
arm_invalidate_tlb();
arm_set_ttb(virt_to_phys(next->pgd));
arm_invalidate_tlb();
switch_to(cur, next);
// printk("Returning from yield. Tid: (%d)\n", cur->tid);
}
void scheduler()
{
struct ktcb *next = 0, *pending = 0, *n = 0;
sched_lock();
need_resched = 0;
BUG_ON(current->tid < MIN_PREDEFINED_TID ||
current->tid > MAX_PREDEFINED_TID);
BUG_ON(current->rq != rq_runnable);
/* Current task */
sched_rq_remove_task(current);
sched_next_state(current);
if (current->state == TASK_RUNNABLE) {
current->ticks_left += TASK_TIMESLICE_DEFAULT;
BUG_ON(current->ticks_left <= 0);
sched_rq_add_task_behind(current, rq_expired);
}
sched_rq_swap_expired_runnable();
/* Runnable-pending tasks */
spin_lock(&rq_pending->lock);
list_for_each_entry_safe(pending, n, &rq_pending->task_list, rq_list) {
sched_next_state(pending);
sched_rq_remove_task(pending);
if (pending->state == TASK_RUNNABLE)
sched_rq_add_task_front(pending, rq_runnable);
}
spin_unlock(&rq_pending->lock);
/* Next task */
retry_next:
if (rq_runnable->total > 0) {
next = list_entry(rq_runnable->task_list.next, struct ktcb, rq_list);
sched_next_state(next);
if (next->state != TASK_RUNNABLE) {
sched_rq_remove_task(next);
sched_rq_swap_expired_runnable();
goto retry_next;
}
} else {
printk("Idle task.\n");
while (1);
}
disable_irqs();
sched_unlock();
context_switch(next);
}
void schedule(void)
{
/* It's a royal bug to call schedule when preemption is disabled */
BUG_ON(voluntary_preempt);
if (need_resched)
scheduler();
}
void scheduler_start()
{
/* Initialise runqueues */
sched_runqueue_init();
/* Initialise inittask as runnable for first-ever scheduling */
sched_init_task(current);
current->state = TASK_RUNNABLE;
sched_rq_add_task_front(current, rq_runnable);
/* Start the timer */
timer_start();
switch_to_user(current);
}

36
src/generic/tcb.c Normal file
View File

@@ -0,0 +1,36 @@
/*
* Some ktcb related data
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/lib/idpool.h>
/* ID pools for threads and spaces. */
struct id_pool *thread_id_pool;
struct id_pool *space_id_pool;
/* Hash table for all existing tasks */
struct list_head global_task_list;
/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);
#if 0
int task_suspend(struct ktcb *task)
{
task->flags |= SCHED_FLAG_SUSPEND;
return 0;
}
int task_resume(struct ktcb *task)
{
task->flags &= ~SCHED_FLAG_SUSPEND;
return sched_enqueue_task(task);
}
#endif

96
src/generic/time.c Normal file
View File

@@ -0,0 +1,96 @@
/*
* Time.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/types.h>
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/irq.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/time.h>
#include INC_ARCH(exception.h)
/* TODO:
* 1) Add RTC support.
* 2) Need to calculate time since EPOCH,
* 3) Jiffies must be initialised to a reasonable value.
*/
volatile u32 jiffies;
static inline void increase_jiffies(void)
{
jiffies++;
}
static int noticks_noresched = 0;
/*
* Check preemption anomalies:
*
* This checks how many times no rescheduling has occurred even though ticks
* reached zero. This suggests that preemption was enabled for more than a timer
* interval. Normally, even if a preemption irq occurred during a non-preemptive
* state, preemption is *guaranteed* to occur before the next irq, provided that
* the non-preemptive period is less than a timer irq interval (and it must be).
*
* Time:
*
* |-|---------------------|-|-------------------->
* | V | V
* | Preemption irq() | Next irq.
* V V
* preempt_disabled() preempt_enabled() && preemption;
*/
void check_noticks_noresched(void)
{
if (!current->ticks_left)
noticks_noresched++;
if (noticks_noresched >= 2) {
printk("Warning, no ticks and yet no rescheduling "
"for %d times.\n", noticks_noresched);
printk("Spending more than a timer period"
" as nonpreemptive!!!\n");
}
}
void update_process_times(void)
{
struct ktcb *cur = current;
BUG_ON(cur->ticks_left < 0);
/*
* If preemption is disabled we stop reducing ticks when it reaches 0
* but set need_resched so that as soon as preempt-enabled, scheduling
* occurs.
*/
if (cur->ticks_left == 0) {
need_resched = 1;
// check_noticks_noresched();
return;
}
// noticks_noresched = 0;
if (in_kernel())
cur->kernel_time++;
else
cur->user_time++;
cur->ticks_left--;
if (!cur->ticks_left)
need_resched = 1;
}
int do_timer_irq(void)
{
increase_jiffies();
update_process_times();
return IRQ_HANDLED;
}

10
src/glue/arm/SConscript Normal file
View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['init.c', 'memory.c', 'systable.c']
obj = env.Object(src_local)
Return('obj')

396
src/glue/arm/init.c Normal file
View File

@@ -0,0 +1,396 @@
/*
* Main initialisation code for the ARM kernel
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/lib/idpool.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/platform.h>
#include <l4/generic/physmem.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_ARCH(bootdesc.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(utcb.h)
#include INC_GLUE(syscall.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(printascii.h)
#include INC_API(syscall.h)
#include INC_API(kip.h)
unsigned int kernel_mapping_end;
void init_locks(void)
{
}
/* Maps the early memory regions needed to bootstrap the system */
void init_kernel_mappings(void)
{
init_clear_ptab();
/* Map kernel area to its virtual region */
add_section_mapping_init(virt_to_phys(_start_text),
(unsigned int)_start_text, 1,
cacheable | bufferable);
/* Map kernel one-to-one to its physical region */
add_section_mapping_init(virt_to_phys(_start_text),
virt_to_phys(_start_text),
1, 0);
/* Map page table to its virtual region */
add_section_mapping_init(virt_to_phys(_start_kspace),
(unsigned int)_start_kspace,
1, 0);
/* Clean current before first time access. */
memset(current, 0, sizeof(struct ktcb));
/*
* Setup a dummy current ktcb over the bootstack, so that generic
* mapping functions can use this as the pgd source.
*/
current->pgd = &kspace;
}
void print_sections(void)
{
dprintk("_start_kernel: ",(unsigned int)_start_kernel);
dprintk("_start_text: ",(unsigned int)_start_text);
dprintk("_end_text: ", (unsigned int)_end_text);
dprintk("_start_data: ", (unsigned int)_start_data);
dprintk("_end_data: ", (unsigned int)_end_data);
dprintk("_start_vectors: ",(unsigned int)_start_vectors);
dprintk("arm_high_vector: ",(unsigned int)arm_high_vector);
dprintk("_end_vectors: ",(unsigned int)_end_vectors);
dprintk("_start_kip: ", (unsigned int) _start_kip);
dprintk("_end_kip: ", (unsigned int) _end_kip);
dprintk("_bootstack: ", (unsigned int)_bootstack);
dprintk("_end_kernel: ", (unsigned int)_end_kernel);
dprintk("_start_kspace: ", (unsigned int)_start_kspace);
dprintk("_start_pmd: ", (unsigned int)_start_pmd);
dprintk("_end_pmd: ", (unsigned int)_end_pmd);
dprintk("_end_kspace: ", (unsigned int)_end_kspace);
dprintk("_end: ", (unsigned int)_end);
}
/* Enable virtual memory using kernel's first level table
* and continue execution on virtual addresses.*/
void start_vm()
{
/*
* TTB must be 16K aligned. This is because first level tables are
* sized 16K.
*/
if ((unsigned int)&kspace & 0x3FFF)
dprintk("kspace not properly aligned for ttb:",
(u32)&kspace);
memset((void *)&kspace, 0, sizeof(pgd_table_t));
arm_set_ttb(virt_to_phys(&kspace));
	/*
	 * Set the domain access control register to 1: domain 0 becomes a
	 * "client" domain, so page table access permissions take effect for
	 * it, while the other 15 domains have no access whatsoever.
	 */
arm_set_domain(1);
/* Enable everything before mmu permissions are in place */
arm_enable_caches();
arm_enable_wbuffer();
/*
* Leave the past behind. Tlbs are invalidated, write buffer is drained.
* The whole of I + D caches are invalidated unconditionally. This is
* important to ensure that the cache is free of previously loaded
* values. Otherwise unpredictable data aborts may occur at arbitrary
* times, each time a load/store operation hits one of the invalid
* entries and those entries are cleaned to main memory.
*/
arm_invalidate_cache();
arm_drain_writebuffer();
arm_invalidate_tlb();
arm_enable_mmu();
/* Jump to virtual memory addresses */
__asm__ __volatile__ (
"add sp, sp, %0 \n" /* Update stack pointer */
#ifndef __OPTIMIZED_FP__ /* If fp not optimised away */
"add fp, fp, %0 \n" /* Update frame pointer */
#endif
		/* The add below loads r0 with the virtual address of the
		 * instruction two ahead of it (pc reads two instructions
		 * ahead; adding KERNEL_OFFSET makes it virtual). */
"add r0, pc, %0 \n"
/* Special symbol that is extracted and included in the loader.
* Debuggers can break on it to load the virtual symbol table */
".global bkpt_phys_to_virt;\n"
"bkpt_phys_to_virt:\n"
"mov pc, r0 \n" /* (r0 has next instruction) */
:
: "r" (KERNEL_OFFSET)
: "r0"
);
/* At this point, execution is on virtual addresses. */
remove_section_mapping(virt_to_phys(_start_kernel));
/*
* Restore link register (LR) for this function.
*
* NOTE: LR values are pushed onto the stack at each function call,
* which means the restored return values will be physical for all
* functions in the call stack except this function. So the caller
* of this function must never return but initiate scheduling etc.
*/
__asm__ __volatile__ (
"add %0, %0, %1 \n"
"mov pc, %0 \n"
:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
);
while(1);
}
/* This calculates what address the kip field would have in userspace. */
#define KIP_USR_OFFSETOF(kip, field) ((void *)(((unsigned long)&kip.field - \
(unsigned long)&kip) + USER_KIP_PAGE))
/* The kip is non-standard, using 0xBB to indicate mine for now ;-) */
void kip_init()
{
struct utcb **utcb_ref;
memset(&kip, 0, PAGE_SIZE);
memcpy(&kip, "L4\230K", 4); /* Name field = l4uK */
kip.api_version = 0xBB;
kip.api_subversion = 1;
kip.api_flags = 0; /* LE, 32-bit architecture */
kip.kdesc.subid = 0x1;
kip.kdesc.id = 0xBB;
kip.kdesc.gendate = (__YEAR__ << 9)|(__MONTH__ << 5)|(__DAY__);
kip.kdesc.subsubver = 0x00000001; /* Consider as .00000001 */
kip.kdesc.ver = 0;
memcpy(&kip.kdesc.supplier, "BBB", 3);
kip_init_syscalls();
/* KIP + 0xFF0 is pointer to UTCB area for this thread group. */
utcb_ref = (struct utcb **)((unsigned long)&kip + UTCB_KIP_OFFSET);
/* All thread groups have their utcb mapped at UTCB_AREA_START */
*utcb_ref = (struct utcb *)UTCB_AREA_START;
add_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE,
MAP_USR_RO_FLAGS);
}
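
/*
 * Illustrative sketch (not kernel code): how a user task could read back
 * the UTCB pointer that kip_init() stores at KIP + UTCB_KIP_OFFSET, given
 * that the KIP is mapped read-only at USER_KIP_PAGE.
 */
#if 0
static inline struct utcb *utcb_from_kip(void)
{
	return *(struct utcb **)(USER_KIP_PAGE + UTCB_KIP_OFFSET);
}
#endif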
void vectors_init()
{
unsigned int size = ((u32)_end_vectors - (u32)arm_high_vector);
/* Map the vectors in high vector page */
add_mapping(virt_to_phys(arm_high_vector),
ARM_HIGH_VECTOR, size, 0);
arm_enable_high_vectors();
/* Kernel memory trapping is enabled at this point. */
}
void abort()
{
printk("Aborting on purpose to halt system.\n");
#if 0
/* Prefetch abort */
__asm__ __volatile__ (
"mov pc, #0x0\n"
::
);
#endif
/* Data abort */
__asm__ __volatile__ (
"mov r0, #0 \n"
"ldr r0, [r0] \n"
::
);
}
void jump(struct ktcb *task)
{
__asm__ __volatile__ (
"mov lr, %0\n" /* Load pointer to context area */
"ldr r0, [lr]\n" /* Load spsr value to r0 */
"msr spsr, r0\n" /* Set SPSR as ARM_MODE_USR */
"ldmib lr, {r0-r14}^\n" /* Load all USR registers */
"nop \n" /* Spec says dont touch banked registers
* right after LDM {no-pc}^ for one instruction */
"add lr, lr, #64\n" /* Manually move to PC location. */
"ldr lr, [lr]\n" /* Load the PC_USR to LR */
"movs pc, lr\n" /* Jump to userspace, also switching SPSR/CPSR */
:
: "r" (task)
);
}
void switch_to_user(struct ktcb *task)
{
arm_clean_invalidate_cache();
arm_invalidate_tlb();
arm_set_ttb(virt_to_phys(task->pgd));
arm_invalidate_tlb();
jump(task);
}
void init_inittask(char *name, struct task_ids *ids)
{
struct svc_image *taskimg;
struct ktcb *task;
int task_pages;
/*
* NOTE: Inittask uses the kernel bootstack as its PAGE_SIZE'd kernel
* stack. There is no problem with this as the inittask always exists.
* This also solves the problem of freeing the bootstack and making use
* of the initial kspace pgd.
*/
if (!strcmp(name, "mm0"))
task = current; /* mm0 is the mockup current during init */
else
task = (struct ktcb *)zalloc_page();
/*
* Search the compile-time generated boot descriptor for information on
* available task images.
*/
	taskimg = 0;
	for (int i = 0; i < bootdesc->total_images; i++) {
		if (!strcmp(name, bootdesc->images[i].name)) {
			taskimg = &bootdesc->images[i];
			break;
		}
	}
	/* The named image must exist in the boot descriptor. */
	BUG_ON(!taskimg);
printk("\nInitialising %s.\n", name);
if (taskimg->phys_start & PAGE_MASK)
printk("Warning, image start address not page aligned.\n");
/* Calculate the number of pages the task sections occupy. */
task_pages = __pfn((page_align_up(taskimg->phys_end) -
page_align(taskimg->phys_start)));
task->context.pc = INITTASK_AREA_START;
	/* Stack grows down from just below the top of the task's area. */
	task->context.sp = INITTASK_AREA_END - 8;
task->context.spsr = ARM_MODE_USR;
set_task_ids(task, ids);
if (!task->pgd) {
BUG(); /* Inittask won't come here */
task->pgd = alloc_pgd();
/* Tasks with no pgd copy from the inittask's pgd. */
memcpy(task->pgd, current->pgd, sizeof(pgd_table_t));
}
/*
* This task's userspace mapping. This should allocate a new pmd, if not
* existing, and a new page entry on its private pgd.
*/
add_mapping_pgd(taskimg->phys_start, INITTASK_AREA_START,
task_pages * PAGE_SIZE, MAP_USR_DEFAULT_FLAGS,
task->pgd);
printk("Mapping %d pages from 0x%x to 0x%x for %s\n", task_pages,
taskimg->phys_start, INITTASK_AREA_START, name);
/* Add the physical pages used by the task to the page map */
set_page_map(taskimg->phys_start, task_pages, 1);
/* Task's rendezvous point */
waitqueue_head_init(&task->wqh_send);
waitqueue_head_init(&task->wqh_recv);
/* Tasks' rendezvous blocked list */
spin_lock_init(&task->ipc_block_lock);
INIT_LIST_HEAD(&task->ipc_block_list);
/* Global hashlist that keeps all existing tasks */
add_task_global(task);
/* Scheduler initialises the very first task itself */
}
void init_tasks()
{
struct task_ids ids;
/* Initialise thread and space id pools */
thread_id_pool = id_pool_new_init(THREAD_IDS_MAX);
space_id_pool = id_pool_new_init(SPACE_IDS_MAX);
ids.tid = id_new(thread_id_pool);
ids.spid = id_new(space_id_pool);
/* Initialise the global task list head */
INIT_LIST_HEAD(&global_task_list);
/*
* This must come last so that other tasks can copy its pgd before it
* modifies it for its own specifics.
*/
init_inittask("mm0", &ids);
}
void start_kernel(void)
{
printascii("\nstart_kernel...\n");
/* Print section boundaries for kernel image */
//print_sections();
/* Initialise section mappings for the kernel area */
init_kernel_mappings();
/* Enable virtual memory and jump to virtual addresses */
start_vm();
/* PMD tables initialised */
init_pmd_tables();
/* Initialise platform-specific page mappings, and peripherals */
platform_init();
/* Map and enable high vector page. Faults can be handled after here. */
vectors_init();
/* Remap 1MB kernel sections as 4Kb pages. */
remap_as_pages(_start_kernel, _end_kernel);
/* Move the initial pgd into a more convenient place, mapped as pages. */
relocate_page_tables();
/* Initialise memory allocators */
paging_init();
/* Initialise kip and map for userspace access */
kip_init();
/* Initialise system call page */
syscall_init();
/* Initialise everything else, e.g. locks, lists... */
init_locks();
/* Setup inittask's ktcb and push it to scheduler runqueue */
init_tasks();
/* Start the scheduler with available tasks in the runqueue */
scheduler_start();
BUG();
}

4
src/glue/arm/irq.c Normal file

@@ -0,0 +1,4 @@
/*
* ARM Generic irq handler
*/

135
src/glue/arm/memory.c Normal file

@@ -0,0 +1,135 @@
/*
* ARM virtual memory implementation
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/list.h>
#include <l4/lib/string.h>
#include <l4/lib/printk.h>
#include <l4/generic/physmem.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_PLAT(offsets.h)
#include INC_ARCH(linker.h)
/*
* Conversion from generic protection flags to arch-specific
* pte flags.
*/
unsigned int space_flags_to_ptflags(unsigned int flags)
{
switch (flags) {
case MAP_USR_RW_FLAGS:
return __MAP_USR_RW_FLAGS;
case MAP_USR_RO_FLAGS:
return __MAP_USR_RO_FLAGS;
case MAP_SVC_RW_FLAGS:
return __MAP_SVC_RW_FLAGS;
case MAP_USR_IO_FLAGS:
return __MAP_USR_IO_FLAGS;
case MAP_SVC_IO_FLAGS:
return __MAP_SVC_IO_FLAGS;
default:
BUG();
}
BUG(); return 0;
}
#define NUM_PMD_TABLES 6
#define NUM_PGD_TABLES 8
/* Initial first level page table to provide startup mappings */
SECTION(".kspace.pgd") pgd_table_t kspace;
SECTION(".kspace.pmd") pmd_table_t pmd_tables[NUM_PMD_TABLES];
/* A mini bitmap for boot pmd allocations */
static int pmd_cnt;
pmd_table_t *pmd_array;
pmd_table_t *alloc_boot_pmd(void)
{
pmd_table_t *pt;
if (pmd_cnt == NUM_PMD_TABLES)
return 0;
pt = &pmd_array[pmd_cnt++];
BUG_ON((unsigned long)pt & (sizeof(pmd_table_t) - 1));
return pt;
}
/*
* Initialises pmd allocation cache, this is called before page allocator
* initialises. After this call one can add page mappings via add_mapping().
* This also sets the alloc_pmd() global function to this boot-time version.
*/
void init_pmd_tables(void)
{
pmd_cnt = 0;
pmd_array = pmd_tables;
memset(pmd_array, 0, NUM_PMD_TABLES * sizeof(pmd_table_t));
}
/* Clears out all entries in the initial page table */
void init_clear_ptab(void)
{
memset((void *)virt_to_phys(&kspace), 0, sizeof(pgd_table_t));
}
/* Sets up struct page array and the physical memory descriptor. */
void paging_init(void)
{
read_bootdesc();
physmem_init();
memory_init();
copy_bootdesc();
}
/*
* Copies global kernel entries into another pgd. Even for sub-pmd ranges
* the associated pmd entries are copied, assuming any pmds copied are
* applicable to all tasks in the system.
*/
void copy_pgd_kern_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end)
{
/* Extend sub-pmd ranges to their respective pmd boundaries */
start = align(start, PMD_MAP_SIZE);
if (end < start)
end = 0;
/* Aligning would overflow if mapping the last virtual pmd */
if (end < align(~0, PMD_MAP_SIZE) ||
start > end) /* end may have already overflown as input */
end = align_up(end, PMD_MAP_SIZE);
else
end = 0;
copy_pgds_by_vrange(to, from, start, end);
}
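
/*
 * Worked example, assuming a 1MB PMD_MAP_SIZE: a request to copy
 * 0x100400-0x200800 is widened to the enclosing pmd boundaries
 * 0x100000-0x300000, so only whole pmds are ever copied. For a range
 * ending in the last pmd of the 4GB space, align_up() would wrap to 0,
 * which the checks above deliberately pass through as "end of address
 * space".
 */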
/* Copies all standard bits that a user process should have in its pgd */
void copy_pgd_kern_all(pgd_table_t *to)
{
pgd_table_t *from = current->pgd;
copy_pgd_kern_by_vrange(to, from, KERNEL_AREA_START, KERNEL_AREA_END);
copy_pgd_kern_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
copy_pgd_kern_by_vrange(to, from, USER_KIP_PAGE,
USER_KIP_PAGE + PAGE_SIZE);
copy_pgd_kern_by_vrange(to, from, ARM_HIGH_VECTOR,
ARM_HIGH_VECTOR + PAGE_SIZE);
copy_pgd_kern_by_vrange(to, from, ARM_SYSCALL_VECTOR,
ARM_SYSCALL_VECTOR + PAGE_SIZE);
/* We temporarily map uart registers to every process */
copy_pgd_kern_by_vrange(to, from, USERSPACE_UART_BASE,
USERSPACE_UART_BASE + PAGE_SIZE);
}

82
src/glue/arm/systable.c Normal file

@@ -0,0 +1,82 @@
/*
* System Calls
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/space.h>
#include <l4/api/errno.h>
#include INC_GLUE(memlayout.h)
#include INC_GLUE(syscall.h)
#include INC_SUBARCH(mm.h)
#include INC_API(syscall.h)
#include INC_API(kip.h)
void kip_init_syscalls(void)
{
kip.space_control = ARM_SYSCALL_PAGE + sys_space_control_offset;
kip.thread_control = ARM_SYSCALL_PAGE + sys_thread_control_offset;
kip.ipc_control = ARM_SYSCALL_PAGE + sys_ipc_control_offset;
kip.map = ARM_SYSCALL_PAGE + sys_map_offset;
kip.ipc = ARM_SYSCALL_PAGE + sys_ipc_offset;
kip.kread = ARM_SYSCALL_PAGE + sys_kread_offset;
kip.unmap = ARM_SYSCALL_PAGE + sys_unmap_offset;
kip.exchange_registers = ARM_SYSCALL_PAGE + sys_exchange_registers_offset;
kip.thread_switch = ARM_SYSCALL_PAGE + sys_thread_switch_offset;
kip.schedule = ARM_SYSCALL_PAGE + sys_schedule_offset;
kip.getid = ARM_SYSCALL_PAGE + sys_getid_offset;
kip.kmem_grant = ARM_SYSCALL_PAGE + sys_kmem_grant_offset;
kip.kmem_reclaim = ARM_SYSCALL_PAGE + sys_kmem_reclaim_offset;
}
/* Jump table for all system calls. */
syscall_fn_t syscall_table[SYSCALLS_TOTAL];
/*
* Initialises the system call jump table, for kernel to use.
* Also maps the system call page into userspace.
*/
void syscall_init()
{
syscall_table[sys_ipc_offset >> 2] = (syscall_fn_t)sys_ipc;
syscall_table[sys_thread_switch_offset >> 2] = (syscall_fn_t)sys_thread_switch;
syscall_table[sys_thread_control_offset >> 2] = (syscall_fn_t)sys_thread_control;
syscall_table[sys_exchange_registers_offset >> 2] = (syscall_fn_t)sys_exchange_registers;
syscall_table[sys_schedule_offset >> 2] = (syscall_fn_t)sys_schedule;
syscall_table[sys_getid_offset >> 2] = (syscall_fn_t)sys_getid;
syscall_table[sys_unmap_offset >> 2] = (syscall_fn_t)sys_unmap;
syscall_table[sys_space_control_offset >> 2] = (syscall_fn_t)sys_space_control;
syscall_table[sys_ipc_control_offset >> 2] = (syscall_fn_t)sys_ipc_control;
syscall_table[sys_map_offset >> 2] = (syscall_fn_t)sys_map;
syscall_table[sys_kread_offset >> 2] = (syscall_fn_t)sys_kread;
syscall_table[sys_kmem_grant_offset >> 2] = (syscall_fn_t)sys_kmem_grant;
syscall_table[sys_kmem_reclaim_offset >> 2] = (syscall_fn_t)sys_kmem_reclaim;
add_mapping(virt_to_phys(&__syscall_page_start),
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);
}
/* Checks a syscall is legitimate and dispatches to appropriate handler. */
int syscall(struct syscall_args *regs, unsigned long swi_addr)
{
/* Check if genuine system call, coming from the syscall page */
if ((swi_addr & ARM_SYSCALL_PAGE) == ARM_SYSCALL_PAGE) {
		/* Check within syscall offset boundary */
		if ((swi_addr & syscall_offset_mask) <= syscalls_end_offset) {
/* Quick jump, rather than compare each */
return (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
} else {
printk("System call received from call @ 0x%lx."
"Instruction: 0x%lx.\n", swi_addr,
*((unsigned long *)swi_addr));
return -ENOSYS;
}
} else {
printk("System call exception from unknown location 0x%lx."
"Discarding.\n", swi_addr);
return -ENOSYS;
}
}
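
/*
 * Illustrative sketch: user space enters the kernel by branching into the
 * syscall page at an offset published in the kip; the SWI instruction
 * located there traps into syscall() above. A hypothetical C-level stub
 * (real user-side stubs would read the address from the mapped KIP page):
 */
#if 0
typedef int (*syscall_stub_t)(void);

static inline int l4_thread_switch_stub(void)
{
	/* kip.thread_switch was filled in by kip_init_syscalls() above */
	return ((syscall_stub_t)kip.thread_switch)();
}
#endif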

9
src/glue/tests/SConscript Normal file

@@ -0,0 +1,9 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['main.c', 'test_kmalloc.c', 'test_memcache.c', 'test_allocpage.c', 'test_alloc_generic.c', 'debug.c', 'memory.c', 'clz.c']
obj = env.Object(src_local)
Return('obj')

15
src/glue/tests/clz.c Normal file

@@ -0,0 +1,15 @@
#include <macros.h>
#include <types.h>
#include <config.h>
/* Emulation of CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
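
/* Boundary examples: __clz(0x80000000) == 0, __clz(1) == 31, and, by this
 * implementation's convention, __clz(0) == 32. */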

7
src/glue/tests/clz.h Normal file

@@ -0,0 +1,7 @@
#ifndef __CLZ_H__
#define __CLZ_H__
unsigned int __clz(unsigned int bitvector);
#endif /* __CLZ_H__ */

59
src/glue/tests/debug.c Normal file

@@ -0,0 +1,59 @@
#include <generic/physmem.h>
#include "debug.h"
#include <stdio.h>
void print_page_area_list(struct page_area *p)
{
struct page_area *current_item = p;
struct list_head *begin = &p->list;
if (!current_item) {
printf("%-20s\n", "Null list.");
return;
}
printf("%-20s", "Page area:");
printf("%s", (list_empty(&current_item->list) ? "(Single Item.)\n" : "\n"));
printf("%-20s\n","-------------------------");
printf("%-20s %d\n", "Index:", current_item->index);
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Number of pages:", current_item->numpages);
list_for_each_entry (current_item, begin, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %d\n", "Index:", current_item->index);
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Number of pages:", current_item->numpages);
}
}
void print_subpage_area(struct subpage_area *s)
{
printf("%-20s\n%-20s\n", "Subpage area:","-------------------------");
printf("%-20s 0x%x\n", "Addr:", s->vaddr);
printf("%-20s 0x%x\n", "Size:", s->size);
printf("%-20s %d\n", "Used:", s->used);
printf("%-20s %d\n\n", "Head_of_pages:", s->head_of_pages);
}
void print_subpage_area_list(struct subpage_area *s)
{
struct subpage_area *current_item = s;
struct list_head *begin = &s->list;
if (!current_item) {
printf("Null list.\n");
return;
}
printf("%-20s", "Subpage area:");
printf("%s", (list_empty(&current_item->list) ? "(Single Item.)\n" : "\n"));
printf("%-20s\n","-------------------------");
printf("%-20s 0x%x\n", "Addr:", current_item->vaddr);
printf("%-20s 0x%x\n", "Size:", current_item->size);
printf("%-20s %d\n", "Used:", current_item->used);
printf("%-20s %d\n\n", "Head_of_pages:", current_item->head_of_pages);
list_for_each_entry (current_item, begin, list) {
print_subpage_area(current_item);
}
}

13
src/glue/tests/debug.h Normal file

@@ -0,0 +1,13 @@
#ifndef __DEBUG_H__
#define __DEBUG_H__
#include <generic/physmem.h>
#include <generic/kmalloc.h>
#include <generic/alloc_page.h>
#include <lib/list.h>
void print_physmem(struct memdesc *m);
void print_page_area_list(struct page_area *p);
void print_subpage_area_list(struct subpage_area *s);
void print_subpage_area(struct subpage_area *s);
#endif /* DEBUG_H */

0
src/glue/tests/linker.c Normal file

215
src/glue/tests/main.c Normal file

@@ -0,0 +1,215 @@
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <macros.h>
#include <config.h>
#include <generic/physmem.h>
#include <generic/kmalloc.h>
#include <generic/alloc_page.h>
#include INC_SUBARCH(mm.h)
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_PLAT(offsets.h)
#include INC_GLUE(basiclayout.h)
#include "tests.h"
#include "test_kmalloc.h"
#include "test_allocpage.h"
#include "test_memcache.h"
#include "clz.h"
unsigned int TEST_PHYSMEM_TOTAL_PAGES = 250;
unsigned int TEST_PHYSMEM_TOTAL_SIZE;
void *malloced_test_memory;
/*
 * Memory allocated from the host C library is used as if it were the
 * physical memory available on the system.
 */
void alloc_test_memory()
{
TEST_PHYSMEM_TOTAL_SIZE = (PAGE_SIZE * TEST_PHYSMEM_TOTAL_PAGES);
if (!(malloced_test_memory = malloc(TEST_PHYSMEM_TOTAL_SIZE)))
printf("Host system out of memory.\n");
PHYS_MEM_START = (unsigned int)malloced_test_memory;
PHYS_MEM_END = PHYS_MEM_START + TEST_PHYSMEM_TOTAL_SIZE;
PHYS_MEM_START = page_align_up(PHYS_MEM_START);
PHYS_MEM_END = page_align(PHYS_MEM_END);
	/* Normally _end marks where the loaded kernel image ends in
	 * physical memory, so the system can start allocating physical
	 * memory from there. Because our mock-up has no used space in the
	 * malloc()'ed memory, _end is simply the start of that memory.
	 */
_end = PHYS_MEM_START;
}
struct cmdline_opts {
char run_allocator;
int allocations;
int alloc_size_max;
int physmem_pages;
int page_size;
int no_of_pages;
char *finit_path;
char *fexit_path;
} options;
void print_options(struct cmdline_opts *opts)
{
printf("Running: %s\n",
((opts->run_allocator == 'p') ? "page allocator" :
((opts->run_allocator == 'k') ? "kmalloc/kfree" :
"memcache allocator")));
printf("Total suggested allocations: %d\n", opts->allocations);
printf("Maximum allocation size: %d, 0x%x(hex)\n\n",
opts->alloc_size_max, opts->alloc_size_max);
printf("Initial state file: %s\n", opts->finit_path);
printf("Exit state file: %s\n", opts->fexit_path);
}
void display_help(void)
{
printf("Main:\n");
printf("\tUsage:\n");
printf("\tmain\t-a=<p>|<k>|<m> [-n=<number of allocations>] [-s=<maximum size for any allocation>]\n"
"\t\t[-fi=<file to dump init state>] [-fx=<file to dump exit state>]\n"
"\t\t[-ps=<page size>] [-pn=<total number of pages>]\n");
printf("\n");
}
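
/*
 * Example invocation (hypothetical paths): run the kmalloc/kfree test
 * with 1000 allocations of up to 4096 bytes each:
 *
 *	./main -a=k -n=1000 -s=4096 -fi=/tmp/km_init.out -fx=/tmp/km_exit.out
 */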
int get_cmdline_opts(int argc, char *argv[], struct cmdline_opts *opts)
{
int parsed = 0;
memset(opts, 0, sizeof (struct cmdline_opts));
if (argc <= 1)
return -1;
for (int i = 1; i < argc; i++) {
if (argv[i][0] == '-' && argv[i][2] == '=') {
if (argv[i][1] == 'a') {
if (argv[i][3] == 'k' ||
argv[i][3] == 'm' ||
argv[i][3] == 'p') {
opts->run_allocator = argv[i][3];
parsed = 1;
}
}
if (argv[i][1] == 'n') {
opts->allocations = atoi(&argv[i][3]);
parsed = 1;
}
if (argv[i][1] == 's') {
opts->alloc_size_max = atoi(&argv[i][3]);
parsed = 1;
}
}
if (argv[i][0] == '-' && argv[i][1] == 'f'
&& argv[i][3] == '=') {
if (argv[i][2] == 'i') {
opts->finit_path = &argv[i][4];
parsed = 1;
}
if (argv[i][2] == 'x') {
opts->fexit_path = &argv[i][4];
parsed = 1;
}
}
if (argv[i][0] == '-' && argv[i][1] == 'p'
&& argv[i][3] == '=') {
if (argv[i][2] == 's') {
opts->page_size = atoi(&argv[i][4]);
parsed = 1;
}
if (argv[i][2] == 'n') {
opts->no_of_pages = atoi(&argv[i][4]);
parsed = 1;
}
}
}
if (!parsed)
return -1;
return 0;
}
void get_output_files(FILE **out1, FILE **out2,
char *alloc_func_name, char *rootpath)
{
	char pathbuf[150];
	char *root = "/tmp/";
	char *initstate_prefix = "test_initstate_";
	char *endstate_prefix = "test_endstate_";
	char *extension = ".out";
	if (!rootpath)
		rootpath = root;
	/* File path manipulations */
	sprintf(pathbuf, "%s%s%s%s", rootpath, initstate_prefix, alloc_func_name, extension);
	*out1 = fopen(pathbuf, "w+");
	sprintf(pathbuf, "%s%s%s%s", rootpath, endstate_prefix, alloc_func_name, extension);
	*out2 = fopen(pathbuf, "w+");
return;
}
int main(int argc, char *argv[])
{
	FILE *finit = NULL, *fexit = NULL;
int output_files = 0;
if (get_cmdline_opts(argc, argv, &options) < 0) {
display_help();
return 1;
}
print_options(&options);
if (options.finit_path && options.fexit_path) {
finit = fopen(options.finit_path, "w+");
fexit = fopen(options.fexit_path, "w+");
output_files = 1;
}
if (options.page_size) {
PAGE_SIZE = options.page_size;
PAGE_MASK = PAGE_SIZE - 1;
PAGE_BITS = 32 - __clz(PAGE_MASK);
printf("Using: Page Size: %d\n", PAGE_SIZE);
printf("Using: Page Mask: 0x%x\n", PAGE_MASK);
printf("Using: Page Bits: %d\n", PAGE_BITS);
}
if (options.no_of_pages) {
printf("Using: Total pages: %d\n", options.no_of_pages);
TEST_PHYSMEM_TOTAL_PAGES = options.no_of_pages;
}
alloc_test_memory();
printf("Initialising physical memory\n");
physmem_init();
printf("Initialising allocators:\n");
memory_init();
if (options.run_allocator == 'p') {
if (!output_files)
get_output_files(&finit, &fexit, "alloc_page", 0);
test_allocpage(options.allocations, options.alloc_size_max,
finit, fexit);
} else if (options.run_allocator == 'k') {
if (!output_files)
get_output_files(&finit, &fexit, "kmalloc", 0);
test_kmalloc(options.allocations, options.alloc_size_max,
finit, fexit);
} else if (options.run_allocator == 'm') {
if (!output_files)
get_output_files(&finit, &fexit, "memcache", 0);
test_memcache(options.allocations, options.alloc_size_max,
finit, fexit, 1);
} else {
printf("Invalid allocator option.\n");
}
	free(malloced_test_memory);
	if (finit)
		fclose(finit);
	if (fexit)
		fclose(fexit);
return 0;
}

7
src/glue/tests/memory.c Normal file

@@ -0,0 +1,7 @@
#include <macros.h>
#include <config.h>
#include INC_GLUE(memory.h)
unsigned int PAGE_SIZE = TEST_PAGE_SIZE;
unsigned int PAGE_MASK = TEST_PAGE_MASK;
unsigned int PAGE_BITS = TEST_PAGE_BITS;

205
src/glue/tests/test_alloc_generic.c Normal file

@@ -0,0 +1,205 @@
/*
* Generic random allocation/deallocation test
*
* Copyright 2005 (C) Bahadir Balban
*
*/
#include <macros.h>
#include <config.h>
#include INC_GLUE(memory.h)
#include <lib/printk.h>
#include <lib/list.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "test_alloc_generic.h"
#include "debug.h"
void print_test_state(unsigned int title,
print_alloc_state_t print_allocator_state)
{
switch (title) {
case TEST_STATE_BEGIN:
printf( "=================\n"
"===== BEGIN =====\n"
"=================\n\n");
break;
case TEST_STATE_MIDDLE:
printf( "==================\n"
"===== MIDDLE =====\n"
"==================\n\n");
break;
case TEST_STATE_END:
printf( "===========\n"
"=== END ===\n"
"===========\n\n");
break;
case TEST_STATE_ERROR:
printf( "=================\n"
"===== ERROR =====\n"
"=================\n\n");
break;
default:
printf("Title error.\n");
}
print_allocator_state();
}
void get_output_filepaths(FILE **out1, FILE **out2,
char *alloc_func_name)
{
	char pathbuf[150];
	char *rootpath = "/tmp/";
	char *initstate_prefix = "test_initstate_";
	char *endstate_prefix = "test_endstate_";
	char *extension = ".out";
	/* File path manipulations */
	sprintf(pathbuf, "%s%s%s%s", rootpath, initstate_prefix, alloc_func_name, extension);
	*out1 = fopen(pathbuf, "w+");
	sprintf(pathbuf, "%s%s%s%s", rootpath, endstate_prefix, alloc_func_name, extension);
	*out2 = fopen(pathbuf, "w+");
return;
}
/* This function is at the heart of generic random allocation testing.
* It is made as simple as possible, and can be used for testing all
* allocators. It randomly allocates/deallocates data and prints out
* the outcome of the action. Here are a few things it does and doesn't
* do:
* - It does not test false input on the allocators, e.g. attempting
* to free an address that hasn't been allocated, or attempting to
* free address 0.
* - It does capture and compare initial and final states of the
* allocators' internal structures after all allocations are freed.
* This is done by comparing two files filled with allocator state
* by functions supplied by the allocators themselves.
* - It expects the allocator NOT to run out of memory.
*/
int
test_alloc_free_random_order(const int MAX_ALLOCATIONS,
const int ALLOC_SIZE_MAX,
alloc_func_t alloc,
free_func_t free,
print_alloc_state_t print_allocator_state,
FILE *state_init_file, FILE *state_end_file)
{
	/* full_state_last indexes the last occupied entry in full_state[].
	 * Random deallocation uses it as the upper bound when picking a
	 * full slot to free. */
int random_size;
int random_action;
int random_index;
int alloc_so_far = 0;
int full_state_last = -1;
int halfway_through = 0;
FILE * const default_stdout = stdout;
/* Memory pointers */
void *mem[MAX_ALLOCATIONS];
/* Each element keeps track of one currently full index number */
int full_state[MAX_ALLOCATIONS];
memset(mem, 0, MAX_ALLOCATIONS * sizeof(void *));
memset(full_state, 0, MAX_ALLOCATIONS * sizeof(int));
print_test_state(TEST_STATE_BEGIN, print_allocator_state);
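	/* Non-portable trick: reassign stdout so the allocator's state dump
	 * goes to the state file; relies on the host glibc allowing the
	 * assignment. Restored right after. */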
stdout = state_init_file;
print_test_state(TEST_STATE_BEGIN, print_allocator_state);
stdout = default_stdout;
/* Randomly either allocate/deallocate at a random
* index, of random size */
srand(time(0));
/* Constraints */
while (1) {
if (alloc_so_far < (MAX_ALLOCATIONS / 2)) {
/* Give more chance to allocations at the beginning */
if ((rand() % 4) == 0) /* 1/4 chance */
random_action = FREE;
else /* 3/4 chance */
random_action = ALLOCATE;
} else {
if (!halfway_through) {
print_test_state(TEST_STATE_MIDDLE,
print_allocator_state);
halfway_through = 1;
}
			/* Give more chance to freeing after halfway through */
if ((rand() % 3) == 0) /* 1/3 chance */
random_action = ALLOCATE;
else /* 2/3 chance */
random_action = FREE;
}
random_size = (rand() % (ALLOC_SIZE_MAX-1)) + 1;
if (random_action == ALLOCATE) {
if (alloc_so_far < MAX_ALLOCATIONS) {
alloc_so_far++;
for (int i = 0; i < MAX_ALLOCATIONS; i++) {
if (mem[i] == 0) {
int allocation_error =
((mem[i] = alloc(random_size)) <= 0);
printf("%-12s%-8s%-12p%-8s%-10d\n",
"alloc:", "addr:", mem[i],
"size:", random_size);
if (allocation_error) {
print_test_state(TEST_STATE_ERROR,
print_allocator_state);
if (mem[i] < 0) {
printf("Error: alloc() returned negative value\n");
BUG();
} else if (mem[i] == 0) {
printf("Error: Allocator is out of memory.\n");
return 1;
}
}
full_state_last++;
full_state[full_state_last] = i;
break;
}
}
} else
random_action = FREE;
}
if (random_action == FREE) {
			/* all are free, can't free anymore */
			if (full_state_last < 0)
				continue;
			/* Pick any currently full slot, including the last */
			random_index = rand() % (full_state_last + 1);
if(mem[full_state[random_index]] == 0)
BUG();
free(mem[full_state[random_index]]);
printf("%-12s%-8s%-12p\n","free:",
"addr:", mem[full_state[random_index]]);
mem[full_state[random_index]] = 0;
/* Fill in the empty gap with last element */
full_state[random_index] = full_state[full_state_last];
/* Last element now in the gap
* (somewhere inbetween first and last) */
full_state[full_state_last] = 0;
/* One less in the number of full items */
full_state_last--;
}
/* Check that all allocations and deallocations took place */
if (alloc_so_far == MAX_ALLOCATIONS && full_state_last < 0)
break;
}
print_test_state(TEST_STATE_END, print_allocator_state);
stdout = state_end_file;
print_test_state(TEST_STATE_BEGIN, print_allocator_state);
stdout = default_stdout;
return 0;
}

29
src/glue/tests/test_alloc_generic.h Normal file

@@ -0,0 +1,29 @@
#ifndef __TEST_ALLOC_GENERIC_H__
#define __TEST_ALLOC_GENERIC_H__
enum test_state_title {
TEST_STATE_BEGIN = 0,
TEST_STATE_MIDDLE,
TEST_STATE_END,
TEST_STATE_ERROR
};
typedef void (*print_alloc_state_t)(void);
typedef void *(*alloc_func_t)(int size);
typedef int (*free_func_t)(void *addr);
enum alloc_action {
FREE = 0,
ALLOCATE = 1,
};
void get_output_filepaths(FILE **out1, FILE **out2,
char *alloc_func_name);
int test_alloc_free_random_order(const int MAX_ALLOCATIONS,
const int ALLOC_SIZE_MAX,
alloc_func_t alloc, free_func_t free,
print_alloc_state_t print_allocator_state,
FILE *init_state, FILE *exit_state);
#endif /* __TEST_ALLOC_GENERIC_H__ */

99
src/glue/tests/test_allocpage.c Normal file

@@ -0,0 +1,99 @@
#include <macros.h>
#include <config.h>
#include INC_GLUE(memory.h)
#include <lib/printk.h>
#include <lib/list.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "test_allocpage.h"
#include "test_alloc_generic.h"
#include "debug.h"
unsigned int PAGE_ALLOCATIONS = 30;
unsigned int PAGE_ALLOC_SIZE_MAX = 8;
extern struct page_area *areas;
extern struct mem_cache *primary_cache;
extern struct mem_cache *secondary_cache;
extern struct mem_cache *spare_cache;
void print_page_area(struct page_area *ar, int areano)
{
printf("Area starts @: 0x%x, %s, numpages: %d\n",
ar->index << PAGE_BITS,
(ar->used) ? "used" : "unused", ar->numpages);
return;
}
void print_areas(struct page_area *ar)
{
struct page_area *cur = ar;
int areano = 1;
printf("Page areas:\n-------------\n");
if (!ar) {
printf("None.\n");
return;
}
print_page_area(cur, areano++);
list_for_each_entry(cur, &ar->list, list) {
print_page_area(cur, areano++);
}
return;
}
void print_cache(struct mem_cache *c, int cacheno)
{
printf("Cache %d state:\n-------------\n", cacheno);
printf("Total: %d\n", c->total);
printf("Free: %d\n", c->free);
printf("Start: 0x%x\n", c->start);
return;
}
void print_caches(struct mem_cache *c)
{
int caches = 1;
struct mem_cache *cur = c;
if (!c) {
printf("None.\n");
return;
}
print_cache(cur, caches++);
list_for_each_entry(cur, &c->list, list) {
print_cache(cur, caches++);
}
return;
}
void print_page_allocator_state(void)
{
print_areas(areas);
printf("PRIMARY:\n--------\n");
print_caches(primary_cache);
printf("SECONDARY:\n----------\n");
print_caches(secondary_cache);
}
/* FIXME: with current default parameters (allocations = 30, sizemax = 8),
* for some odd reason, we got the bug at line 280 in alloc_page.c.
* Very weird. Find out why.
*/
void test_allocpage(int page_allocations, int page_alloc_size_max,
FILE *init_state, FILE *exit_state)
{
if (!page_allocations)
page_allocations = PAGE_ALLOCATIONS;
if (!page_alloc_size_max)
page_alloc_size_max = PAGE_ALLOC_SIZE_MAX;
printf("\nPAGE ALLOCATOR TEST:====================================\n\n");
test_alloc_free_random_order(page_allocations, page_alloc_size_max,
alloc_page, free_page,
print_page_allocator_state,
init_state, exit_state);
}

13
src/glue/tests/test_allocpage.h Normal file

@@ -0,0 +1,13 @@
#ifndef __TEST_ALLOCPAGE_H__
#define __TEST_ALLOCPAGE_H__
#include <generic/alloc_page.h>
#include "tests.h"
void test_allocpage(int num_allocs, int alloc_max, FILE *init, FILE *exit);
void print_page_area(struct page_area *a, int no);
void print_caches(struct mem_cache *c);
void print_cache(struct mem_cache *c, int cacheno);
void print_areas(struct page_area *ar);
void print_page_area(struct page_area *ar, int areano);
#endif

39
src/glue/tests/test_kmalloc.c Normal file

@@ -0,0 +1,39 @@
#include <macros.h>
#include <config.h>
#include <lib/list.h>
#include <lib/printk.h>
#include INC_GLUE(memory.h)
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "test_alloc_generic.h"
#include "test_allocpage.h"
#include "debug.h"
#include "tests.h"
extern struct subpage_area *km_areas;
extern struct page_area *areas;
void print_kmalloc_state(void)
{
print_subpage_area_list(km_areas);
}
void test_kmalloc(int kmalloc_allocations, int kmalloc_alloc_size_max,
FILE *init_state, FILE *exit_state)
{
unsigned int KMALLOC_ALLOCATIONS = 20;
unsigned int KMALLOC_ALLOC_SIZE_MAX = (PAGE_SIZE * 3);
if (!kmalloc_allocations)
kmalloc_allocations = KMALLOC_ALLOCATIONS;
if (!kmalloc_alloc_size_max)
kmalloc_alloc_size_max = KMALLOC_ALLOC_SIZE_MAX;
test_alloc_free_random_order(kmalloc_allocations, kmalloc_alloc_size_max,
kmalloc, kfree, print_kmalloc_state,
init_state, exit_state);
}

8
src/glue/tests/test_kmalloc.h Normal file

@@ -0,0 +1,8 @@
#ifndef __TEST_KMALLOC_H__
#define __TEST_KMALLOC_H__
#include <generic/kmalloc.h>
void test_kmalloc(int num_allocs, int allocs_max, FILE *initstate, FILE *exitstate);
#endif

112
src/glue/tests/test_memcache.c Normal file

@@ -0,0 +1,112 @@
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <lib/list.h>
#include <lib/printk.h>
#include <generic/memcache.h>
#include "test_memcache.h"
#include "test_alloc_generic.h"
#include "debug.h"
#include "tests.h"
#include <macros.h>
#include <config.h>
#include INC_GLUE(memory.h)
unsigned int MEM_CACHE_SIZE;
struct mem_cache *this;
void *buffer;
void *mem_cache_alloc_wrapped(int size)
{
return mem_cache_alloc(this);
}
int mem_cache_free_wrapped(void *addr)
{
return mem_cache_free(this, addr);
}
void print_memcache_state(void)
{
printf("%-15s%d\n","Total:", this->total);
printf("%-15s%d\n","Free:", this->free);
printf("Bitmap has %d words:\n", BITWISE_GETWORD(this->total) + 1);
for (int i = 0; i <= BITWISE_GETWORD(this->total); i++)
printf("0x%x\n", this->bitmap[i]);
}
int test_memcache_init_aligned(int *items_max, int item_size)
{
if (item_size * 10 > MEM_CACHE_SIZE)
MEM_CACHE_SIZE = item_size * 10;
if (!(buffer = calloc(1, MEM_CACHE_SIZE))) {
printf("System out of memory.\n");
BUG();
}
if ((this = mem_cache_init((unsigned int)buffer,
MEM_CACHE_SIZE,
item_size, 1)) == 0) {
printf("Unable to initialise cache.\n");
return -1;
}
*items_max = mem_cache_total_free(this);
printf("\nMEMCACHE TEST: ALIGNED ELEMENTS\n==========================\n");
printf("%-30s%d\n", "Item size:", item_size);
printf("%-30s0x%x\n", "Cache occupied space:", MEM_CACHE_SIZE);
printf("%-30s%d\n","Total items in cache:", *items_max);
printf("%-30s0x%x\n","Total items space:", (*items_max * item_size));
return 0;
}
int test_memcache_init(int *items_max, int item_size)
{
if (item_size * 10 > MEM_CACHE_SIZE)
MEM_CACHE_SIZE = item_size * 10;
printf("%s: Allocating cache memory.\n",__FUNCTION__);
if (!(buffer = calloc(1, MEM_CACHE_SIZE))) {
printf("System out of memory.\n");
BUG();
}
if ((this = mem_cache_init((unsigned int)buffer,
MEM_CACHE_SIZE,
item_size, 0)) == 0) {
printf("Unable to initialise cache.\n");
return -1;
}
*items_max = mem_cache_total_free(this);
printf("\nMEMCACHE TEST:\n========================\n");
printf("%-30s%d\n", "Item size:", item_size);
printf("%-30s0x%x\n", "Cache occupied space:", MEM_CACHE_SIZE);
printf("%-30s%d\n","Total items in cache:", *items_max);
printf("%-30s0x%x\n","Total items space:", (*items_max * item_size));
return 0;
}
int test_memcache(int items_max, int item_size, FILE *init_state, FILE *exit_state, int aligned)
{
const unsigned int TEST_CACHE_ITEM_SIZE = 5;
MEM_CACHE_SIZE = PAGE_SIZE * 5;
if (!item_size)
item_size = TEST_CACHE_ITEM_SIZE;
/* items_max value is ignored and overwritten because caches have fixed size. */
test_memcache_init(&items_max, item_size);
test_alloc_free_random_order(items_max, /* unused */ 2, mem_cache_alloc_wrapped,
mem_cache_free_wrapped, print_memcache_state,
init_state, exit_state);
	free(buffer);
	if (aligned) {
		test_memcache_init_aligned(&items_max, item_size);
		test_alloc_free_random_order(items_max, /* unused */ 2,
					     mem_cache_alloc_wrapped,
					     mem_cache_free_wrapped,
					     print_memcache_state,
					     init_state, exit_state);
		/* Freed only here; a second free() outside this block
		 * would double-free when aligned == 0. */
		free(buffer);
	}
return 0;
}

10
src/glue/tests/test_memcache.h Normal file

@@ -0,0 +1,10 @@
#ifndef __TEST_MEMCACHE__H__
#define __TEST_MEMCACHE__H__
#include <generic/memcache.h>
int test_memcache(int num_alloc, int alloc_size_max, FILE *initstate, FILE *exitstate, int aligned);
#endif /* __TEST_MEMCACHE__H__ */

21
src/glue/tests/tests.h Normal file

@@ -0,0 +1,21 @@
#ifndef __TESTS_H__
#define __TESTS_H__
/* Mock-up physical memory */
extern unsigned int TEST_PHYSMEM_TOTAL_PAGES;
extern unsigned int TEST_PHYSMEM_TOTAL_SIZE;
/* Allocator test */
extern unsigned int PAGE_ALLOCATIONS;
extern unsigned int PAGE_ALLOC_SIZE_MAX;
/* Memcache test */
extern unsigned int MEMCACHE_ALLOCS_MAX;
extern unsigned int TEST_CACHE_ITEM_SIZE;
/* Kmalloc */
extern unsigned int KMALLOC_ALLOCATIONS;
extern unsigned int KMALLOC_ALLOC_SIZE_MAX;
#endif /* __TESTS_H__ */

12
src/lib/SConscript Normal file

@@ -0,0 +1,12 @@
# Inherit global environment
Import('env')
Import('config_symbols')
# The set of source files associated with this SConscript file.
src_local = ['printk.c', 'putc.c', 'string.c', 'bit.c', 'wait.c', 'mutex.c', 'idpool.c', 'memcache.c']
if "ARCH_TEST" not in config_symbols:
obj = env.Object(src_local)
else:
obj = []
Return('obj')

52
src/lib/bit.c Normal file

@@ -0,0 +1,52 @@
/*
* Bit manipulation functions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/bit.h>
#include INC_GLUE(memory.h)
/* Emulation of ARM's CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
int success = 0;
int i;
for(i = 0; i < limit; i++) {
/* Find first unset bit */
if (!(word[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i))) {
/* Set it */
word[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
success = 1;
break;
}
}
/* Return bit just set */
if (success)
return i;
else
return -1;
}
int check_and_clear_bit(u32 *word, int bit)
{
/* Check that bit was set */
if (word[BITWISE_GETWORD(bit)] & BITWISE_GETBIT(bit)) {
word[BITWISE_GETWORD(bit)] &= ~BITWISE_GETBIT(bit);
return 0;
} else {
//printf("Trying to clear already clear bit\n");
return -1;
}
}
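
/*
 * Usage sketch: the two helpers above implement a simple slot bitmap.
 * Illustrative only.
 */
#if 0
static u32 slots[2];	/* Bitmap covering 64 slots */

static void slots_example(void)
{
	int bit = find_and_set_first_free_bit(slots, 64);	/* claim */
	if (bit >= 0)
		check_and_clear_bit(slots, bit);		/* release */
}
#endif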

35
src/lib/idpool.c Normal file

@@ -0,0 +1,35 @@
/*
* Used for thread and space ids.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/idpool.h>
#include <l4/generic/kmalloc.h>
#include INC_GLUE(memory.h)
struct id_pool *id_pool_new_init(int totalbits)
{
int nwords = BITWISE_GETWORD(totalbits);
struct id_pool *new = kzalloc((nwords * SZ_WORD)
+ sizeof(struct id_pool));
new->nwords = nwords;
return new;
}
int id_new(struct id_pool *pool)
{
int id = find_and_set_first_free_bit(pool->bitmap,
pool->nwords * WORD_BITS);
BUG_ON(id < 0);
return id;
}
int id_del(struct id_pool *pool, int id)
{
int ret = check_and_clear_bit(pool->bitmap, id);
BUG_ON(ret < 0);
return ret;
}
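
/*
 * Usage sketch of the id pool lifecycle, mirroring how init_tasks() uses
 * it for thread and space ids. Illustrative only.
 */
#if 0
void idpool_example(void)
{
	struct id_pool *pool = id_pool_new_init(256);
	int id = id_new(pool);	/* Allocate the lowest free id */
	id_del(pool, id);	/* Release it again */
}
#endif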

170
src/lib/memcache.c Normal file

@@ -0,0 +1,170 @@
/*
* Bitmap-based linked-listable fixed-size memory cache.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/memcache.h>
#include <l4/lib/string.h>
#include <l4/lib/printk.h>
#include INC_GLUE(memory.h)
#include <l4/lib/bit.h>
/* Allocate, clear and return element */
void *mem_cache_zalloc(struct mem_cache *cache)
{
void *elem = mem_cache_alloc(cache);
memset(elem, 0, cache->struct_size);
return elem;
}
/* Allocate another element from given @cache. Returns 0 when full. */
void *mem_cache_alloc(struct mem_cache *cache)
{
int bit;
if (cache->free > 0) {
mutex_lock(&cache->mutex);
cache->free--;
if ((bit = find_and_set_first_free_bit(cache->bitmap,
cache->total)) < 0) {
printk("Error: Anomaly in cache occupied state.\n"
"Bitmap full although cache->free > 0\n");
BUG();
}
mutex_unlock(&cache->mutex);
return (void *)(cache->start + (cache->struct_size * bit));
} else {
/* Cache full */
return 0;
}
}
/* Free element at @addr in @cache. Return negative on error. */
int mem_cache_free(struct mem_cache *cache, void *addr)
{
unsigned int struct_addr = (unsigned int)addr;
unsigned int bit;
int err = 0;
/* Check boundary */
if (struct_addr < cache->start || struct_addr > cache->end)
return -1; /* Address doesn't belong to cache */
bit = ((struct_addr - cache->start) / cache->struct_size);
/*
* Check alignment:
* Find out if there was a lost remainder in last division.
* There shouldn't have been, because addresses are allocated at
* struct_size offsets from cache->start.
*/
if (((bit * cache->struct_size) + cache->start) != struct_addr) {
printk("Error: This address is not aligned on a predefined "
"structure address in this cache.\n");
err = -1;
return err;
}
mutex_lock(&cache->mutex);
/* Check free/occupied state */
if (check_and_clear_bit(cache->bitmap, bit) < 0) {
printk("Error: Anomaly in cache occupied state:\n"
"Trying to free already free structure.\n");
err = -1;
goto out;
}
cache->free++;
if (cache->free > cache->total) {
printk("Error: Anomaly in cache occupied state:\n"
"More free elements than total.\n");
err = -1;
goto out;
}
out:
mutex_unlock(&cache->mutex);
return err;
}
struct mem_cache *mem_cache_init(void *start,
int cache_size,
int struct_size,
unsigned int aligned)
{
struct mem_cache *cache = start;
unsigned int area_start;
unsigned int *bitmap;
int bwords_in_structs;
int bwords;
int total;
int bsize;
if ((struct_size < 0) || (cache_size < 0) ||
((unsigned long)start == ~(0))) {
printk("Invalid parameters.\n");
return 0;
}
/*
* The cache definition itself is at the beginning.
* Skipping it to get to start of free memory. i.e. the cache.
*/
area_start = (unsigned long)start + sizeof(struct mem_cache);
cache_size -= sizeof(struct mem_cache);
if (cache_size < struct_size) {
printk("Cache too small for given struct_size\n");
return 0;
}
	/* Work out how many words the bitmap occupies */
total = cache_size / struct_size;
bwords = total >> 5; /* Divide by 32 */
if (total & 0x1F) { /* Remainder? */
bwords++; /* Add one more word for remainder */
}
bsize = bwords * 4;
	/* This many structures are given up from the cache for bitmap space */
	bwords_in_structs = ((bsize) / struct_size) + 1;
	/* Total structs left after deducting the bitmap */
total = total - bwords_in_structs;
cache_size -= bsize;
/* This should always catch too small caches */
if (total <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
if (cache_size <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
bitmap = (unsigned int *)area_start;
area_start = (unsigned int)(bitmap + bwords);
if (aligned) {
unsigned int addr = area_start;
unsigned int addr_aligned = align_up(area_start, struct_size);
unsigned int diff = addr_aligned - addr;
BUG_ON(diff >= struct_size);
if (diff)
total--;
cache_size -= diff;
area_start = addr_aligned;
}
INIT_LIST_HEAD(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;
cache->free = cache->total;
cache->struct_size = struct_size;
cache->bitmap = bitmap;
mutex_init(&cache->mutex);
memset(cache->bitmap, 0, bwords*SZ_WORD);
return cache;
}
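
/*
 * Usage sketch, assuming the caller provides a page-sized buffer (e.g.
 * from the page allocator): carve it into aligned 64-byte objects.
 */
#if 0
void memcache_example(void *page)
{
	struct mem_cache *c = mem_cache_init(page, PAGE_SIZE, 64, 1);
	void *obj = mem_cache_zalloc(c);
	mem_cache_free(c, obj);
}
#endif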

149
src/lib/mutex.c Normal file

@@ -0,0 +1,149 @@
/*
* Mutex/Semaphore implementations.
*
* Copyright (c) 2007 Bahadir Balban
*/
#include <l4/lib/mutex.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/tcb.h>
/*
* Semaphore usage:
*
* Producer locks/produces/unlocks data.
* Producer does semaphore up.
* --
* Consumer does semaphore down.
* Consumer locks/consumes/unlocks data.
*/
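
/*
 * Sketch of the protocol above: a shared buffer guarded by a separate
 * data mutex, with `full' counting produced items. Illustrative only;
 * mutex initialisation is omitted.
 */
#if 0
static struct mutex data_lock;	/* Protects the shared buffer */
static struct mutex full;	/* Semaphore counting produced items */

void producer(void)
{
	mutex_lock(&data_lock);
	/* ... produce into the buffer ... */
	mutex_unlock(&data_lock);
	sem_up(&full);
}

void consumer(void)
{
	sem_down(&full);
	mutex_lock(&data_lock);
	/* ... consume from the buffer ... */
	mutex_unlock(&data_lock);
}
#endif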
/*
* Semaphore *up* for multiple producers. If any consumer is waiting, wake them
* up, otherwise, sleep. Effectively producers and consumers use the same
* waitqueue and there's only one kind in the queue at any one time.
*/
void sem_up(struct mutex *mutex)
{
int cnt;
spin_lock(&mutex->slock);
if ((cnt = mutex_inc(&mutex->lock)) <= 0) {
struct waitqueue *wq;
struct ktcb *sleeper;
/* Each producer wakes one consumer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
list_del_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up consumer (%d)\n", current->tid,
sleeper->tid);
sched_resume_task(sleeper);
return; /* Don't iterate, wake only one task. */
}
} else if (cnt > 0) {
DECLARE_WAITQUEUE(wq, current);
INIT_LIST_HEAD(&wq.task_list);
list_add_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_notify_sleep(current);
need_resched = 1;
printk("(%d) produced, now sleeping...\n", current->tid);
spin_unlock(&mutex->slock);
}
}
/*
* Semaphore *down* for multiple consumers. If any producer is sleeping, wake them
* up, otherwise, sleep. Effectively producers and consumers use the same
* waitqueue and there's only one kind in the queue at any one time.
*/
void sem_down(struct mutex *mutex)
{
int cnt;
spin_lock(&mutex->slock);
if ((cnt = mutex_dec(&mutex->lock)) >= 0) {
struct waitqueue *wq;
struct ktcb *sleeper;
/* Each consumer wakes one producer in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
list_del_init(&wq->task_list);
spin_unlock(&mutex->slock);
sleeper = wq->task;
printk("(%d) Waking up producer (%d)\n", current->tid,
sleeper->tid);
sched_resume_task(sleeper);
return; /* Don't iterate, wake only one task. */
}
} else if (cnt < 0) {
DECLARE_WAITQUEUE(wq, current);
INIT_LIST_HEAD(&wq.task_list);
list_add_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_notify_sleep(current);
need_resched = 1;
printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
spin_unlock(&mutex->slock);
}
}
void mutex_lock(struct mutex *mutex)
{
	/* NOTE:
	 * Every time we're woken up we retry acquiring the mutex. It is
	 * non-deterministic how many retries will result in success.
	 */
for (;;) {
spin_lock(&mutex->slock);
if (!__mutex_lock(&mutex->lock)) { /* Could not lock, sleep. */
DECLARE_WAITQUEUE(wq, current);
INIT_LIST_HEAD(&wq.task_list);
list_add_tail(&wq.task_list, &mutex->wq.task_list);
mutex->sleepers++;
sched_notify_sleep(current);
printk("(%d) sleeping...\n", current->tid);
spin_unlock(&mutex->slock);
} else
break;
}
spin_unlock(&mutex->slock);
}
void mutex_unlock(struct mutex *mutex)
{
spin_lock(&mutex->slock);
__mutex_unlock(&mutex->lock);
BUG_ON(mutex->sleepers < 0);
if (mutex->sleepers > 0) {
struct waitqueue *wq;
struct ktcb *sleeper;
/* Each unlocker wakes one other sleeper in queue. */
mutex->sleepers--;
BUG_ON(list_empty(&mutex->wq.task_list));
list_for_each_entry(wq, &mutex->wq.task_list, task_list) {
list_del_init(&wq->task_list);
spin_unlock(&mutex->slock);
			/*
			 * Here, someone else may get the lock well before we
			 * wake up the sleeper that we *hope* would get it.
			 * This is fine as the sleeper would retry and
			 * re-sleep. BUT, this may potentially starve the
			 * sleeper, causing non-determinism.
			 */
sleeper = wq->task;
printk("(%d) Waking up (%d)\n", current->tid,
sleeper->tid);
sched_resume_task(sleeper);
return; /* Don't iterate, wake only one task. */
}
}
spin_unlock(&mutex->slock);
}

445
src/lib/printk.c Normal file

@@ -0,0 +1,445 @@
/*********************************************************************
*
* Copyright (C) 2002-2004 Karlsruhe University
*
* File path: generic/printk.cc
* Description: Implementation of printf
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
********************************************************************/
#include <stdarg.h> /* for va_list, ... comes with gcc */
#include <l4/lib/printk.h>
/* FIXME: LICENSE LICENCE */
typedef unsigned int word_t;
extern void putc(const char c);
extern int print_tid (word_t val, word_t width, word_t precision, int adjleft);
/* convert nibble to lowercase hex char */
#define hexchars(x) (((x) < 10) ? ('0' + (x)) : ('a' + ((x) - 10)))
/**
* Print hexadecimal value
*
* @param val value to print
 * @param width width in characters
 * @param precision minimum number of digits to appear
 * @param adjleft left adjust the value
 * @param nullpad pad with leading zeros (when right padding)
 *
 * Prints a hexadecimal value with leading zeroes of given width
 * using putc(), or if the adjleft argument is given, prints the
 * hexadecimal value with space padding to the right.
 *
 * @returns the number of characters printed (should be same as width).
*/
int print_hex64(u64 val, int width, int precision, int adjleft, int nullpad)
{
int i, n = 0;
int nwidth = 0;
u32 high, low;
high = val >> 32;
low = (u32)val;
// Find width of hexnumber
if (high) {
while ((high >> (4 * nwidth)) && ((unsigned) nwidth < 2 * sizeof (u32)))
nwidth++;
nwidth += 32;
} else {
while ((low >> (4 * nwidth)) && ((unsigned) nwidth < 2 * sizeof (u32)))
nwidth++;
}
if (nwidth == 0)
nwidth = 1;
// May need to increase number of printed digits
if (precision > nwidth)
nwidth = precision;
// May need to increase number of printed characters
if (width == 0 && width < nwidth)
width = nwidth;
// Print number with padding
if (high)
{
if (!adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (nullpad ? '0' : ' ');
for (i = 4 * (nwidth - 33); i >= 0; i -= 4, n++)
putc (hexchars ((high >> i) & 0xF));
if (adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (' ');
width -= 32;
nwidth -= 32;
nullpad = 1;
}
if (! adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (nullpad ? '0' : ' ');
for (i = 4 * (nwidth - 1); i >= 0; i -= 4, n++)
putc (hexchars ((low >> i) & 0xF));
if (adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (' ');
return n;
}
int print_hex_3arg(const word_t val, int width, int precision)
{
long i, n = 0;
long nwidth = 0;
int adjleft = 0;
int nullpad = 0;
// Find width of hexnumber
while ((val >> (4 * nwidth)) && (word_t) nwidth < 2 * sizeof (word_t))
nwidth++;
if (nwidth == 0)
nwidth = 1;
// May need to increase number of printed digits
if (precision > nwidth)
nwidth = precision;
// May need to increase number of printed characters
if (width == 0 && width < nwidth)
width = nwidth;
// Print number with padding
if (! adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (nullpad ? '0' : ' ');
for (i = 4 * (nwidth - 1); i >= 0; i -= 4, n++)
putc (hexchars ((val >> i) & 0xF));
if (adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (' ');
return n;
}
int print_hex_5arg(const word_t val, int width,
int precision, int adjleft, int nullpad)
{
long i, n = 0;
long nwidth = 0;
// Find width of hexnumber
while ((val >> (4 * nwidth)) && (word_t) nwidth < 2 * sizeof (word_t))
nwidth++;
if (nwidth == 0)
nwidth = 1;
// May need to increase number of printed digits
if (precision > nwidth)
nwidth = precision;
// May need to increase number of printed characters
if (width == 0 && width < nwidth)
width = nwidth;
// Print number with padding
if (! adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (nullpad ? '0' : ' ');
for (i = 4 * (nwidth - 1); i >= 0; i -= 4, n++)
putc (hexchars ((val >> i) & 0xF));
if (adjleft)
for (i = width - nwidth; i > 0; i--, n++)
putc (' ');
return n;
}
/**
* Print a string
*
* @param s zero-terminated string to print
* @param width minimum width of printed string
*
 * Prints the zero-terminated string using putc(). The printed
 * string will be right padded with spaces so that it is at
 * least WIDTH characters wide.
 *
 * @returns the number of characters printed.
*/
int print_string_3arg(const char * s, const int width, const int precision)
{
int n = 0;
for (;;)
{
if (*s == 0)
break;
putc(*s++);
n++;
if (precision && n >= precision)
break;
}
while (n < width) { putc(' '); n++; }
return n;
}
int print_string_1arg(const char * s)
{
int n = 0;
int width = 0;
int precision = 0;
for (;;) {
if (*s == 0)
break;
putc(*s++);
n++;
if (precision && n >= precision)
break;
}
while (n < width) {
putc(' ');
n++;
}
return n;
}
/**
* Print hexadecimal value with a separator
*
* @param val value to print
* @param bits number of lower-most bits before which to
* place the separator
* @param sep the separator to print
*
 * @returns the number of characters printed.
*/
int print_hex_sep(const word_t val, const int bits, const char *sep)
{
int n = 0;
n = print_hex_3arg(val >> bits, 0, 0);
n += print_string_1arg(sep);
n += print_hex_3arg(val & ((1 << bits) - 1), 0, 0);
return n;
}
/**
* Print decimal value
*
* @param val value to print
* @param width width of field
* @param pad character used for padding value up to width
*
 * Prints a value as a decimal in the given WIDTH with leading
 * whitespace.
*
* @returns the number of characters printed (may be more than WIDTH)
*/
int print_dec(const word_t val, int width)
{
word_t divisor;
int digits;
/* estimate number of spaces and digits */
for (divisor = 1, digits = 1; val/divisor >= 10; divisor *= 10, digits++);
/* print spaces */
for ( ; digits < width; digits++ )
putc(' ');
/* print digits */
do {
putc(((val/divisor) % 10) + '0');
} while (divisor /= 10);
/* report number of digits printed */
return digits;
}
/**
* Does the real printk work
*
* @param format_p pointer to format string
* @param args list of arguments, variable length
*
* Prints the given arguments as specified by the format string.
* Implements a subset of the well-known printf plus some L4-specifics.
*
* @returns the number of characters printed
*/
int do_printk(char* format_p, va_list args)
{
const char* format = format_p;
int n = 0;
int i = 0;
int width = 8;
int precision = 0;
int adjleft = 0, nullpad = 0;
#define arg(x) va_arg(args, x)
/* sanity check */
if (!format)
return 0;
while (*format)
{
switch (*(format))
{
case '%':
width = precision = 0;
adjleft = nullpad = 0;
reentry:
switch (*(++format))
{
/* modifiers */
case '.':
for (format++; *format >= '0' && *format <= '9'; format++)
precision = precision * 10 + (*format) - '0';
if (*format == 'w')
{
// Set precision to printsize of a hex word
precision = sizeof (word_t) * 2;
format++;
}
format--;
goto reentry;
case '0':
nullpad = (width == 0);
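/* fall through */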
case '1'...'9':
width = width*10 + (*format)-'0';
goto reentry;
case 'w':
// Set width to printsize of a hex word
width = sizeof (word_t) * 2;
goto reentry;
case '-':
adjleft = 1;
goto reentry;
case 'l':
/* 'l' modifier is a no-op: arguments are word-sized already */
goto reentry;
case 'c':
putc(arg(int));
n++;
break;
case 'm': /* microseconds */
{
n += print_hex64(arg(u64), width, precision,
adjleft, nullpad);
break;
}
case 'd':
{
long val = arg(long);
if (val < 0)
{
putc('-');
val = -val;
}
n += print_dec(val, width);
break;
}
case 'u':
n += print_dec(arg(long), width);
break;
case 'p':
precision = sizeof (word_t) * 2;
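/* fall through */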
case 'x':
n += print_hex_5arg(arg(long), width, precision, adjleft, nullpad);
break;
case 's':
{
char* s = arg(char*);
if (s)
n += print_string_3arg(s, width, precision);
else
n += print_string_3arg("(null)", width, precision);
}
break;
case 't':
case 'T':
// Do nothing for now.
//n += print_tid (arg (word_t), width, precision, adjleft);
break;
case '%':
putc('%');
n++;
format++;
continue;
default:
n += print_string_1arg("?");
break;
}
i++;
break;
default:
putc(*format);
n++;
break;
}
format++;
}
return n;
}
/**
* Flexible print function
*
* @param format string containing formatting and parameter type
* information
* @param ... variable list of parameters
*
* @returns the number of characters printed
*/
int printk(char *format, ...)
{
va_list args;
int i;
va_start(args, format);
i = do_printk(format, args);
va_end(args);
return i;
}
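A hedged usage sketch of the format subset implemented above; the output column assumes a 32-bit word_t, and tcb stands in for any pointer argument:

/*
 * printk("irq %d at %x\n", 34, 0x10140000);  ->  "irq 34 at 10140000"
 * printk("|%8x|%-8x|\n", 0xbeef, 0xbeef);    ->  "|    beef|beef    |"
 * printk("%.wx\n", 0xbeef);                  ->  "0000beef"
 * printk("%p\n", tcb);                       ->  pointer as 8 hex digits
 * printk("task %s, 100%%\n", "init");        ->  "task init, 100%"
 */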

14
src/lib/putc.c Normal file
View File

@@ -0,0 +1,14 @@
/*
* Generic putc implementation that ties with platform-specific uart driver.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_PLAT(uart.h)
void putc(char c)
{
if (c == '\n')
uart_putc('\r');
uart_putc(c);
}

69
src/lib/string.c Normal file
View File

@@ -0,0 +1,69 @@
void *memset(void *p, int c, int size)
{
char ch;
char *pp;
pp = (char *)p;
ch = (char)c;
for (int i = 0; i < size; i++) {
*pp++ = ch;
}
return p;
}
void *memcpy(void *d, const void *s, int size)
{
char *dst = (char *)d;
const char *src = (const char *)s;
for (int i = 0; i < size; i++) {
*dst = *src;
dst++;
src++;
}
return d;
}
int strcmp(const char *s1, const char *s2)
{
unsigned int i = 0;
int d;
while(1) {
d = (unsigned char)s1[i] - (unsigned char)s2[i];
if (d != 0 || s1[i] == '\0')
return d;
i++;
}
}
/* LICENCE: Taken from linux for now BB.
* strncpy - Copy a length-limited, %NUL-terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: The maximum number of bytes to copy
*
* The result is not %NUL-terminated if the source exceeds
* @count bytes.
*
* In the case where the length of @src is less than that of
* count, the remainder of @dest will be padded with %NUL.
*
*/
char *strncpy(char *dest, const char *src, int count)
{
char *tmp = dest;
while (count) {
if ((*tmp = *src) != 0)
src++;
tmp++;
count--;
}
return dest;
}
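The non-termination caveat noted above is easy to trip over; a short sketch:

/*
 * char buf[4];
 *
 * strncpy(buf, "l4", 4);      // buf = "l4\0\0"  (NUL-padded)
 * strncpy(buf, "kernel", 4);  // buf = "kern"    (no terminating NUL!)
 */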

60
src/lib/wait.c Normal file
View File

@@ -0,0 +1,60 @@
/*
* Implementation of wakeup/wait for processes.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include <l4/lib/wait.h>
#include <l4/lib/spinlock.h>
/* Sleep if the given condition isn't true. */
#define wait_event(wqh, condition) \
do { \
for (;;) { \
if (condition) \
break; \
DECLARE_WAITQUEUE(wq, current); \
spin_lock(&wqh->slock); \
wqh->sleepers++; \
list_add_tail(&wq.task_list, &wqh->task_list); \
sched_tell(current, SCHED_FL_SLEEP); \
need_resched = 1; \
printk("(%d) waiting...\n", current->tid); \
spin_unlock(&wqh->slock); \
} \
} while (0)
/* Sleep without any condition */
#define wait_on(wqh) \
do { \
DECLARE_WAITQUEUE(wq, current); \
spin_lock(&wqh->slock); \
wqh->sleepers++; \
list_add_tail(&wq.task_list, &wqh->task_list); \
sched_tell(current, SCHED_FL_SLEEP); \
need_resched = 1; \
printk("(%d) waiting...\n", current->tid); \
spin_unlock(&wqh->slock); \
} while (0)
/* Wake up single waiter */
void wake_up(struct waitqueue_head *wqh)
{
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
if (wqh->sleepers > 0) {
struct waitqueue *wq;
struct ktcb *sleeper;
/* A positive sleeper count implies a non-empty queue */
BUG_ON(list_empty(&wqh->task_list));
wq = list_entry(wqh->task_list.next,
struct waitqueue, task_list);
sleeper = wq->task;
list_del_init(&wq->task_list);
wqh->sleepers--;
printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
sched_notify_resume(sleeper);
spin_unlock(&wqh->slock);
return;
}
spin_unlock(&wqh->slock);
}
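A hypothetical sketch of how the two sides pair up; rx_waiters and data_ready are assumed names for illustration, not definitions from this file:

static struct waitqueue_head rx_waiters; /* assumed to be initialised elsewhere */
static int data_ready; /* assumed condition flag */
/* Consumer side: sleeps until the flag is set and a wakeup arrives */
void consumer(void)
{
wait_event((&rx_waiters), data_ready);
}
/* Producer side: satisfies the condition, then wakes one sleeper */
void producer(void)
{
data_ready = 1;
wake_up(&rx_waiters);
}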

View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['printascii.S','platform.c', 'uart.c', 'timer.c', 'irq.c']
obj = env.Object(src_local)
Return('obj')

57
src/platform/pb926/irq.c Normal file
View File

@@ -0,0 +1,57 @@
/*
* Support for generic irq handling using platform irq controller (PL190)
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/generic/time.h>
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/drivers/timer/sp804/sp804_timer.h>
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
.name = "Vectored irq controller",
.level = 0,
.cascade = IRQ_SIC,
.offset = 0,
.ops = {
.init = pl190_vic_init,
.read_irq = pl190_read_irq,
.ack_and_mask = pl190_mask_irq,
.unmask = pl190_unmask_irq,
},
},
[1] = {
.name = "Secondary irq controller",
.level = 1,
.cascade = IRQ_NIL,
.offset = SIRQ_CHIP_OFFSET,
.ops = {
.init = pl190_sic_init,
.read_irq = pl190_sic_read_irq,
.ack_and_mask = pl190_sic_mask_irq,
.unmask = pl190_sic_unmask_irq,
},
},
};
static int platform_timer_handler(void)
{
sp804_irq_handler();
return do_timer_irq();
}
/* Built-in irq handlers, initialised at compile time.
 * Others are registered at run time via register_irq(). */
struct irq_desc irq_desc_array[IRQS_MAX] = {
[IRQ_TIMER01] = {
.name = "Timer01",
.chip = &irq_chip_array[0],
.handler = platform_timer_handler,
},
};
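A hypothetical sketch of how a generic dispatcher might drive these ops; the call signatures are assumptions for illustration, not taken from this file:

/* Plausible dispatch flow, assuming read_irq() returns the pending
 * irq number and ack_and_mask()/unmask() take that number */
void do_irq_sketch(struct irq_chip *chip)
{
int irq = chip->ops.read_irq();
chip->ops.ack_and_mask(irq);
if (irq_desc_array[chip->offset + irq].handler)
irq_desc_array[chip->offset + irq].handler();
chip->ops.unmask(irq);
}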

View File

@@ -0,0 +1,63 @@
/*
* PB926 platform-specific initialisation and setup
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/generic/platform.h>
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_PLAT(offsets.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(uart.h)
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
void init_platform_console(void)
{
add_mapping(PB926_UART0_BASE, PL011_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
/*
* Map same UART IO area to userspace so that primitive uart-based
* userspace printf can work. Note, this raw mapping is to be
* removed in the future, when file-based io is implemented.
*/
add_mapping(PB926_UART0_BASE, USERSPACE_UART_BASE, PAGE_SIZE,
MAP_USR_IO_FLAGS);
uart_init();
}
void init_platform_timer(void)
{
add_mapping(PB926_TIMER01_BASE, PLATFORM_TIMER_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_mapping(PB926_SYSCTRL_BASE, PB926_SYSCTRL_VBASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
timer_init();
}
void init_platform_irq_controller(void)
{
add_mapping(PB926_VIC_BASE, PLATFORM_IRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_mapping(PB926_SIC_BASE, PLATFORM_SIRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
irq_controllers_init();
}
void platform_init(void)
{
init_platform_console();
init_platform_timer();
init_platform_irq_controller();
}

View File

@@ -0,0 +1,74 @@
#include INC_PLAT(debug-macro.S)
#include INC_ARCH(asm.h)
#include INC_GLUE(memlayout.h)
.text
/*
* Useful debugging routines
*/
BEGIN_PROC(printhex8)
mov r1, #8
b printhex
BEGIN_PROC(printhex4)
mov r1, #4
b printhex
BEGIN_PROC(printhex2)
mov r1, #2
printhex: adr r2, hexbuf
add r3, r2, r1
mov r1, #0
strb r1, [r3]
1: and r1, r0, #15
mov r0, r0, lsr #4
cmp r1, #10
addlt r1, r1, #'0'
addge r1, r1, #'a' - 10
strb r1, [r3, #-1]!
teq r3, r2
bne 1b
mov r0, r2
b printascii
.ltorg
.align
@ A vmem-linked image has its strings at virtual addresses. This
@ replaces the reference with the corresponding physical address.
@ Note this only works if the memory offsets are clean, maskable
@ values for orr'ing and bic'ing. rm = mmu bits, rs = string address.
.macro get_straddr rs, rm
mrc p15, 0, \rm, c1, c0 @ Get MMU bits.
tst \rm, #1 @ MMU enabled?
biceq \rs, \rs, #KERNEL_AREA_START @ Clear Virtual mem offset.
orreq \rs, \rs, #PHYS_ADDR_BASE @ Add Phy mem offset.
.endm
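@ Worked example with hypothetical values: if KERNEL_AREA_START were
@ 0xf0000000 and PHYS_ADDR_BASE were 0x20000000, a string linked at
@ 0xf0001000 would resolve, while the MMU is still off, as:
@   bic: 0xf0001000 & ~0xf0000000  ->  0x00001000
@   orr: 0x00001000 |  0x20000000  ->  0x20001000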
BEGIN_PROC(printascii)
get_straddr r0, r1
addruart r3
b 2f
1: waituart r2, r3
senduart r1, r3
busyuart r2, r3
teq r1, #'\n'
moveq r1, #'\r'
beq 1b
2: teq r0, #0
ldrneb r1, [r0], #1
teqne r1, #0
bne 1b
mov pc, lr
END_PROC(printascii)
BEGIN_PROC(printch)
addruart r3
mov r1, r0
mov r0, #0
b 1b
END_PROC(printch)
hexbuf: .space 16

View File

@@ -0,0 +1,28 @@
/*
* Ties up platform timer with generic timer api
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <l4/generic/irq.h>
#include <l4/generic/platform.h>
#include INC_PLAT(platform.h)
#include <l4/drivers/timer/sp804/sp804_timer.h>
#include <l4/drivers/misc/sp810/sp810_sysctrl.h>
void timer_init(void)
{
/* Set timer 0 to 1MHz */
sp810_set_timclk(0, 1);
/* Initialise timer */
sp804_init();
}
void timer_start(void)
{
irq_enable(IRQ_TIMER01);
sp804_set_irq(0, 1); /* Enable timer0 irq */
sp804_enable(0, 1); /* Enable timer0 */
}

28
src/platform/pb926/uart.c Normal file
View File

@@ -0,0 +1,28 @@
/*
* Ties up platform's uart driver functions with generic API
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include INC_PLAT(platform.h)
#include <l4/drivers/uart/pl011/pl011_uart.h>
extern struct pl011_uart uart;
void uart_init()
{
uart.base = PL011_BASE;
uart.ops.initialise(&uart);
}
/* Generic uart function that lib/putc.c expects to see implemented */
void uart_putc(char c)
{
int res;
/* Platform specific uart implementation */
do {
res = uart.ops.tx_char(c);
} while (res < 0);
}

View File

@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['offsets.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,7 @@
#include <macros.h>
#include <config.h>
#include INC_PLAT(offsets.h)
unsigned int PHYS_MEM_START = 0; /* Dynamically allocated */
unsigned int PHYS_MEM_END = 0; /* Dynamically allocated */
unsigned int PHYS_ADDR_BASE = 0; /* Dynamically allocated */