Kernel updates since December 2009

This commit is contained in:
Bahadir Balban
2010-03-25 01:12:40 +02:00
parent 16818191b3
commit 74b5963fcb
487 changed files with 22477 additions and 3857 deletions

View File

@@ -3,7 +3,7 @@ Import('env')
Import('symbols')
# The set of source files associated with this SConscript file.
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c', 'mutex.c', 'cap.c', 'exregs.c', 'irq.c']
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c', 'mutex.c', 'cap.c', 'exregs.c', 'irq.c', 'cache.c']
obj = env.Object(src_local)

50
src/api/cache.c Normal file
View File

@@ -0,0 +1,50 @@
/*
* Low level cache control functions.
*
* Copyright (C) 2009 - 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/api/errno.h>
#include <l4/generic/tcb.h>
#include <l4/api/cache.h>
#include <l4/generic/capability.h>
#include INC_GLUE(cache.h)
/*
 * System call entry for low-level cache maintenance.
 *
 * Verifies the caller's capability over [start, end) and then
 * dispatches to the architecture-specific routine selected by
 * @flags. Any unrecognized flag value yields -EINVAL.
 */
int sys_cache_control(unsigned long start, unsigned long end,
		      unsigned int flags)
{
	int err;

	/* Capability check must pass before any maintenance runs */
	if ((err = cap_cache_check(start, end, flags)) < 0)
		return err;

	switch (flags) {
	case L4_INVALIDATE_ICACHE:
		arch_invalidate_icache(start, end);
		return 0;
	case L4_INVALIDATE_DCACHE:
		arch_invalidate_dcache(start, end);
		return 0;
	case L4_CLEAN_DCACHE:
		arch_clean_dcache(start, end);
		return 0;
	case L4_CLEAN_INVALIDATE_DCACHE:
		arch_clean_invalidate_dcache(start, end);
		return 0;
	case L4_INVALIDATE_TLB:
		arch_invalidate_tlb(start, end);
		return 0;
	default:
		return -EINVAL;
	}
}

View File

@@ -673,14 +673,14 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
case CAP_CONTROL_NCAPS:
if ((err = check_access((unsigned long)userbuf,
sizeof(int),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
break;
case CAP_CONTROL_READ:
if ((err = check_access((unsigned long)userbuf,
cap_count(current) *
sizeof(struct capability),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
break;
case CAP_CONTROL_SHARE:
@@ -694,7 +694,7 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
case CAP_CONTROL_DESTROY:
if ((err = check_access((unsigned long)userbuf,
sizeof(struct capability),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
break;
default:

View File

@@ -71,7 +71,7 @@ flags:
/*
* If task is the one currently runnable,
* update kip utcb reference
* update utcb reference
*/
if (task == current)
task_update_utcb(task);
@@ -161,7 +161,7 @@ int sys_exchange_registers(struct exregs_data *exregs, l4id_t tid)
if ((err = check_access((unsigned long)exregs,
sizeof(*exregs),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
/* Find tcb from its list */

View File

@@ -208,7 +208,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
struct waitqueue_head *wqhs, *wqhr;
int ret = 0;
if (!(receiver = tcb_find(recv_tid)))
if (!(receiver = tcb_find_lock(recv_tid)))
return -ESRCH;
wqhs = &receiver->wqh_send;
@@ -240,8 +240,11 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
// printk("%s: (%d) Waking up (%d)\n", __FUNCTION__,
// current->tid, receiver->tid);
/* Wake it up, we can yield here. */
sched_resume_sync(receiver);
/* Wake it up async */
sched_resume_async(receiver);
/* Release thread lock (protects for delete) */
spin_unlock(&receiver->thread_lock);
return ret;
}
@@ -253,6 +256,7 @@ int ipc_send(l4id_t recv_tid, unsigned int flags)
sched_prepare_sleep();
spin_unlock(&wqhr->slock);
spin_unlock(&wqhs->slock);
spin_unlock(&receiver->thread_lock);
// printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
// current->tid, recv_tid);
schedule();
@@ -405,7 +409,7 @@ int ipc_recv_extended(l4id_t sendertid, unsigned int flags)
/* Page fault user pages if needed */
if ((err = check_access(ipc_address, size,
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
/*
@@ -455,7 +459,7 @@ int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
/* Page fault those pages on the current task if needed */
if ((err = check_access(ipc_address, size,
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
/*

View File

@@ -5,5 +5,5 @@
*/
#include INC_API(kip.h)
SECTION(".data.kip") struct kip kip;
SECTION(".data.kip") ALIGN(SZ_4K) struct kip kip;

View File

@@ -8,9 +8,29 @@
#include INC_SUBARCH(mm.h)
#include <l4/api/errno.h>
#include <l4/api/space.h>
#include INC_GLUE(mapping.h)
int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
unsigned int flags, l4id_t tid)
/*
 * Userspace syscall requests can only map
 * using read/write/exec userspace flags.
 *
 * Returns 1 when @flags is one of the permitted userspace
 * mapping flag combinations, 0 otherwise.
 */
int user_map_flags_validate(unsigned int flags)
{
	switch (flags) {
	case MAP_USR_RO:
	case MAP_USR_RW:
	case MAP_USR_RWX:
	case MAP_USR_RX:
	case MAP_USR_IO:
		return 1;
	default:
		/*
		 * Every path above returns, so no trailing
		 * return is needed (the original had an
		 * unreachable `return 0;` after the switch).
		 */
		return 0;
	}
}
int sys_map(unsigned long phys, unsigned long virt,
unsigned long npages, unsigned int flags, l4id_t tid)
{
struct ktcb *target;
int err;
@@ -18,10 +38,18 @@ int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
if (!(target = tcb_find(tid)))
return -ESRCH;
/* Check flags validity */
if (!user_map_flags_validate(flags))
return -EINVAL;
if (!npages || !phys || !virt)
return -EINVAL;
if ((err = cap_map_check(target, phys, virt, npages, flags)) < 0)
return err;
add_mapping_pgd(phys, virt, npages << PAGE_BITS, flags, TASK_PGD(target));
add_mapping_pgd(phys, virt, npages << PAGE_BITS,
flags, TASK_PGD(target));
return 0;
}
@@ -36,17 +64,22 @@ int sys_unmap(unsigned long virtual, unsigned long npages, unsigned int tid)
struct ktcb *target;
int ret = 0, retval = 0;
if (tid == current->tid)
target = current;
else if (!(target = tcb_find(tid)))
if (!(target = tcb_find(tid)))
return -ESRCH;
if (!npages || !virtual)
return -EINVAL;
if ((ret = cap_unmap_check(target, virtual, npages)) < 0)
return ret;
for (int i = 0; i < npages; i++) {
ret = remove_mapping_pgd(virtual + i * PAGE_SIZE, TASK_PGD(target));
ret = remove_mapping_pgd(TASK_PGD(target),
virtual + i * PAGE_SIZE);
if (ret)
retval = ret;
}
return retval;
return ret;
}

View File

@@ -15,6 +15,7 @@
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
void init_mutex_queue_head(struct mutex_queue_head *mqhead)
{
@@ -205,6 +206,22 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
return wait_on_prepared_wait();
}
/*
* Note, the mutex in userspace was left free before the
* syscall was entered.
*
* It is possible that a thread has acquired it, another
* contended on it and the holder made it to the kernel
* quicker than us. We detect this situation here.
*/
if (mutex_queue->wqh_holders.sleepers) {
/*
* Let the first holder do all the waking up
*/
mutex_queue_head_unlock(mqhead);
return 0;
}
/*
* Found it, if it exists, there are contenders,
* now wake all of them up in FIFO order.
@@ -226,6 +243,8 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
unsigned long mutex_physical;
int ret = 0;
// printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);
/* Check valid operation */
if (mutex_op != MUTEX_CONTROL_LOCK &&
mutex_op != MUTEX_CONTROL_UNLOCK) {
@@ -249,8 +268,7 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
* capabilities of current task.
*/
if (!(mutex_physical =
virt_to_phys_by_pgd(mutex_address,
TASK_PGD(current))))
virt_to_phys_by_pgd(TASK_PGD(current), mutex_address)))
return -EINVAL;
switch (mutex_op) {

View File

@@ -39,6 +39,12 @@ int sys_schedule(void)
int sys_getid(struct task_ids *ids)
{
struct ktcb *this = current;
int err;
if ((err = check_access((unsigned long)ids,
sizeof(struct task_ids),
MAP_USR_RW, 1)) < 0)
return err;
ids->tid = this->tid;
ids->spid = this->space->spid;

View File

@@ -16,6 +16,7 @@
#include <l4/generic/capability.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(mapping.h)
int sys_thread_switch(void)
{
@@ -56,8 +57,7 @@ int thread_suspend(struct ktcb *task)
int thread_exit(struct ktcb *task)
{
return thread_signal(task, TASK_EXITING, TASK_DEAD);
return thread_signal(task, TASK_SUSPENDING, TASK_INACTIVE);
}
static inline int task_is_child(struct ktcb *task)
@@ -68,18 +68,26 @@ static inline int task_is_child(struct ktcb *task)
int thread_destroy_child(struct ktcb *task)
{
/* Wait until thread exits */
thread_exit(task);
/* Hint scheduler that an exit occured */
current->flags |= TASK_EXITED;
/* Now remove it atomically */
tcb_remove(task);
/* Wake up waiters */
/* Wake up waiters that arrived before removing */
wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);
BUG_ON(task->wqh_pager.sleepers > 0);
BUG_ON(task->state != TASK_DEAD);
BUG_ON(task->state != TASK_INACTIVE);
/* Place the task on the zombie queue for its cpu */
ktcb_list_add(task, &per_cpu_byid(kernel_resources.zombie_list,
task->affinity));
tcb_delete(task);
return 0;
}
@@ -104,14 +112,37 @@ int thread_destroy_children(void)
void thread_destroy_self(unsigned int exit_code)
{
/* Destroy all children first */
thread_destroy_children();
/* Wake up waiters */
wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);
/* If self-paged, finish everything except deletion */
if (current->tid == current->pagerid) {
/* Remove self safe against ipc */
tcb_remove(current);
/* Wake up any waiters queued up before removal */
wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);
/* Move capabilities to current cpu idle task */
cap_list_move(&per_cpu(scheduler).idle_task->cap_list,
&current->cap_list);
/* Place self on the per-cpu zombie queue */
ktcb_list_add(current, &per_cpu(kernel_resources.zombie_list));
}
/*
* Both child and a self-paging would set exit
* code and quit the scheduler
*/
current->exit_code = exit_code;
sched_exit_sync();
/*
* Hint scheduler that an exit has occured
*/
current->flags |= TASK_EXITED;
sched_suspend_sync();
}
int thread_wait(struct ktcb *task)
@@ -119,27 +150,50 @@ int thread_wait(struct ktcb *task)
unsigned int exit_code;
int ret;
// printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);
/* Wait until task switches to desired state */
WAIT_EVENT(&task->wqh_pager,
task->state == TASK_DEAD, ret);
task->state == TASK_INACTIVE, ret);
/* Return if interrupted by async event */
if (ret < 0)
return ret;
else {
exit_code = (int)task->exit_code;
tcb_remove(task);
tcb_delete(task);
return exit_code;
}
/* Now remove it safe against ipc */
tcb_remove(task);
/* Wake up waiters that arrived before removing */
wake_up_all(&task->wqh_send, WAKEUP_INTERRUPT);
wake_up_all(&task->wqh_recv, WAKEUP_INTERRUPT);
BUG_ON(task->wqh_pager.sleepers > 0);
BUG_ON(task->state != TASK_INACTIVE);
/* Obtain exit code */
exit_code = (int)task->exit_code;
/* Place it on the zombie queue */
ktcb_list_add(task,
&per_cpu_byid(kernel_resources.zombie_list,
task->affinity));
return exit_code;
}
/*
 * Destroy @task, which must be either a child of the current
 * task or the current task itself; anything else is a bug.
 *
 * @exit_code is masked with THREAD_EXIT_MASK. It is used only
 * on the self-destroy path (thread_destroy_self).
 *
 * NOTE(review): exit_code is not passed to thread_destroy_child(),
 * so a destroyed child keeps whatever exit code it already had —
 * confirm this is intended.
 */
int thread_destroy(struct ktcb *task, unsigned int exit_code)
{
	// printk("%s: (%d) for (%d)\n", __FUNCTION__, current->tid, task->tid);

	/* Keep only the valid exit code bits */
	exit_code &= THREAD_EXIT_MASK;

	if (task_is_child(task))
		return thread_destroy_child(task);
	else if (task == current)
		thread_destroy_self(exit_code);
	else
		BUG();

	return 0;
}
@@ -208,13 +262,12 @@ int thread_start(struct ktcb *task)
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
/* FIXME: Refuse to run dead tasks */
/* Notify scheduler of task resume */
sched_resume_async(task);
/* Release lock and return */
mutex_unlock(&task->thread_control_lock);
return 0;
}
@@ -229,10 +282,15 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
}
BUG_ON(!orig);
/* If original has no syscall context yet, don't copy */
if (!orig->syscall_regs)
return 0;
/*
* For duplicated threads pre-syscall context is saved on
* the kernel stack. We copy this context of original
* into the duplicate thread's current context structure
* into the duplicate thread's current context structure,
*
* No locks needed as the thread is not known to the system yet.
*/
@@ -262,6 +320,24 @@ int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
return 0;
}
/* Protects the round-robin counter below against concurrent creators */
static DECLARE_SPINLOCK(task_select_affinity_lock);

/* Next cpu id to hand out; wraps at CONFIG_NCPU */
static unsigned int cpu_rr_affinity;

/* Select which cpu to place the new task in round-robin fashion */
void thread_setup_affinity(struct ktcb *task)
{
	spin_lock(&task_select_affinity_lock);

	task->affinity = cpu_rr_affinity;

	//printk("Set up thread %d affinity=%d\n",
	//       task->tid, task->affinity);

	/* Advance and wrap the round-robin counter */
	cpu_rr_affinity++;
	if (cpu_rr_affinity >= CONFIG_NCPU)
		cpu_rr_affinity = 0;

	spin_unlock(&task_select_affinity_lock);
}
static inline void
thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
struct ktcb *new, struct ktcb *orig)
@@ -344,8 +420,8 @@ int thread_create(struct task_ids *ids, unsigned int flags)
& TC_COPY_SPACE & TC_NEW_SPACE) || !flags)
return -EINVAL;
/* Can't have multiple pager specifiers */
if (flags & TC_SHARE_PAGER & TC_AS_PAGER)
/* Must have one space flag */
if ((flags & THREAD_SPACE_MASK) == 0)
return -EINVAL;
/* Can't request shared utcb or tgid without shared space */
@@ -371,29 +447,23 @@ int thread_create(struct task_ids *ids, unsigned int flags)
}
}
/*
* Note this is a kernel-level relationship
* between the creator and the new thread.
*
* Any higher layer may define parent/child
* relationships between orig and new separately.
*/
if (flags & TC_AS_PAGER)
new->pagerid = current->tid;
else if (flags & TC_SHARE_PAGER)
new->pagerid = current->pagerid;
else
new->pagerid = new->tid;
/* Set creator as pager */
new->pagerid = current->tid;
//printk("Thread (%d) pager set as (%d)\n", new->tid, new->pagerid);
/*
* Setup container-generic fields from current task
*/
/* Setup container-generic fields from current task */
new->container = current->container;
/*
* Set up cpu affinity.
*
* This is the default setting, it may be changed
* by a subsequent exchange_registers call
*/
thread_setup_affinity(new);
/* Set up new thread context by using parent ids and flags */
thread_setup_new_ids(ids, flags, new, orig);
arch_setup_new_thread(new, orig, flags);
tcb_add(new);
@@ -406,7 +476,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)
out_err:
/* Pre-mature tcb needs freeing by free_ktcb */
free_ktcb(new);
free_ktcb(new, current);
return err;
}
@@ -421,13 +491,25 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
int err, ret = 0;
if ((err = check_access((unsigned long)ids, sizeof(*ids),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE)
if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) {
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
/*
* Tasks may only operate on their children. They may
* also destroy themselves or any children.
*/
if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
!task_is_child(task) && task != current)
return -EPERM;
if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY
&& !task_is_child(task))
return -EPERM;
}
if ((err = cap_thread_check(task, flags, ids)) < 0)
return err;

View File

@@ -1,10 +1,23 @@
# Inherit global environment
Import('env')
import os, sys, glob
PROJRELROOT = '../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception.c']
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception-common.c', 'mapping-common.c', 'memset.S', 'memcpy.S']
for name, val in symbols:
if 'CONFIG_SMP' == name:
src_local += ['head-smp.S']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,334 @@
/*
* Common exception handling code
*
* Copyright (C) 2008 - 2010 B Labs Ltd.
* Written by Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include <l4/generic/thread.h>
#include <l4/api/thread.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/platform.h>
#include <l4/generic/debug.h>
#include <l4/lib/printk.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_ARCH(exception.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
#include INC_SUBARCH(mm.h)
/*
 * Last-resort handler for aborts the kernel cannot recover from:
 * report the condition and halt the system with irqs disabled.
 * Never returns.
 */
void abort_die(void)
{
	disable_irqs();

	print_early("Unhandled kernel abort.\n");
	print_early("Kernel panic.\n");
	print_early("Halting system...\n");

	for (;;)
		;
}
struct ipc_state {
u32 mr[MR_TOTAL];
unsigned int flags;
};
void ipc_save_state(struct ipc_state *state)
{
unsigned int *mr0_current = KTCB_REF_MR0(current);
BUG_ON(!mr0_current);
/* Save primary message registers */
for (int i = 0; i < MR_TOTAL; i++)
state->mr[i] = mr0_current[i];
/* Save ipc flags */
state->flags = tcb_get_ipc_flags(current);
}
void ipc_restore_state(struct ipc_state *state)
{
unsigned int *mr0_current = KTCB_REF_MR0(current);
BUG_ON(!mr0_current);
/* Restore primary message registers */
for (int i = 0; i < MR_TOTAL; i++)
mr0_current[i] = state->mr[i];
/* Restore ipc flags */
tcb_set_ipc_flags(current, state->flags);
}
/*
 * Send a fault ipc to the faulty task's pager.
 *
 * @faulty_pc: program counter at the time of the fault.
 * @fsr: fault status register contents.
 * @far: fault address register contents.
 * @ipc_tag: fault kind tag placed in MR_TAG.
 *
 * Returns 0 on success, -EINTR if the fault ipc was interrupted;
 * any other ipc error suspends the current task.
 *
 * Compiled at -O0: the forged syscall context below points into
 * this frame's mr[] array, so the optimizer must not rearrange it.
 */
int __attribute__((optimize("O0")))
fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag)
{
	int err;

	/* mr[0] has the fault tag. The rest is the fault structure */
	u32 mr[MR_TOTAL] = {
		[MR_TAG] = ipc_tag,
		[MR_SENDER] = current->tid
	};

	fault_kdata_t *fault = (fault_kdata_t *)&mr[MR_UNUSED_START];

	/* Fill in fault information to pass over during ipc */
	fault->faulty_pc = faulty_pc;
	fault->fsr = fsr;
	fault->far = far;

	/*
	 * Write pte of the abort address,
	 * which is different on pabt/dabt
	 */
	if (is_prefetch_abort(fsr))
		fault->pte = virt_to_pte(faulty_pc);
	else
		fault->pte = virt_to_pte(far);

	/*
	 * System calls save arguments (and message registers)
	 * on the kernel stack. They are then referenced from
	 * the caller's ktcb. Here, we forge a fault structure
	 * as if an ipc syscall has occurred. Then the reference
	 * to the fault structure is set in the ktcb such that
	 * it lies on the mr0 offset when referred as the syscall
	 * context.
	 */
	current->syscall_regs = (syscall_context_t *)
		((unsigned long)&mr[0] -
		 offsetof(syscall_context_t, r3));

	/* Set current flags to short ipc */
	tcb_set_ipc_flags(current, IPC_FLAGS_SHORT);

	/* Detect if a pager is self-faulting */
	if (current->tid == current->pagerid) {
		printk("Pager (%d) faulted on itself. "
		       "FSR: 0x%x, FAR: 0x%x, PC: 0x%x pte: 0x%x CPU%d Exiting.\n",
		       current->tid, fault->fsr, fault->far,
		       fault->faulty_pc, fault->pte, smp_get_cpuid());
		/*
		 * Fix: thread_destroy() takes the exit code as a
		 * second argument (see its definition); a
		 * self-faulting pager exits with code 0.
		 */
		thread_destroy(current, 0);
	}

	/* Send ipc to the task's pager */
	if ((err = ipc_sendrecv(current->pagerid,
				current->pagerid, 0)) < 0) {
		/* Must not sleep or exit while holding kernel locks */
		BUG_ON(current->nlocks);

		/* Return on interrupt */
		if (err == -EINTR) {
			printk("Thread (%d) page-faulted "
			       "and got interrupted by its pager.\n",
			       current->tid);
			return err;
		} else { /* Suspend on any other error */
			printk("Thread (%d) faulted in kernel "
			       "and an error occured during "
			       "page-fault ipc. err=%d. "
			       "Suspending task.\n",
			       current->tid, err);
			current->flags |= TASK_SUSPENDING;
			sched_suspend_sync();
		}
	}

	return 0;
}
/*
 * When a task calls the kernel and the supplied user buffer is
 * not mapped, the kernel generates a page fault to the task's
 * pager so that the pager can make the decision on mapping the
 * buffer. Remember that if a task maps its own user buffer to
 * itself this way, the kernel can access it, since it shares
 * that task's page table.
 *
 * @addr/@size: user buffer to be paged in.
 * @flags: requested access flags (not consumed in this function).
 *
 * Returns 0 on success, or the negative error from the fault ipc.
 */
int pager_pagein_request(unsigned long addr, unsigned long size,
			 unsigned int flags)
{
	int err = 0;
	u32 abort = 0;
	unsigned long npages = __pfn(align_up(size, PAGE_SIZE));
	struct ipc_state ipc_state;

	set_abort_type(abort, ABORT_TYPE_DATA);

	/* Save current ipc state; the fault ipc below clobbers it */
	ipc_save_state(&ipc_state);

	/* For every page to be used by the
	 * kernel send a page-in request */
	for (int i = 0; i < npages; i++)
		if ((err = fault_ipc_to_pager(0, abort,
					      addr + (i * PAGE_SIZE),
					      L4_IPC_TAG_PFAULT)) < 0)
			break;

	/*
	 * Fix: restore the saved ipc state on the error path too —
	 * previously an early return left the caller's message
	 * registers and ipc flags clobbered by the fault ipc.
	 */
	ipc_restore_state(&ipc_state);

	return err;
}
/*
 * Data abort (page fault) entry point.
 *
 * @faulted_pc: The address where the program counter was during the fault.
 * @dfsr: Contains the fault status register
 * @dfar: Contains the fault address register
 * @spsr: saved program status of the faulting context.
 *
 * Irrecoverable aborts halt the system; recoverable ones are
 * forwarded to the faulting task's pager via ipc.
 */
void data_abort_handler(u32 faulted_pc, u32 dfsr, u32 dfar, u32 spsr)
{
	int ret;

	/* Accounting of data abort occurrences */
	system_account_dabort();

	/* Indicate abort type on dfsr */
	set_abort_type(dfsr, ABORT_TYPE_DATA);

	dbg_abort("Data abort PC:0x%x, FAR: 0x%x, FSR: 0x%x, CPU%d\n",
		  faulted_pc, dfar, dfsr, smp_get_cpuid());

	/*
	 * Check abort type and tell
	 * if it's an irrecoverable fault
	 */
	if ((ret = check_abort_type(faulted_pc, dfsr, dfar, spsr)) < 0)
		goto die; /* Die if irrecoverable */
	else if (ret == ABORT_HANDLED)
		return; /* Handled internally, nothing more to do */

	/* Notify the pager */
	fault_ipc_to_pager(faulted_pc, dfsr, dfar, L4_IPC_TAG_PFAULT);

	/*
	 * FIXME:
	 * Check return value of pager, and also make a record of
	 * the fault that has occurred. We ought to expect progress
	 * from the pager. If the same fault is occurring a number
	 * of times consecutively, we might want to kill the pager.
	 */

	/* See if current task has various flags set by its pager */
	if (current->flags & TASK_SUSPENDING) {
		/* Must not sleep while holding kernel locks */
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	}

	return;

die:
	dprintk("FAR:", dfar);
	dprintk("PC:", faulted_pc);
	abort_die();
}
/*
 * Prefetch abort (instruction fetch fault) entry point.
 *
 * Mirrors data_abort_handler(): account, classify, and either
 * die, return, or forward the fault to the task's pager.
 */
void prefetch_abort_handler(u32 faulted_pc, u32 ifsr, u32 ifar, u32 spsr)
{
	int ret;

	/* Accounting of prefetch abort occurrences */
	system_account_pabort();

	/* Indicate abort type on dfsr */
	set_abort_type(ifsr, ABORT_TYPE_PREFETCH);

	dbg_abort("Prefetch abort PC:0x%x, FAR: 0x%x, FSR: 0x%x, CPU%d\n",
		  faulted_pc, ifar, ifsr, smp_get_cpuid());

	/*
	 * Check abort type and tell
	 * if it's an irrecoverable fault
	 *
	 * NOTE(review): 0 is passed as the pc argument here, whereas
	 * the data abort path passes faulted_pc — confirm intentional.
	 */
	if ((ret = check_abort_type(0, ifsr, ifar, spsr)) < 0)
		goto die; /* Die if irrecoverable */
	else if (ret == ABORT_HANDLED)
		return; /* Return if handled internally */

	/* Notify the pager */
	fault_ipc_to_pager(faulted_pc, ifsr, ifar, L4_IPC_TAG_PFAULT);

	/*
	 * FIXME:
	 * Check return value of pager, and also make a record of
	 * the fault that has occurred. We ought to expect progress
	 * from the pager. If the same fault is occurring a number
	 * of times consecutively, we might want to kill the pager.
	 */

	/* See if current task has various flags set by its pager */
	if (current->flags & TASK_SUSPENDING) {
		/* Must not sleep while holding kernel locks */
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	}

	return;

die:
	dprintk("FAR:", ifar);
	abort_die();
}
/*
 * Undefined instruction exception entry point.
 *
 * Forwards the fault to the task's pager; a non-user-mode
 * undefined instruction is fatal and halts the system.
 *
 * NOTE(review): the fault ipc is sent to the pager *before* the
 * user-mode check below, so a kernel-mode undefined instruction
 * first notifies the pager and then dies — confirm this ordering
 * is intended.
 */
void undefined_instr_handler(u32 undefined_address, u32 spsr, u32 lr)
{
	dbg_abort("Undefined instruction. PC:0x%x", undefined_address);

	/* Accounting of undefined-instruction occurrences */
	system_account_undef_abort();

	fault_ipc_to_pager(undefined_address, 0, undefined_address,
			   L4_IPC_TAG_UNDEF_FAULT);

	if (!is_user_mode(spsr)) {
		dprintk("Undefined instruction occured in "
			"non-user mode. addr=", undefined_address);
		goto die;
	}

	/* See if current task has various flags set by its pager */
	if (current->flags & TASK_SUSPENDING) {
		/* Must not sleep while holding kernel locks */
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	}

	return;

die:
	abort_die();
}
extern int current_irq_nest_count;
/*
* This is called right where the nest count is increased
* in case the nesting is beyond the predefined max limit.
* It is another matter whether this limit is enough to
* guarantee the kernel stack is not overflown.
*
* FIXME: Take measures to recover. (E.g. disable irqs etc)
*
* Note that this is called in irq context, and it *also*
* thrashes the designated irq stack which is only 12 bytes.
*
* It really is assumed the system has come to a halt when
* this happens.
*/
/*
 * Report that irq nesting exceeded the configured maximum
 * and halt the system. Called from irq context; the system
 * is assumed unrecoverable at this point.
 */
void irq_overnest_error(void)
{
	dprintk("Irqs nested beyond limit. Current count: ",
		current_irq_nest_count);

	print_early("System halted...\n");

	for (;;)
		;
}

114
src/arch/arm/head-smp.S Normal file
View File

@@ -0,0 +1,114 @@
/*
* Kernel Entry point for secondary cpus
*
* Copyright (C) 2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
#include INC_ARCH(asm.h)
#include INC_PLAT(offsets.h)
#include INC_ARCH(asm-macros.S)
#define C15_C0_M 0x0001 /* MMU */
#define C15_C0_A 0x0002 /* Alignment */
#define C15_C0_C 0x0004 /* (D) Cache */
#define C15_C0_W 0x0008 /* Write buffer */
#define C15_C0_B 0x0080 /* Endianness */
#define C15_C0_S 0x0100 /* System */
#define C15_C0_R 0x0200 /* ROM */
#define C15_C0_Z 0x0800 /* Branch Prediction */
#define C15_C0_I 0x1000 /* I cache */
#define C15_C0_V 0x2000 /* High vectors */
.section .text.head

/*
 * Entry point for secondary cpus. Runs with the MMU off at
 * physical addresses: disables MMU/caches, sets up per-cpu
 * boot and exception stacks, then calls smp_secondary_init().
 */
BEGIN_PROC(__smp_start)
	/* Supervisor mode, irqs disabled */
	msr	cpsr_fxsc, #ARM_NOIRQ_SVC

	/* Disable mmu if it is enabled */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #C15_C0_M	@ Disable MMU
	bic	r0, r0, #C15_C0_C	@ Disable (D) Cache
	bic	r0, r0, #C15_C0_I	@ Disable I cache
	bic	r0, r0, #C15_C0_W	@ Disable Write buffer
	mcr	p15, 0, r0, c1, c0, 0

	/* Setup boot stack (physical address) */

	/*
	 * Each processor gets a unique 4 KB stack (lsl #12 below).
	 * This stack is used until the first task becomes
	 * runnable, so there needs to be one for each core
	 *
	 *	+----------+
	 *	|CPU3 Stack|
	 *	+----------+
	 *	|CPU2 Stack|
	 *	+----------+
	 *	|CPU1 Stack|
	 *	+----------+
	 *	|CPU0 Stack|
	 *	+----------+ _bootstack_physical
	 */
	get_cpuid r0
	mov	r0, r0, lsl #12		/* 4 KB stack per-cpu */
	ldr	sp, _secondary_cpu_stack
	sub	sp, sp, r0

	/*
	 * Each processor will get its own irq/fiq/abt/und stack
	 * of size 16 bytes per mode. Each mode would have 64 bytes
	 * of stack used in total for 4 cores.
	 *
	 * Note, unlike SVC mode all abort modes also include the
	 * stack for primary core, i.e CPU0. There's no separation
	 * of primary and secondary stack regions.
	 *
	 *	+------------------+ __abt_stack_high
	 *	|  CPU0 ABT Stack  |
	 *	+------------------+ __abt_stack_high - 0x10
	 *	|  CPU1 ABT Stack  |
	 *	+------------------+ __abt_stack_high - 0x20
	 *	|  CPU2 ABT Stack  |
	 *	+------------------+ __abt_stack_high - 0x30
	 *	|  CPU3 ABT Stack  |
	 *	+------------------+ __abt_stack_high - 0x40
	 *
	 */
	get_cpuid r0
	mov	r0, r0, lsl #4		/* 16 byte stack for each core */

	/* Exception stacks are defined in vector page */
	msr	cpsr_fcx, #ARM_NOIRQ_ABT
	ldr	sp, _sec_kern_abt_stack
	sub	sp, sp, r0

	msr	cpsr_fcx, #ARM_NOIRQ_IRQ
	ldr	sp, _sec_kern_irq_stack
	sub	sp, sp, r0

	msr	cpsr_fcx, #ARM_NOIRQ_FIQ
	ldr	sp, _sec_kern_fiq_stack
	sub	sp, sp, r0

	msr	cpsr_fcx, #ARM_NOIRQ_UND
	ldr	sp, _sec_kern_und_stack
	sub	sp, sp, r0

	/* Back to SVC mode before entering C code */
	msr	cpsr_fcx, #ARM_NOIRQ_SVC

	/* Jump to start_kernel */
	bl	smp_secondary_init

	/* Never reached */
1:
	b	1b

_secondary_cpu_stack:
	.word	_bootstack_physical

/* Exception stacks are defined in vector page */
_sec_kern_abt_stack:
	.word	__abt_stack_high
_sec_kern_irq_stack:
	.word	__irq_stack_high
_sec_kern_fiq_stack:
	.word	__fiq_stack_high
_sec_kern_und_stack:
	.word	__und_stack_high

View File

@@ -26,7 +26,7 @@
.section .text.head
BEGIN_PROC(_start)
/* Setup status register for supervisor mode, interrupts disabled */
msr cpsr_fc, #ARM_MODE_SVC
msr cpsr_fcxs, #ARM_MODE_SVC
/* Disable mmu if it is enabled */
mrc p15, 0, r0, c1, c0, 0
@@ -34,6 +34,7 @@ BEGIN_PROC(_start)
bic r0, r0, #C15_C0_C @ Disable (D) Cache
bic r0, r0, #C15_C0_I @ Disable I cache
bic r0, r0, #C15_C0_W @ Disable Write buffer
bic r0, r0, #C15_C0_Z @ Disable Branch prediction
mcr p15, 0, r0, c1, c0, 0
/* Setup boot stack (physical address) */

View File

@@ -0,0 +1,418 @@
/*
* Low-level page table functions that are common
* and abstracted between v5-v7 ARM architectures
*
* Copyright (C) 2007 - 2010 B Labs Ltd.
* Written by Bahadir Balban
*/
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_GLUE(mapping.h)
#include <l4/generic/platform.h>
#include <l4/api/errno.h>
#include <l4/lib/printk.h>
#include <l4/generic/tcb.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/space.h>
/*
 * Look up the second-level pmd table covering @vaddr in
 * @task_pgd. Returns the table's virtual address if one is
 * attached, 0 on a fault entry, and BUG()s on anything else
 * (e.g. a section entry).
 */
pmd_table_t *pmd_exists(pgd_table_t *task_pgd, unsigned long vaddr)
{
	pmd_t *pmd = arch_pick_pmd(task_pgd, vaddr);

	/* A proper pmd entry: hand back its table */
	if ((*pmd & PMD_TYPE_MASK) == PMD_TYPE_PMD)
		return (pmd_table_t *)
			phys_to_virt(*pmd & PMD_ALIGN_MASK);

	/* A fault entry: nothing attached here */
	if ((*pmd & PMD_TYPE_MASK) == 0)
		return 0;

	/* Anything that's not a pmd or fault is a bug */
	BUG();
	return 0;
}
/*
 * Convert a virtual address to a pte value from a task pgd.
 *
 * FIXME: Could be expressed via the ptep variant below;
 * kept separate for now because too many callers would
 * need re-testing.
 */
pte_t virt_to_pte_from_pgd(pgd_table_t *task_pgd,
			   unsigned long virtual)
{
	pmd_table_t *pmd_table = pmd_exists(task_pgd, virtual);

	/* No pmd attached means no mapping: report a zero pte */
	return pmd_table ?
		(pte_t)pmd_table->entry[PMD_INDEX(virtual)] : (pte_t)0;
}
/* Convert a virtual address to a pte pointer from a task pgd */
pte_t *virt_to_ptep_from_pgd(pgd_table_t *task_pgd,
			     unsigned long virtual)
{
	pmd_table_t *pmd_table = pmd_exists(task_pgd, virtual);

	/* No pmd attached means no entry to point at */
	if (!pmd_table)
		return (pte_t *)0;

	return (pte_t *)&pmd_table->entry[PMD_INDEX(virtual)];
}
/*
 * Convert a virtual address to a pte if it
 * exists in the page tables.
 *
 * Convenience wrapper operating on the current task's pgd.
 */
pte_t virt_to_pte(unsigned long virtual)
{
	return virt_to_pte_from_pgd(TASK_PGD(current), virtual);
}
/* Same as virt_to_pte(), but returns a pointer to the pte entry */
pte_t *virt_to_ptep(unsigned long virtual)
{
	return virt_to_ptep_from_pgd(TASK_PGD(current), virtual);
}
/*
 * Translate @vaddr to a physical address through @pgd.
 *
 * Returns 0 when no mapping exists (zero pte).
 *
 * NOTE(review): the result is masked with ~PAGE_MASK — assuming
 * PAGE_MASK is the low in-page bit mask (as the size round-up
 * checks elsewhere in this file suggest), the in-page offset is
 * dropped and callers get a page-aligned address — confirm.
 */
unsigned long virt_to_phys_by_pgd(pgd_table_t *pgd, unsigned long vaddr)
{
	pte_t pte = virt_to_pte_from_pgd(pgd, vaddr);

	return pte & ~PAGE_MASK;
}
/* Translate @vaddr through @task's page tables */
static inline unsigned long
virt_to_phys_by_task(struct ktcb *task, unsigned long vaddr)
{
	return virt_to_phys_by_pgd(TASK_PGD(task), vaddr);
}
/*
 * Attaches a pmd to either a task or the global pgd
 * depending on the virtual address passed.
 *
 * The pgd entry holds the pmd table's *physical* address,
 * which must be PMD_SIZE aligned.
 */
void attach_pmd(pgd_table_t *task_pgd, pmd_table_t *pmd_table,
		unsigned long vaddr)
{
	u32 pmd_phys = virt_to_phys(pmd_table);
	pmd_t *pmd;

	BUG_ON(!is_aligned(pmd_phys, PMD_SIZE));

	/*
	 * Pick the right pmd from the right pgd.
	 * It makes a difference if split tables are used.
	 */
	pmd = arch_pick_pmd(task_pgd, vaddr);

	/* Write the pmd into hardware pgd */
	arch_write_pmd(pmd, pmd_phys, vaddr);
}
/*
 * Map @sz_bytes (a byte count, minimum PAGE_SIZE, rounded up to
 * whole pages) from @physical to @virtual in @task_pgd,
 * allocating second-level pmd tables on demand.
 *
 * @flags are generic space flags, converted here to
 * architecture-specific pte flags.
 */
void add_mapping_pgd(unsigned long physical, unsigned long virtual,
		     unsigned int sz_bytes, unsigned int flags,
		     pgd_table_t *task_pgd)
{
	/* assumes PFN_SHIFT converts a byte count to page frames —
	 * see the PAGE_MASK round-up just below */
	unsigned long npages = (sz_bytes >> PFN_SHIFT);
	pmd_table_t *pmd_table;

	if (sz_bytes < PAGE_SIZE) {
		print_early("Error: Mapping size less than PAGE_SIZE. "
			    "Mapping size is in bytes not pages.\n");
		BUG();
	}

	/* Round up when size is not an exact multiple of a page */
	if (sz_bytes & PAGE_MASK)
		npages++;

	/* Convert generic map flags to arch specific flags */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all pages that cover given size */
	for (int i = 0; i < npages; i++) {
		/* Check if a pmd was attached previously */
		if (!(pmd_table = pmd_exists(task_pgd, virtual))) {
			/* First mapping in pmd, allocate it */
			pmd_table = alloc_pmd();

			/* Prepare the pte but don't sync */
			arch_prepare_pte(physical, virtual, flags,
					 &pmd_table->entry[PMD_INDEX(virtual)]);

			/* Attach pmd to its pgd and sync it */
			attach_pmd(task_pgd, pmd_table, virtual);
		} else {
			/* Prepare, write the pte and sync */
			arch_prepare_write_pte(physical, virtual,
					       flags, &pmd_table->entry[PMD_INDEX(virtual)]);
		}

		/* Move on to the next page */
		physical += PAGE_SIZE;
		virtual += PAGE_SIZE;
	}
}
/*
 * Boot-time variant of add_mapping_pgd(): maps into the global
 * init_pgd and allocates pmd tables from the boot allocator
 * instead of the runtime one. Same rounding and flag-conversion
 * rules as add_mapping_pgd().
 */
void add_boot_mapping(unsigned long physical, unsigned long virtual,
		      unsigned int sz_bytes, unsigned int flags)
{
	/* assumes PFN_SHIFT converts a byte count to page frames */
	unsigned long npages = (sz_bytes >> PFN_SHIFT);
	pmd_table_t *pmd_table;

	if (sz_bytes < PAGE_SIZE) {
		print_early("Error: Mapping size less than PAGE_SIZE. "
			    "Mapping size should be in _bytes_ "
			    "not pages.\n");
		BUG();
	}

	/* Round up when size is not an exact multiple of a page */
	if (sz_bytes & PAGE_MASK)
		npages++;

	/* Convert generic map flags to arch specific flags */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all pages that cover given size */
	for (int i = 0; i < npages; i++) {
		/* Check if a pmd was attached previously */
		if (!(pmd_table = pmd_exists(&init_pgd, virtual))) {
			/* First mapping in pmd, allocate it */
			pmd_table = alloc_boot_pmd();

			/* Prepare the pte but don't sync */
			arch_prepare_pte(physical, virtual, flags,
					 &pmd_table->entry[PMD_INDEX(virtual)]);

			/* Attach pmd to its pgd and sync it */
			attach_pmd(&init_pgd, pmd_table, virtual);
		} else {
			/* Prepare, write the pte and sync */
			arch_prepare_write_pte(physical, virtual,
					       flags, &pmd_table->entry[PMD_INDEX(virtual)]);
		}

		/* Move on to the next page */
		physical += PAGE_SIZE;
		virtual += PAGE_SIZE;
	}
}
/* Map into the current task's page tables */
void add_mapping(unsigned long paddr, unsigned long vaddr,
		 unsigned int size, unsigned int flags)
{
	add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
}
/*
* Checks if a virtual address range has same or more permissive
* flags than the given ones, returns 0 if not, and 1 if OK.
*/
/*
 * Returns 1 when every page covering [vaddr, vaddr + size) is
 * mapped in @pgd with permissions at least as permissive as the
 * generic @flags, 0 otherwise.
 */
int check_mapping_pgd(unsigned long vaddr, unsigned long size,
		      unsigned int flags, pgd_table_t *pgd)
{
	unsigned int total = __pfn(align_up(size, PAGE_SIZE));
	pte_t entry;

	/* Convert generic map flags to pagetable-specific */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	for (int n = 0; n < total; n++) {
		entry = virt_to_pte_from_pgd(pgd, vaddr + n * PAGE_SIZE);

		/* One page with weaker perms fails the whole range */
		if (!arch_check_pte_access_perms(entry, flags))
			return 0;
	}

	return 1;
}
/* check_mapping_pgd() applied to the current task's page tables. */
int check_mapping(unsigned long vaddr, unsigned long size,
		  unsigned int flags)
{
	return check_mapping_pgd(vaddr, size, flags, TASK_PGD(current));
}
/*
* This can be made common for v5/v7, keeping split/page table
* and cache flush parts in arch-specific files.
*/
/*
 * Unmaps the single page covering @vaddr from @task_pgd.
 *
 * The pte is rewritten as a fault entry (and synced as the arch
 * requires); the containing pmd is left attached.
 *
 * Returns 0 on success, -ENOMAP if no pmd or pte translation
 * exists for the address.
 */
int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
{
	pmd_table_t *pmd_table;
	int pmd_i;
	pmd_t *pmd;
	unsigned int pmd_type, pte_type;

	vaddr = page_align(vaddr);
	pmd_i = PMD_INDEX(vaddr);

	/*
	 * Get the right pgd's pmd according to whether
	 * the address is global or task-specific.
	 */
	pmd = arch_pick_pmd(task_pgd, vaddr);

	pmd_type = *pmd & PMD_TYPE_MASK;
	if (pmd_type == PMD_TYPE_FAULT)
		return -ENOMAP;

	/* Anything else must be a proper pmd */
	BUG_ON(pmd_type != PMD_TYPE_PMD);

	/* Get the 2nd level pmd table */
	pmd_table = (pmd_table_t *)
		phys_to_virt((unsigned long)*pmd
			     & PMD_ALIGN_MASK);

	/* Get the pte type already there */
	pte_type = pmd_table->entry[pmd_i] & PTE_TYPE_MASK;

	/* If it's a fault we're done */
	if (pte_type == PTE_TYPE_FAULT)
		return -ENOMAP;
	/* It must be a small pte if not fault */
	else if (pte_type != PTE_TYPE_SMALL)
		BUG();

	/* Write to pte, also syncing it as required by arch */
	arch_prepare_write_pte(0, vaddr,
			       space_flags_to_ptflags(MAP_FAULT),
			       (pte_t *)&pmd_table->entry[pmd_i]);

	return 0;
}
/* Unmaps the page at @vaddr from the current task's page tables. */
int remove_mapping(unsigned long vaddr)
{
	return remove_mapping_pgd(TASK_PGD(current), vaddr);
}
/* Releases all userspace mappings of @space, then frees its pgd.
 * Always succeeds; kept int-returning for caller symmetry. */
int delete_page_tables(struct address_space *space)
{
	remove_mapping_pgd_all_user(space->pgd);
	free_pgd(space->pgd);
	return 0;
}
/*
* Copies userspace entries of one task to another.
* In order to do that, it allocates new pmds and
* copies the original values into new ones.
*/
/*
 * Duplicates the user-visible page tables of @orig_space into @new.
 * Returns 0 on success, -ENOMEM if a pmd allocation fails (all pmds
 * allocated so far are rolled back).
 *
 * NOTE(review): the rollback frees every non-global pmd found in the
 * destination pgd — this assumes @new->pgd held no private pmds of its
 * own before this call; confirm against how callers initialize it.
 */
int copy_user_tables(struct address_space *new,
		     struct address_space *orig_space)
{
	pgd_table_t *to = new->pgd, *from = orig_space->pgd;
	pmd_table_t *pmd, *orig;

	/* Allocate and copy all pmds that will be exclusive to new task. */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry that is not a global pmd? */
		if (!is_global_pgdi(i) &&
		    ((from->entry[i] & PMD_TYPE_MASK)
		     == PMD_TYPE_PMD)) {
			/* Allocate new pmd */
			if (!(pmd = alloc_pmd()))
				goto out_error;

			/* Find original pmd */
			orig = (pmd_table_t *)
				phys_to_virt((from->entry[i] &
					      PMD_ALIGN_MASK));

			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));

			/* Replace original pmd entry in pgd with new */
			to->entry[i] = (pmd_t)(virt_to_phys(pmd)
					       | PMD_TYPE_PMD);
		}
	}

	/* Just in case the new table is written to any ttbr
	 * after here, make sure all writes on it are complete. */
	dmb();

	return 0;

out_error:
	/* Find all non-kernel pmds we have just allocated and free them */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Non-kernel pmd that has just been allocated. */
		if (!is_global_pgdi(i) &&
		    (to->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
			/* Obtain the pmd handle */
			pmd = (pmd_table_t *)
				phys_to_virt((to->entry[i] &
					      PMD_ALIGN_MASK));
			/* Free pmd */
			free_pmd(pmd);
		}
	}
	return -ENOMEM;
}
/*
* Useful for upgrading to page-grained control
* over the kernel section mapping.
*
* Remaps a section mapping in pages. It allocates a pmd,
* fills in the page information, and replaces the direct
* section physical translation with the address of the
* pmd. Syncs the caches.
*
* NOTE: Assumes only a single pmd is enough.
*/
/*
 * Remaps the kernel region [vstart, vend) from section granularity
 * to 4K pages in the boot pgd, marking text/vector pages executable
 * and everything else RW. Assumes the range fits one pmd (see header
 * comment above) — there is no check enforcing that here.
 */
void remap_as_pages(void *vstart, void *vend)
{
	unsigned long pstart = virt_to_phys(vstart);
	unsigned long pend = virt_to_phys(vend);
	unsigned long paddr = pstart;
	unsigned long vaddr = (unsigned long)vstart;
	int pmd_i = PMD_INDEX(vstart);
	pgd_table_t *pgd = &init_pgd;
	pmd_table_t *pmd = alloc_boot_pmd();
	int npages = __pfn(pend - pstart);
	int map_flags;

	/* Map the whole kernel into the pmd first */
	for (int n = 0; n < npages; n++) {
		/* Map text pages as executable */
		if ((vaddr >= (unsigned long)_start_text &&
		     vaddr < page_align_up(_end_text)) ||
		    (vaddr >= (unsigned long)_start_vectors &&
		     vaddr < page_align_up(_end_vectors)))
			map_flags = MAP_KERN_RWX;
		else
			map_flags = MAP_KERN_RW;

		/* Fill ptes only; the single attach_pmd below syncs */
		arch_prepare_pte(paddr, vaddr,
				 space_flags_to_ptflags(map_flags),
				 &pmd->entry[pmd_i + n]);
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}

	/* Replace the section entry with the new pmd and sync */
	attach_pmd(pgd, pmd, (unsigned long)vstart);

	printk("%s: Kernel area 0x%lx - 0x%lx "
	       "remapped as %d pages\n", __KERNELNAME__,
	       (unsigned long)vstart, (unsigned long)vend,
	       npages);
}

60
src/arch/arm/memcpy.S Normal file
View File

@@ -0,0 +1,60 @@
/*
* Copyright 2010 B Labs.Ltd.
*
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*
* Description: Optimized memcpy for ARM
*
*/
#include INC_ARCH(asm.h)
/*
void*
_memcpy(void *dst, const void *src, register uint len)
*/
/*
 * void *_memcpy(void *dst, const void *src, uint len)
 *
 * Block copy using ldm/stm in shrinking chunks of 32, 16, 8 and
 * 4 bytes, then a byte-at-a-time tail. r0 (dst) is pushed on entry
 * and popped back on exit so the original dst is returned.
 * Assumes word-aligned dst/src for the ldm/stm phases.
 */
BEGIN_PROC(_memcpy)
	push	{r0, r4-r11, lr}
loop32:
	cmp	r2, #32			@ 32-byte chunks while len >= 32
	blt	loop16
	ldmia	r1!, {r4 - r11}
	stmia	r0!, {r4 - r11}
	sub	r2, r2, #32
	b	loop32
loop16:
	cmp	r2, #16			@ then 16-byte chunks
	blt	loop8
	ldmia	r1!, {r4 - r7}
	stmia	r0!, {r4 - r7}
	sub	r2, r2, #16
	b	loop16
loop8:
	cmp	r2, #8			@ then 8-byte chunks
	blt	loop4
	ldmia	r1!, {r4, r5}
	stmia	r0!, {r4, r5}
	sub	r2, r2, #8
	b	loop8
loop4:
	cmp	r2, #4			@ then single words
	blt	end
	ldmia	r1!, {r4}
	stmia	r0!, {r4}
	sub	r2, r2, #4
	b	loop4
end:
last:
	/* Byte tail: 0-3 bytes remain.
	 * NOTE(review): gt and ne are mixed after teq — gt also
	 * depends on the V flag, which teq leaves unchanged from the
	 * preceding subs. Fine for ordinary positive counts, but
	 * verify for len values with bit 31 set. */
	teq	r2, #0
	ldrgtb	r4, [r1]
	strneb	r4, [r0]	// V7 supports strneb <rt>, [<rb>, +/-<index>] !, with write back
	lsrne	r4, r4, #8
	subne	r2, r2, #1	// Can be reduced to 1 LDR, but has a catch if it is end of memory
	addne	r0, r0, #1	// we dont want to fetch 1 byte extra to end up in abort
	addne	r1, r1, #1	// so, playing safe, worst case 3 LDRs
	bne	last
1:
	pop	{r0, r4 - r11, pc}
END_PROC(_memcpy)

68
src/arch/arm/memset.S Normal file
View File

@@ -0,0 +1,68 @@
/*
* Copyright 2010 (C) B Labs.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
* Description: Optimized memset for ARM
*/
#include INC_ARCH(asm.h)
/*
void *
memset(void *dst, int c, int len)
*/
/*
 * void *_memset(void *dst, int c, int len)
 *
 * Replicates the fill byte across a word, then across up to eight
 * registers (r4-r11) sized to the remaining length, and stores in
 * 32/16/8/4-byte stm chunks with a byte-at-a-time tail.
 *
 * NOTE(review): the two-operand "orr r1, r1, lsl #8" form relies on
 * gas expanding it as "orr r1, r1, r1, lsl #8" — confirm the
 * assembler accepts this shorthand.
 */
BEGIN_PROC(_memset)
	stmfd	sp!, {r4 - r11, lr}
	and	r1, r1, #255		/* c &= 0xff */
	orr	r1, r1, lsl #8		/* c |= c<<8 */
	orr	r1, r1, lsl #16		/* c |= c<<16 */
	mov	r4, r1
	/* Seed only as many fill registers as the length needs */
	cmp	r2, #8
	blt	4f
	movge	r5, r4
	cmpge	r2, #16
	blt	8f
	movge	r6, r4
	movge	r7, r4
	cmpge	r2, #32
	blt	16f
	movge	r8, r4
	movge	r9, r4
	movge	r10, r4
	movge	r11, r4
32:
	cmp	r2, #32			@ 32-byte stores
	blt	16f
	stmia	r0!, {r4 - r11}
	sub	r2, r2, #32
	b	32b
16:
	cmp	r2, #16			@ 16-byte stores
	blt	8f
	stmia	r0!, {r4 - r7}
	sub	r2, r2, #16
	b	16b
8:
	cmp	r2, #8			@ 8-byte stores
	blt	4f
	stmia	r0!, {r4, r5}
	sub	r2, r2, #8
	b	8b
4:
	cmp	r2, #4			@ single-word stores
	blt	end
	stmia	r0!, {r4}
	sub	r2, r2, #4
	b	4b
end:
	teq	r2, #0			@ byte tail, 0-3 bytes
	strneb	r4, [r0, #0]
	subne	r2, r2, #1
	addne	r0, r0, #1
	bne	end

	ldmfd	sp!, {r4 - r11, pc}
END_PROC(_memset)

View File

@@ -31,5 +31,6 @@ BEGIN_PROC(arm_system_calls)
swi 0x14 @ kmem_control /* 0x2C */
swi 0x14 @ time /* 0x30 */
swi 0x14 @ mutex_control /* 0x34 */
swi 0x14 @ cache_control /* 0x38 */
END_PROC(arm_system_calls)

View File

@@ -4,7 +4,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['mm.c', 'mmu_ops.S', 'mutex.S', 'irq.S']
src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c']
obj = env.Object(src_local)
Return('obj')

32
src/arch/arm/v5/cache.c Normal file
View File

@@ -0,0 +1,32 @@
/*
 * Generic layer over ARMv5 specific cache calls
*
* Copyright B-Labs Ltd 2010.
*/
#include INC_SUBARCH(mmu_ops.h)
/*
 * v5 implementations of the generic cache-control hooks.
 * The @start/@end range arguments are ignored here: each wrapper
 * calls a whole-cache (or whole-TLB) ARMv5 maintenance operation.
 */
void arch_invalidate_dcache(unsigned long start, unsigned long end)
{
	arm_invalidate_dcache();
}

void arch_clean_invalidate_dcache(unsigned long start, unsigned long end)
{
	arm_clean_invalidate_dcache();
}

void arch_invalidate_icache(unsigned long start, unsigned long end)
{
	arm_invalidate_icache();
}

void arch_clean_dcache(unsigned long start, unsigned long end)
{
	arm_clean_dcache();
}

void arch_invalidate_tlb(unsigned long start, unsigned long end)
{
	arm_invalidate_tlb();
}

246
src/arch/arm/v5/exception.c Normal file
View File

@@ -0,0 +1,246 @@
/*
* Memory exception handling in process context.
*
* Copyright (C) 2007, 2008 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include <l4/generic/thread.h>
#include <l4/api/thread.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/platform.h>
#include <l4/lib/printk.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_ARCH(exception.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
#include INC_SUBARCH(mm.h)
/*
 * Classifies a data/prefetch abort by its fault status.
 *
 * Returns 0 for faults a userspace pager can resolve (page/section
 * translation and page permission faults) and -EABORT for everything
 * else, including any abort raised from kernel context.
 *
 * NOTE(review): dprintk here appears to take (message, value) — see
 * the "FATAL: FSR code: " call — so the "%x" inside some messages is
 * likely printed literally; confirm dprintk's signature.
 */
int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
{
	int ret = 0;

	/*
	 * On ARMv5, prefetch aborts dont have different
	 * status values. We validate them here and return.
	 */
	if (is_prefetch_abort(fsr)) {
		dbg_abort("Prefetch abort: 0x%x\n", faulted_pc);

		/* Happened in any mode other than user */
		if (!is_user_mode(spsr)) {
			dprintk("Unhandled kernel prefetch "
				"abort at address ", far);
			return -EABORT;
		}
		return 0;
	}

	switch (fsr & FSR_FS_MASK) {
	/* Aborts that are expected on page faults: */
	case DABT_PERM_PAGE:
		dbg_abort("Page permission fault 0x%x\n", far);
		ret = 0;
		break;
	case DABT_XLATE_PAGE:
		dbg_abort("Page translation fault 0x%x\n", far);
		ret = 0;
		break;
	case DABT_XLATE_SECT:
		dbg_abort("Section translation fault 0x%x\n", far);
		ret = 0;
		break;

	/* Aborts that can't be handled by a pager yet: */
	case DABT_TERMINAL:
		dprintk("Terminal fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_VECTOR:
		dprintk("Vector abort (obsolete!) %x", far);
		ret = -EABORT;
		break;
	case DABT_ALIGN:
		dprintk("Alignment fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL1:
		dprintk("External LVL1 translation fault %x", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL2:
		dprintk("External LVL2 translation fault %x", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_SECT:
		dprintk("Section domain fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_PAGE:
		dprintk("Page domain fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_PERM_SECT:
		dprintk("Section permission fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_SECT:
		dprintk("External section linefetch "
			"fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_PAGE:
		dprintk("Page perm fault dabt %x", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_SECT:
		dprintk("External section non-linefetch "
			"fault dabt %x ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_PAGE:
		dprintk("External page non-linefetch "
			"fault dabt %x ", far);
		ret = -EABORT;
		break;
	default:
		dprintk("FATAL: Unrecognised/Unknown "
			"data abort %x ", far);
		dprintk("FATAL: FSR code: ", fsr);
		ret = -EABORT;
	}

	/*
	 * Check validity of data abort's source.
	 *
	 * FIXME: Why not use spsr to do this?
	 */
	if (is_kernel_address(faulted_pc)) {
		dprintk("Unhandled kernel data "
			"abort at address %x",
			faulted_pc);
		ret = -EABORT;
	}

	return ret;
}
#if 0
void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
set_abort_type(fsr, ARM_DABT);
dbg_abort("Data abort @ PC: ", faulted_pc);
//printk("Data abort: %d, PC: 0x%x\n",
//current->tid, faulted_pc);
/* Check for more details */
if (check_aborts(faulted_pc, fsr, far) < 0) {
printascii("This abort can't be handled by "
"any pager.\n");
goto error;
}
/* This notifies the pager */
fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
sched_suspend_sync();
} else if (current->flags & TASK_EXITING) {
BUG_ON(current->nlocks);
sched_exit_sync();
}
return;
error:
disable_irqs();
dprintk("Unhandled data abort @ PC address: ", faulted_pc);
dprintk("FAR:", far);
dprintk("FSR:", fsr);
printascii("Kernel panic.\n");
printascii("Halting system...\n");
while (1)
;
}
void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
{
set_abort_type(fsr, ARM_PABT);
if (check_aborts(faulted_pc, fsr, far) < 0) {
printascii("This abort can't be handled by any pager.\n");
goto error;
}
/* Did the abort occur in kernel mode? */
if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
goto error;
fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
sched_suspend_sync();
} else if (current->flags & TASK_EXITING) {
BUG_ON(current->nlocks);
sched_exit_sync();
}
return;
error:
disable_irqs();
dprintk("Unhandled prefetch abort @ address: ", faulted_pc);
dprintk("FAR:", far);
dprintk("FSR:", fsr);
dprintk("Aborted PSR:", spsr);
printascii("Kernel panic.\n");
printascii("Halting system...\n");
while (1)
;
}
void undef_handler(u32 undef_addr, u32 spsr, u32 lr)
{
dbg_abort("Undefined instruction @ PC: ", undef_addr);
//printk("Undefined instruction: tid: %d, PC: 0x%x, Mode: %s\n",
// current->tid, undef_addr,
// (spsr & ARM_MODE_MASK) == ARM_MODE_SVC ? "SVC" : "User");
if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC) {
printk("Panic: Undef in Kernel\n");
goto error;
}
fault_ipc_to_pager(undef_addr, 0, undef_addr, L4_IPC_TAG_UNDEF_FAULT);
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
sched_suspend_sync();
} else if (current->flags & TASK_EXITING) {
BUG_ON(current->nlocks);
sched_exit_sync();
}
return;
error:
disable_irqs();
dprintk("SPSR:", spsr);
dprintk("LR:", lr);
printascii("Kernel panic.\n");
printascii("Halting system...\n");
while(1)
;
}
#endif

146
src/arch/arm/v5/init.c Normal file
View File

@@ -0,0 +1,146 @@
/*
* ARM v5 specific init routines
*
* Copyright (C) 2007 - 2010 B Labs Ltd.
*/
#include <l4/generic/tcb.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/platform.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)
SECTION(".data.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
struct address_space init_space;
/* Stub: v5 does no CPU/system identification at boot yet. */
void system_identify(void)
{
}
/*
 * Restores @task's saved user context and jumps to userspace.
 * The context area sits at the base of the ktcb: SPSR first, then
 * r0-r14, then PC_USR at offset 64. Never returns.
 */
void jump(struct ktcb *task)
{
	__asm__ __volatile__ (
		"mov	lr,	%0\n"	/* Load pointer to context area */
		"ldr	r0,	[lr]\n"	/* Load spsr value to r0 */
		"msr	spsr, r0\n"	/* Set SPSR as ARM_MODE_USR */
		"add	sp, lr, %1\n"	/* Reset SVC stack */
		"sub	sp, sp, %2\n"	/* Align to stack alignment */
		"ldmib	lr, {r0-r14}^\n" /* Load all USR registers */
		"nop		\n"	/* Spec says dont touch banked registers
					 * right after LDM {no-pc}^ for one instruction */
		"add	lr, lr, #64\n"	/* Manually move to PC location. */
		"ldr	lr,	[lr]\n"	/* Load the PC_USR to LR */
		"movs	pc, lr\n"	/* Jump to userspace, also switching SPSR/CPSR */
		:
		: "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
	);
}
/*
 * Switches to @task's address space and enters userspace via jump().
 * Caches are cleaned/invalidated and the TLB flushed around the TTB
 * write because v5 caches are virtually indexed. Never returns.
 */
void switch_to_user(struct ktcb *task)
{
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(TASK_PGD(task)));
	arm_invalidate_tlb();
	jump(task);
}
/* Maps the early memory regions needed to bootstrap the system */
/* Maps the early memory regions needed to bootstrap the system:
 * one cached/buffered section at the kernel's virtual home, and one
 * uncached identity (1:1) section so execution survives the MMU
 * being switched on. */
void init_kernel_mappings(void)
{
	//memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));

	/* Map kernel area to its virtual region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align((unsigned int)_start_text, SZ_1MB), 1,
				 cacheable | bufferable);

	/* Map kernel one-to-one to its physical region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align(virt_to_phys(_start_text), SZ_1MB),
				 1, 0);
}
/*
* Enable virtual memory using kernel's pgd
* and continue execution on virtual addresses.
*/
/*
 * Turns the MMU on using init_pgd and relocates execution to the
 * kernel's virtual addresses. The caller must never return (its
 * saved LR on the stack is still a physical address — see the note
 * at the bottom).
 */
void start_virtual_memory()
{
	/*
	 * TTB must be 16K aligned. This is because first level tables are
	 * sized 16K.
	 */
	if ((unsigned int)&init_pgd & 0x3FFF)
		dprintk("kspace not properly aligned for ttb:",
			(u32)&init_pgd);
	// memset((void *)&kspace, 0, sizeof(pgd_table_t));
	arm_set_ttb(virt_to_phys(&init_pgd));

	/*
	 * This sets all 16 domains to zero and domain 0 to 1. The outcome
	 * is that page table access permissions are in effect for domain 0.
	 * All other domains have no access whatsoever.
	 */
	arm_set_domain(1);

	/* Enable everything before mmu permissions are in place */
	arm_enable_caches();
	arm_enable_wbuffer();

	arm_enable_high_vectors();

	/*
	 * Leave the past behind. Tlbs are invalidated, write buffer is drained.
	 * The whole of I + D caches are invalidated unconditionally. This is
	 * important to ensure that the cache is free of previously loaded
	 * values. Otherwise unpredictable data aborts may occur at arbitrary
	 * times, each time a load/store operation hits one of the invalid
	 * entries and those entries are cleaned to main memory.
	 */
	arm_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_enable_mmu();

	/* Jump to virtual memory addresses */
	__asm__ __volatile__ (
		"add	sp, sp, %0	\n"	/* Update stack pointer */
		"add	fp, fp, %0	\n"	/* Update frame pointer */
		/* On the next instruction below, r0 gets
		 * current PC + KOFFSET + 2 instructions after itself. */
		"add	r0, pc, %0	\n"
		/* Special symbol that is extracted and included in the loader.
		 * Debuggers can break on it to load the virtual symbol table */
		".global break_virtual;\n"
		"break_virtual:\n"
		"mov	pc, r0		\n"	/* (r0 has next instruction) */
		:
		: "r" (KERNEL_OFFSET)
		: "r0"
	);

	/*
	 * Restore link register (LR) for this function.
	 *
	 * NOTE: LR values are pushed onto the stack at each function call,
	 * which means the restored return values will be physical for all
	 * functions in the call stack except this function. So the caller
	 * of this function must never return but initiate scheduling etc.
	 */
	__asm__ __volatile__ (
		"add	%0, %0, %1	\n"
		"mov	pc, %0		\n"
		:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
	);

	/* should never come here */
	while(1);
}

72
src/arch/arm/v5/irq.c Normal file
View File

@@ -0,0 +1,72 @@
/*
* Low-level irq routines.
*
* Copyright (C) 2010 B Labs Ltd.
* Written by Bahadir Balban
* Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
/*
 * Masks IRQs on the local cpu by setting the CPSR I-bit (0x80),
 * saving the previous CPSR in *state for irq_local_restore().
 */
void irq_local_disable_save(unsigned long *state)
{
	unsigned int tmp, tmp2;
	__asm__ __volatile__ (
		"mrs	%0, cpsr_fc	\n"	/* read current CPSR */
		"orr	%1, %0, #0x80	\n"	/* set I-bit in a copy */
		"msr	cpsr_fc, %1	\n"	/* write back: IRQs off */
		: "=&r"(tmp), "=r"(tmp2)
		:
		: "cc"
	);
	*state = tmp;
}
/* Restores a CPSR value saved by irq_local_disable_save(). */
void irq_local_restore(unsigned long state)
{
	__asm__ __volatile__ (
		"msr	cpsr_fc, %0\n"
		:
		: "r"(state)
		: "cc"
	);
}
/*
 * Destructively reads a byte flag: returns the low byte of *location
 * and zeroes the word, atomically with respect to local interrupts
 * only (IRQs are masked around the read-modify-write; no SMP
 * protection). The swpb variant is kept disabled below.
 */
u8 l4_atomic_dest_readb(unsigned long *location)
{
#if 0
	unsigned int tmp;

	__asm__ __volatile__ (
		"swpb	%0, r2, [%1]	\n"
		: "=r"(tmp)
		: "r"(location), "r"(0)
		: "memory"
	);

	return (u8)tmp;
#endif
	unsigned int tmp;
	unsigned long state;

	irq_local_disable_save(&state);
	tmp = *location;
	*location = 0;
	irq_local_restore(state);

	return (u8)tmp;
}
/* Returns 1 if IRQs are currently enabled on this cpu, 0 if the
 * CPSR I-bit (0x80) is set. */
int irqs_enabled(void)
{
	int cpsr;

	__asm__ __volatile__ (
		"mrs %0, cpsr_fc\n"
		: "=r"(cpsr)
	);

	/* I-bit set means IRQs are masked */
	return !(cpsr & 0x80);
}

381
src/arch/arm/v5/mapping.c Normal file
View File

@@ -0,0 +1,381 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/platform.h>
#include <l4/generic/debug.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)
#include INC_ARCH(io.h)
/*
* Removes initial mappings needed for transition to virtual memory.
* Used one-time only.
*/
/*
 * Removes one boot-time section mapping from init_pgd, replacing
 * the entry with a fault entry and flushing the TLB. Hangs forever
 * if the entry was not a section (assertion-by-loop; NOTE(review):
 * the bitwise "& PMD_TYPE_SECTION" test passes on any entry sharing
 * a bit with the section type, not only exact section entries).
 */
void remove_section_mapping(unsigned long vaddr)
{
	pgd_table_t *pgd = &init_pgd;
	pmd_t pgd_i = PGD_INDEX(vaddr);
	if (!((pgd->entry[pgd_i] & PMD_TYPE_MASK)
	      & PMD_TYPE_SECTION))
		while(1);
	pgd->entry[pgd_i] = 0;
	pgd->entry[pgd_i] |= PMD_TYPE_FAULT;
	arm_invalidate_tlb();
}
/*
* Maps given section-aligned @paddr to @vaddr using enough number
* of section-units to fulfill @size in sections. Note this overwrites
* a mapping if same virtual address was already mapped.
*/
/*
 * Writes @size consecutive 1MB section entries mapping @paddr at
 * @vaddr directly into the physical init_pgd (runs before the MMU
 * is on). Entries get kernel-only access plus the given
 * cacheability @flags. Overwrites any existing entries.
 */
void __add_section_mapping_init(unsigned int paddr,
				unsigned int vaddr,
				unsigned int size,
				unsigned int flags)
{
	pte_t *ppte;
	unsigned int l1_ptab;
	unsigned int l1_offset;

	/* 1st level page table address */
	l1_ptab = virt_to_phys(&init_pgd);

	/* Get the section offset for this vaddr */
	l1_offset = (vaddr >> 18) & 0x3FFC;

	/* The beginning entry for mapping */
	ppte = (unsigned int *)(l1_ptab + l1_offset);
	for(int i = 0; i < size; i++) {
		*ppte = 0;		/* Clear out old value */
		*ppte |= paddr;		/* Assign physical address */
		*ppte |= PMD_TYPE_SECTION; /* Assign translation type */
		/* Domain is 0, therefore no writes. */
		/* Only kernel access allowed */
		*ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
		/* Cacheability/Bufferability flags */
		*ppte |= flags;
		ppte++;			/* Next section entry */
		paddr += SECTION_SIZE;	/* Next physical section */
	}
	return;
}
/*
 * Section-aligns @paddr/@vaddr and installs @size section mappings
 * (1MB each) into the boot page tables. A zero @size is a no-op.
 */
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
			      unsigned int size, unsigned int flags)
{
	if (size == 0)
		return;

	/* Align both addresses down to their section base */
	__add_section_mapping_init(paddr & ~SECTION_MASK,
				   vaddr & ~SECTION_MASK,
				   size, flags);
}
/*
 * Builds a v5 pte value for @paddr with @flags and stores it at
 * *@ptep without any cache/TLB syncing (callers sync separately).
 * Both addresses must be page-aligned; BUGs otherwise.
 */
void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
		      pte_t *ptep)
{
	/* They must be aligned at this stage */
	if (!is_page_aligned(paddr) || !is_page_aligned(vaddr)) {
		printk("address not aligned, phys address %x"
		       " virtual address %x\n", paddr, vaddr);
		BUG();
	}

	/*
	 * NOTE: In v5, the flags converted from generic
	 * by space_flags_to_ptflags() can be directly
	 * written to the pte. No further conversion is needed.
	 * Therefore this function doesn't do much on flags. In
	 * contrast in ARMv7 the flags need an extra level of
	 * processing.
	 */
	if (flags == __MAP_FAULT)
		*ptep = paddr | flags | PTE_TYPE_FAULT;
	else
		*ptep = paddr | flags | PTE_TYPE_SMALL;
}
/*
 * Writes a prepared pte value to the page table and syncs: the
 * current implementation brute-forces a full clean+invalidate of
 * the (virtually indexed) caches before and after the write plus a
 * full TLB flush — see the FIXMEs for the intended per-MVA version.
 */
void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
{
	/* FIXME:
	 * Clean the dcache and invalidate the icache
	 * for the old translation first?
	 *
	 * The dcache is virtual, therefore the data
	 * in those entries should be cleaned first,
	 * before the translation of that virtual
	 * address is changed to a new physical address.
	 *
	 * Check that the entry was not faulty first.
	 */
	arm_clean_invalidate_cache();

	*ptep = pte;

	/* FIXME: Fix this!
	 * - Use vaddr to clean the dcache pte by MVA.
	 * - Use mapped area to invalidate the icache
	 * - Invalidate the tlb for mapped area
	 */
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
}
/* Builds a pte for @paddr/@vaddr with @flags and installs it via
 * arch_write_pte(), which performs the required cache/TLB syncing. */
void arch_prepare_write_pte(u32 paddr, u32 vaddr,
			    unsigned int flags, pte_t *ptep)
{
	pte_t new_pte = 0;

	/* Both addresses must already be page-aligned here */
	BUG_ON(!is_page_aligned(paddr));
	BUG_ON(!is_page_aligned(vaddr));

	arch_prepare_pte(paddr, vaddr, flags, &new_pte);

	arch_write_pte(ptep, new_pte, vaddr);
}
/* Returns the first-level entry of @pgd that covers @vaddr.
 * On v5 every address simply indexes the task's own pgd. */
pmd_t *
arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
{
	unsigned long index = PGD_INDEX(vaddr);

	return &pgd->entry[index];
}
/*
* v5 pmd writes
*/
/* Installs the physical address of a second-level table into the
 * first-level entry *@pmd_entry, then syncs caches and TLB. */
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
{
	/* FIXME: Clean the dcache if there was a valid entry */
	*pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
	arm_clean_invalidate_cache(); /*FIXME: Write these properly! */
	arm_invalidate_tlb();
}
/* Returns 1 if @pte's protection bits are at least as permissive
 * as those in @flags, 0 otherwise. */
int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
{
	return (pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK) ? 1 : 0;
}
/*
* Tell if a pgd index is a common kernel index.
* This is used to distinguish common kernel entries
* in a pgd, when copying page tables.
*/
/*
 * Tells whether pgd index @i falls in one of the globally shared
 * kernel regions (kernel image, I/O area, KIP page, vectors,
 * syscall page, early console). Used while copying page tables to
 * recognize entries common to every task.
 */
int is_global_pgdi(int i)
{
	if (i >= PGD_INDEX(KERNEL_AREA_START) &&
	    i < PGD_INDEX(KERNEL_AREA_END))
		return 1;
	if (i >= PGD_INDEX(IO_AREA_START) &&
	    i < PGD_INDEX(IO_AREA_END))
		return 1;
	if (i == PGD_INDEX(USER_KIP_PAGE) ||
	    i == PGD_INDEX(ARM_HIGH_VECTOR) ||
	    i == PGD_INDEX(ARM_SYSCALL_VECTOR) ||
	    i == PGD_INDEX(USERSPACE_CONSOLE_VBASE))
		return 1;
	return 0;
}
extern pmd_table_t *pmd_array;
/*
 * Frees every task-private second-level table referenced by @pgd
 * and marks the corresponding first-level entries as fault.
 * Global (kernel-shared) entries are left untouched.
 */
void remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
	pmd_table_t *pmd;

	/* Traverse through all pgd entries. */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		if (!is_global_pgdi(i)) {
			/* Detect a pmd entry */
			if (((pgd->entry[i] & PMD_TYPE_MASK)
			     == PMD_TYPE_PMD)) {
				/* Obtain the user pmd handle */
				pmd = (pmd_table_t *)
					phys_to_virt((pgd->entry[i] &
						      PMD_ALIGN_MASK));
				/* Free it */
				free_pmd(pmd);
			}

			/* Clear the pgd entry */
			pgd->entry[i] = PMD_TYPE_FAULT;
		}
	}
}
/* Counts the second-level tables currently attached to the boot pgd. */
int pgd_count_boot_pmds()
{
	int count = 0;
	pgd_table_t *boot_pgd = &init_pgd;

	for (int idx = 0; idx < PGD_ENTRY_TOTAL; idx++)
		if ((boot_pgd->entry[idx] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
			count++;

	return count;
}
/*
* Jumps from boot pmd/pgd page tables to tables allocated from the cache.
*/
/*
 * Migrates from the static boot pgd/pmds to tables allocated from
 * the runtime cache, then re-points the TTB at the new pgd.
 * Returns the new pgd. Cache/TLB maintenance around arm_set_ttb is
 * required because v5 caches are virtually indexed.
 */
pgd_table_t *arch_realloc_page_tables(void)
{
	pgd_table_t *pgd_new = alloc_pgd();
	pgd_table_t *pgd_old = &init_pgd;
	pmd_table_t *orig, *pmd;

	/* Copy whole pgd entries */
	memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));

	/* Allocate and copy all pmds */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry */
		if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
			/* Allocate new pmd */
			if (!(pmd = alloc_pmd())) {
				printk("FATAL: PMD allocation "
				       "failed during system initialization\n");
				BUG();
			}

			/* Find original pmd */
			orig = (pmd_table_t *)
				phys_to_virt((pgd_old->entry[i] &
					      PMD_ALIGN_MASK));

			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));

			/* Replace original pmd entry in pgd with new */
			pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
			pgd_new->entry[i] |= PMD_TYPE_PMD;
		}
	}

	/* Switch the virtual memory system into new area */
	arm_clean_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd_new));
	arm_invalidate_tlb();

	printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
	       __KERNELNAME__, virt_to_phys(pgd_old),
	       virt_to_phys(pgd_new));

	return pgd_new;
}
/*
* Copies global kernel entries into another pgd. Even for
* sub-pmd ranges the associated pmd entries are copied,
* assuming any pmds copied are applicable to all tasks in
* the system.
*/
/*
 * Copies the global pgd entries covering [start, end) from @from to
 * @to, first widening the range to whole-pmd boundaries. An @end of
 * 0 encodes "top of address space": both the wrap check below and
 * copy_pgds_by_vrange() treat 0 specially, since aligning the last
 * virtual pmd upward would overflow.
 */
void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
			       unsigned long start, unsigned long end)
{
	/* Extend sub-pmd ranges to their respective pmd boundaries */
	start = align(start, PMD_MAP_SIZE);

	if (end < start)
		end = 0;

	/* Aligning would overflow if mapping the last virtual pmd */
	if (end < align(~0, PMD_MAP_SIZE) ||
	    start > end) /* end may have already overflown as input */
		end = align_up(end, PMD_MAP_SIZE);
	else
		end = 0;

	copy_pgds_by_vrange(to, from, start, end);
}
/*
 * Copies the pgd entries for [start, end) from @from to @to.
 * An @end index of 0 means "through the last pgd entry".
 */
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
			 unsigned long start, unsigned long end)
{
	unsigned long start_i = PGD_INDEX(start);
	unsigned long end_i = PGD_INDEX(end);
	unsigned long irange = (end_i != 0) ? (end_i - start_i)
			       : (PGD_ENTRY_TOTAL - start_i);

	memcpy(&to->entry[start_i], &from->entry[start_i],
	       irange * sizeof(pmd_t));
}
/*
 * Seeds a fresh task pgd @to with every globally shared region,
 * copied from the current task's pgd: kernel image, I/O area,
 * KIP page, high/syscall vectors, and (temporarily) the uart
 * console page.
 */
void arch_copy_pgd_kernel_entries(pgd_table_t *to)
{
	pgd_table_t *from = TASK_PGD(current);

	copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
				  KERNEL_AREA_END);
	copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
	copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
				  USER_KIP_PAGE + PAGE_SIZE);
	copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
				  ARM_HIGH_VECTOR + PAGE_SIZE);
	copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
				  ARM_SYSCALL_VECTOR + PAGE_SIZE);

	/* We temporarily map uart registers to every process */
	copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
				  USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
}
/* Publishes the current thread's utcb address to userspace
 * through the kernel interface page. */
void arch_update_utcb(unsigned long utcb_address)
{
	/* Update the KIP pointer */
	kip.utcb = utcb_address;
}
/* Scheduler uses this to switch context */
/*
 * Switches the MMU to @to's page tables. Full cache clean +
 * invalidate and TLB flushes surround the TTB write because the
 * v5 caches are virtually indexed/tagged.
 */
void arch_space_switch(struct ktcb *to)
{
	pgd_table_t *pgd = TASK_PGD(to);

	system_account_space_switch();

	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd));
	arm_invalidate_tlb();
}
/* Runs when nothing else is runnable: reaps exited threads and
 * yields back to the scheduler. Never returns. */
void idle_task(void)
{
	while(1) {
		/* Free resources of threads that have exited */
		tcb_delete_zombies();
		// printk("Idle task.\n");
		schedule();
	}
}

View File

@@ -88,7 +88,7 @@ END_PROC(arm_enable_high_vectors)
BEGIN_PROC(arm_invalidate_cache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c7 @ Flush I cache and D cache
mcr p15, 0, r0, c7, c7, 0 @ Flush I cache and D cache
mov pc, lr
END_PROC(arm_invalidate_cache)
@@ -105,7 +105,7 @@ BEGIN_PROC(arm_invalidate_dcache)
END_PROC(arm_invalidate_dcache)
BEGIN_PROC(arm_clean_dcache)
mrc p15, 0 , pc, c7, c10, 3 @ Test/clean dcache line
mrc p15, 0 , r15, c7, c10, 3 @ Test/clean dcache line
bne arm_clean_dcache
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
@@ -113,7 +113,7 @@ END_PROC(arm_clean_dcache)
BEGIN_PROC(arm_clean_invalidate_dcache)
1:
mrc p15, 0, pc, c7, c14, 3 @ Test/clean/flush dcache line
mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
@ COMMENT: Why use PC?
bne 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain WB

75
src/arch/arm/v5/mutex.c Normal file
View File

@@ -0,0 +1,75 @@
/*
* ARM v5 Binary semaphore (mutex) implementation.
*
* Copyright (C) 2007-2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
#include <l4/lib/printk.h>
/* Recap on swp:
* swp rx, ry, [rz]
* In one instruction:
* 1) Stores the value in ry into location pointed by rz.
* 2) Loads the value in the location of rz into rx.
* By doing so, in one instruction one can attempt to lock
* a word, and discover whether it was already locked.
*/
#define MUTEX_UNLOCKED 0
#define MUTEX_LOCKED 1
/*
 * Spins until the swp exchange observes 0 at *s.
 *
 * NOTE(review): this writes MUTEX_UNLOCKED (0) into *s while
 * __spin_unlock() below writes 1 — the inverse of the convention
 * used by __mutex_lock()/__mutex_unlock(). The lock/unlock store
 * values look swapped; confirm against callers before relying on
 * this pair.
 */
void __spin_lock(unsigned long *s)
{
	int tmp = 0, tmp2;
	__asm__ __volatile__(
		"1:			\n"
		"swp %0, %1, [%2]	\n"	/* tmp2 = *s; *s = tmp */
		"teq %0, %3		\n"	/* retry until old value was 0 */
		"bne 1b			\n"
		: "=&r" (tmp2)
		: "r" (tmp), "r"(s), "r"(0)
		: "cc", "memory"
	);
}
/*
 * Stores 1 into *s, retrying until the previous value read back 0.
 *
 * NOTE(review): see __spin_lock() — the 0/1 store values here are
 * the inverse of the __mutex_* convention and look swapped;
 * verify intended lock polarity.
 */
void __spin_unlock(unsigned long *s)
{
	int tmp = 1, tmp2;
	__asm__ __volatile__(
		"1:			\n"
		"swp %0, %1, [%2]	\n"	/* tmp2 = *s; *s = tmp */
		"teq %0, %3		\n"	/* retry until old value was 0 */
		"bne 1b			\n"
		: "=&r" (tmp2)
		: "r" (tmp), "r"(s), "r"(0)
		: "cc", "memory"
	);
}
/*
 * Non-blocking lock attempt: atomically swaps MUTEX_LOCKED into *s.
 * Returns 1 if the lock was acquired (it was previously unlocked),
 * 0 if someone else already holds it.
 */
int __mutex_lock(unsigned long *s)
{
	int tmp = MUTEX_LOCKED, tmp2;
	__asm__ __volatile__(
		"swp %0, %1, [%2]	\n"	/* tmp2 = *s; *s = LOCKED */
		: "=&r"(tmp2)
		: "r"(tmp), "r"(s)
		: "cc", "memory"
	);
	if (tmp2 == MUTEX_UNLOCKED)
		return 1;

	return 0;
}
/*
 * Releases a mutex taken with __mutex_lock(): atomically swaps
 * MUTEX_UNLOCKED into *s. BUGs if the mutex was not held — that
 * would indicate an unlock without a matching lock.
 */
void __mutex_unlock(unsigned long *s)
{
	int tmp, tmp2=MUTEX_UNLOCKED;
	__asm__ __volatile__(
		"swp %0, %1, [%2]	\n"	/* tmp = *s; *s = UNLOCKED */
		: "=&r"(tmp)
		: "r"(tmp2), "r"(s)
		: "cc", "memory"
	);
	BUG_ON(tmp != MUTEX_LOCKED);
}

View File

@@ -4,7 +4,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['mm.c', 'mmu_ops.S', 'mutex.S']
src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'mutex.c', 'irq.c', 'init.c', 'cpu_startup.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,36 @@
/*
* Copyright (C) 2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
#include INC_CPU(cpu.h)
//#include INC_SUBARCH(cpu.h)
//#include INC_ARCH(cpu.h)
/* This code is guaranteed to be executed before MMU is enabled */
/* This code is guaranteed to be executed before MMU is enabled */
/* Currently a stub: the comments below sketch the planned cache /
 * branch-prediction / SMP setup; nothing is executed yet. */
void cpu_startup(void)
{
	/* For now this should have
	 * cache disabling
	 * branch prediction disabling
	 */

	/* Here enable the common bits
	 * cache
	 * branch prediction
	 * write buffers
	 */

	/* Enable V6 page tables */
	//unsigned int val = arm_get_cp15_cr() | 1<<23;
	//arm_set_cp15_cr(val);
#if defined (CONFIG_SMP)
	/* Enable SCU*/
	/* Enable SMP bit in CP15 */
#endif
}

246
src/arch/arm/v6/exception.c Normal file
View File

@@ -0,0 +1,246 @@
/*
* Memory exception handling in process context.
*
* Copyright (C) 2007, 2008 Bahadir Balban
*/
#include <l4/generic/scheduler.h>
#include <l4/generic/thread.h>
#include <l4/api/thread.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/platform.h>
#include <l4/lib/printk.h>
#include <l4/api/ipc.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include INC_ARCH(exception.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
#include INC_SUBARCH(mm.h)
/*
 * Classify a fault and decide whether it is one a userspace pager
 * can resolve.
 *
 * @faulted_pc: PC of the aborted instruction
 * @fsr:        fault status register contents
 * @far:        fault address register contents
 * @spsr:       saved PSR of the aborted context
 *
 * Returns 0 if the fault may be forwarded to a pager,
 * -EABORT if it cannot be handled.
 */
int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
{
	int ret = 0;

	/*
	 * On ARMv5, prefetch aborts dont have different
	 * status values. We validate them here and return.
	 */
	if (is_prefetch_abort(fsr)) {
		dbg_abort("Prefetch abort @ ", faulted_pc);

		/* Happened in any mode other than user */
		if (!is_user_mode(spsr)) {
			dprintk("Unhandled kernel prefetch "
				"abort at address ", far);
			return -EABORT;
		}
		return 0;
	}

	switch (fsr & FSR_FS_MASK) {
	/* Aborts that are expected on page faults: */
	case DABT_PERM_PAGE:
		dbg_abort("Page permission fault @ ", far);
		ret = 0;
		break;
	case DABT_XLATE_PAGE:
		dbg_abort("Page translation fault @ ", far);
		ret = 0;
		break;
	case DABT_XLATE_SECT:
		dbg_abort("Section translation fault @ ", far);
		ret = 0;
		break;
	/* Aborts that can't be handled by a pager yet: */
	case DABT_TERMINAL:
		dprintk("Terminal fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_VECTOR:
		dprintk("Vector abort (obsolete!) @ ", far);
		ret = -EABORT;
		break;
	case DABT_ALIGN:
		dprintk("Alignment fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL1:
		dprintk("External LVL1 translation fault @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL2:
		dprintk("External LVL2 translation fault @ ", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_SECT:
		dprintk("Section domain fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_PAGE:
		dprintk("Page domain fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_PERM_SECT:
		dprintk("Section permission fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_SECT:
		dprintk("External section linefetch "
			"fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_PAGE:
		dprintk("Page perm fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_SECT:
		dprintk("External section non-linefetch "
			"fault dabt @ ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_PAGE:
		dprintk("External page non-linefetch "
			"fault dabt @ ", far);
		ret = -EABORT;
		break;
	default:
		dprintk("FATAL: Unrecognised/Unknown "
			"data abort @ ", far);
		dprintk("FATAL: FSR code: ", fsr);
		ret = -EABORT;
	}

	/*
	 * Check validity of data abort's source.
	 *
	 * FIXME: Why not use spsr to do this?
	 *
	 * NOTE: this runs even when ret is already -EABORT, so the
	 * diagnostic below is printed for kernel-address faults
	 * regardless of the switch outcome.
	 */
	if (is_kernel_address(faulted_pc)) {
		dprintk("Unhandled kernel data "
			"abort at address ",
			faulted_pc);
		ret = -EABORT;
	}

	return ret;
}
#if 0
/*
 * Data abort entry point (currently compiled out by the enclosing
 * #if 0): validates the abort, forwards it to the task's pager via
 * IPC, and honors pending suspend/exit requests on return.
 * Unhandleable aborts panic the kernel.
 */
void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
	set_abort_type(fsr, ARM_DABT);

	dbg_abort("Data abort @ PC: ", faulted_pc);

	//printk("Data abort: %d, PC: 0x%x\n",
	//current->tid, faulted_pc);

	/* Check for more details */
	if (check_aborts(faulted_pc, fsr, far) < 0) {
		printascii("This abort can't be handled by "
			   "any pager.\n");
		goto error;
	}

	/* This notifies the pager */
	fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);

	/* Handle any suspend/exit flagged while we were in the pager */
	if (current->flags & TASK_SUSPENDING) {
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	} else if (current->flags & TASK_EXITING) {
		BUG_ON(current->nlocks);
		sched_exit_sync();
	}

	return;

error:
	disable_irqs();
	dprintk("Unhandled data abort @ PC address: ", faulted_pc);
	dprintk("FAR:", far);
	dprintk("FSR:", fsr);
	printascii("Kernel panic.\n");
	printascii("Halting system...\n");
	while (1)
		;
}
/*
 * Prefetch abort entry point (currently compiled out by the
 * enclosing #if 0): kernel-mode prefetch aborts panic; user-mode
 * ones are forwarded to the pager as page-fault IPC.
 */
void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
{
	set_abort_type(fsr, ARM_PABT);

	if (check_aborts(faulted_pc, fsr, far) < 0) {
		printascii("This abort can't be handled by any pager.\n");
		goto error;
	}

	/* Did the abort occur in kernel mode? */
	if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
		goto error;

	fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);

	/* Handle any suspend/exit flagged while we were in the pager */
	if (current->flags & TASK_SUSPENDING) {
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	} else if (current->flags & TASK_EXITING) {
		BUG_ON(current->nlocks);
		sched_exit_sync();
	}

	return;

error:
	disable_irqs();
	dprintk("Unhandled prefetch abort @ address: ", faulted_pc);
	dprintk("FAR:", far);
	dprintk("FSR:", fsr);
	dprintk("Aborted PSR:", spsr);
	printascii("Kernel panic.\n");
	printascii("Halting system...\n");
	while (1)
		;
}
/*
 * Undefined-instruction entry point (currently compiled out by the
 * enclosing #if 0): kernel-mode undefs panic; user-mode undefs are
 * forwarded to the pager with L4_IPC_TAG_UNDEF_FAULT.
 */
void undef_handler(u32 undef_addr, u32 spsr, u32 lr)
{
	dbg_abort("Undefined instruction @ PC: ", undef_addr);

	//printk("Undefined instruction: tid: %d, PC: 0x%x, Mode: %s\n",
	//	current->tid, undef_addr,
	//	(spsr & ARM_MODE_MASK) == ARM_MODE_SVC ? "SVC" : "User");

	/* An undef in SVC mode is a kernel bug */
	if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC) {
		printk("Panic: Undef in Kernel\n");
		goto error;
	}

	fault_ipc_to_pager(undef_addr, 0, undef_addr, L4_IPC_TAG_UNDEF_FAULT);

	/* Handle any suspend/exit flagged while we were in the pager */
	if (current->flags & TASK_SUSPENDING) {
		BUG_ON(current->nlocks);
		sched_suspend_sync();
	} else if (current->flags & TASK_EXITING) {
		BUG_ON(current->nlocks);
		sched_exit_sync();
	}

	return;

error:
	disable_irqs();
	dprintk("SPSR:", spsr);
	dprintk("LR:", lr);
	printascii("Kernel panic.\n");
	printascii("Halting system...\n");
	while(1)
		;
}
#endif

139
src/arch/arm/v6/init.c Normal file
View File

@@ -0,0 +1,139 @@
/*
 * ARM v6 specific init routines
*
* Copyright (C) 2007 - 2010 B Labs Ltd.
*/
#include <l4/generic/tcb.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/platform.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)
SECTION(".init.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
/*
 * Restore a task's saved user-mode register context and jump to
 * userspace.  The context area pointed at by @task starts with the
 * saved SPSR, followed by r0-r14 and finally PC_USR (at offset 64).
 * Never returns.
 */
void jump(struct ktcb *task)
{
	__asm__ __volatile__ (
		"mov	lr, %0\n"	/* Load pointer to context area */
		"ldr	r0, [lr]\n"	/* Load spsr value to r0 */
		"msr	spsr, r0\n"	/* Set SPSR as ARM_MODE_USR */
		"add	sp, lr, %1\n"	/* Reset SVC stack */
		"sub	sp, sp, %2\n"	/* Align to stack alignment */
		"ldmib	lr, {r0-r14}^\n" /* Load all USR registers */
		"nop	\n"		/* Spec says dont touch banked registers
					 * right after LDM {no-pc}^ for one instruction */
		"add	lr, lr, #64\n"	/* Manually move to PC location. */
		"ldr	lr, [lr]\n"	/* Load the PC_USR to LR */
		"movs	pc, lr\n"	/* Jump to userspace, also switching SPSR/CPSR */
		:
		: "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
	);
}
/*
 * Flush caches and TLBs, install @task's page tables, and enter the
 * task in userspace.  The cache must be cleaned before TTB changes
 * and the TLB invalidated on both sides of the switch; do not
 * reorder these calls.
 */
void switch_to_user(struct ktcb *task)
{
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(TASK_PGD(task)));
	arm_invalidate_tlb();
	jump(task);
}
/* Maps the early memory regions needed to bootstrap the system */
/*
 * Maps the early memory regions needed to bootstrap the system:
 * the kernel image is mapped both at its virtual link address
 * (cached/buffered) and identity-mapped at its physical address
 * (uncached) for the MMU-enable transition.
 */
void init_kernel_mappings(void)
{
	/* Runs with MMU off, so the pgd is touched via its physical address */
	memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));

	/* Map kernel area to its virtual region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align((unsigned int)_start_text, SZ_1MB), 1,
				 cacheable | bufferable);

	/* Map kernel one-to-one to its physical region */
	add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
				 align(virt_to_phys(_start_text), SZ_1MB),
				 1, 0);
}
/*
* Enable virtual memory using kernel's pgd
* and continue execution on virtual addresses.
*/
void start_virtual_memory()
{
	/*
	 * TTB must be 16K aligned. This is because first level tables are
	 * sized 16K.
	 */
	if ((unsigned int)&init_pgd & 0x3FFF)
		dprintk("kspace not properly aligned for ttb:",
			(u32)&init_pgd);
	// memset((void *)&kspace, 0, sizeof(pgd_table_t));
	arm_set_ttb(virt_to_phys(&init_pgd));

	/*
	 * This sets all 16 domains to zero and domain 0 to 1. The outcome
	 * is that page table access permissions are in effect for domain 0.
	 * All other domains have no access whatsoever.
	 */
	arm_set_domain(1);

	/* Enable everything before mmu permissions are in place */
	arm_enable_caches();
	arm_enable_wbuffer();

	/* Vectors are served from the high vector page (0xFFFF0000) */
	arm_enable_high_vectors();

	/*
	 * Leave the past behind. Tlbs are invalidated, write buffer is drained.
	 * The whole of I + D caches are invalidated unconditionally. This is
	 * important to ensure that the cache is free of previously loaded
	 * values. Otherwise unpredictable data aborts may occur at arbitrary
	 * times, each time a load/store operation hits one of the invalid
	 * entries and those entries are cleaned to main memory.
	 */
	arm_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_enable_mmu();

	/* Jump to virtual memory addresses */
	__asm__ __volatile__ (
		"add	sp, sp, %0	\n"	/* Update stack pointer */
		"add	fp, fp, %0	\n"	/* Update frame pointer */
		/* On the next instruction below, r0 gets
		 * current PC + KOFFSET + 2 instructions after itself. */
		"add	r0, pc, %0	\n"
		/* Special symbol that is extracted and included in the loader.
		 * Debuggers can break on it to load the virtual symbol table */
		".global break_virtual;\n"
		"break_virtual:\n"
		"mov	pc, r0		\n" /* (r0 has next instruction) */
		:
		: "r" (KERNEL_OFFSET)
		: "r0"
	);

	/*
	 * Restore link register (LR) for this function.
	 *
	 * NOTE: LR values are pushed onto the stack at each function call,
	 * which means the restored return values will be physical for all
	 * functions in the call stack except this function. So the caller
	 * of this function must never return but initiate scheduling etc.
	 */
	__asm__ __volatile__ (
		"add	%0, %0, %1	\n"
		"mov	pc, %0		\n"
		:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
	);

	/* should never come here */
	while(1);
}

60
src/arch/arm/v6/irq.c Normal file
View File

@@ -0,0 +1,60 @@
/*
* Low-level irq routines.
*
* Copyright (C) 2010 B Labs Ltd.
* Written by Bahadir Balban
* Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
/*
 * Save the current CPSR flags into *state, then disable IRQs and
 * imprecise aborts on this CPU (cpsid ia).  Pair with
 * irq_local_restore().
 */
void irq_local_disable_save(unsigned long *state)
{
	unsigned int tmp;
	__asm__ __volatile__ (
		"mrs	%0, cpsr_fc	\n"	/* Read current flags */
		"cpsid	ia		\n"	/* Mask IRQ + imprecise aborts */
		: "=r"(tmp)
		:
		: "cc"
	);
	*state = tmp;
}
/*
 * Write a previously saved CPSR flags value back, restoring the
 * interrupt mask state captured by irq_local_disable_save().
 */
void irq_local_restore(unsigned long state)
{
	__asm__ __volatile__ (
		"msr	cpsr_fc, %0\n"
		:
		: "r"(state)
		: "cc"
	);
}
/*
 * Destructive atomic read: atomically fetch the current value at
 * @location and replace it with 0 using an ldrex/strex retry loop.
 * Returns the old value truncated to u8.
 *
 * NOTE(review): ldrex/strex here operate on a full 32-bit word even
 * though the parameter is u8* — presumably callers pass a word-sized,
 * word-aligned slot; confirm against callers.
 */
u8 l4_atomic_dest_readb(u8 *location)
{
	unsigned int tmp, res;
	__asm__ __volatile__ (
		"1:			\n"
		"ldrex	%0, [%2]	\n"	/* tmp = *location */
		"strex	%1, %3, [%2]	\n"	/* try *location = 0 */
		"teq	%1, #0		\n"	/* store succeeded? */
		"bne	1b		\n"	/* no -> retry */
		: "=&r"(tmp), "=&r"(res)
		: "r"(location), "r"(0)
		: "cc", "memory"
	);
	return (u8)tmp;
}
/*
 * Return 1 if IRQs are currently enabled on this CPU, 0 otherwise.
 * Bit 7 of the CPSR is the I (IRQ mask) bit: set means masked.
 */
int irqs_enabled(void)
{
	int tmp;
	__asm__ __volatile__ (
		"mrs	%0, cpsr_fc\n"
		: "=r"(tmp)
	);
	/* I bit set -> IRQs disabled */
	if (tmp & 0x80)
		return 0;
	return 1;
}

366
src/arch/arm/v6/mapping.c Normal file
View File

@@ -0,0 +1,366 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/platform.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)
#include INC_ARCH(io.h)
/*
* Removes initial mappings needed for transition to virtual memory.
* Used one-time only.
*/
/*
 * Removes initial section mappings needed for the transition to
 * virtual memory.  Used one-time only during boot.
 *
 * Spins forever (early-boot panic) if the entry being removed is
 * not actually a section mapping.
 */
void remove_section_mapping(unsigned long vaddr)
{
	pgd_table_t *pgd = &init_pgd;
	pmd_t pgd_i = PGD_INDEX(vaddr);

	/*
	 * Fix: compare the type field for equality.  The previous
	 * bitwise test ((entry & mask) & PMD_TYPE_SECTION) also
	 * accepted any invalid type encoding that merely shares a
	 * bit with the section type.
	 */
	if ((pgd->entry[pgd_i] & PMD_TYPE_MASK) != PMD_TYPE_SECTION)
		while (1)
			;

	pgd->entry[pgd_i] = 0;
	pgd->entry[pgd_i] |= PMD_TYPE_FAULT;

	arm_invalidate_tlb();
}
/*
* Maps given section-aligned @paddr to @vaddr using enough number
* of section-units to fulfill @size in sections. Note this overwrites
* a mapping if same virtual address was already mapped.
*/
void __add_section_mapping_init(unsigned int paddr,
unsigned int vaddr,
unsigned int size,
unsigned int flags)
{
pte_t *ppte;
unsigned int l1_ptab;
unsigned int l1_offset;
/* 1st level page table address */
l1_ptab = virt_to_phys(&init_pgd);
/* Get the section offset for this vaddr */
l1_offset = (vaddr >> 18) & 0x3FFC;
/* The beginning entry for mapping */
ppte = (unsigned int *)(l1_ptab + l1_offset);
for(int i = 0; i < size; i++) {
*ppte = 0; /* Clear out old value */
*ppte |= paddr; /* Assign physical address */
*ppte |= PMD_TYPE_SECTION; /* Assign translation type */
/* Domain is 0, therefore no writes. */
/* Only kernel access allowed */
*ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
/* Cacheability/Bufferability flags */
*ppte |= flags;
ppte++; /* Next section entry */
paddr += SECTION_SIZE; /* Next physical section */
}
return;
}
/*
 * Section-map @size sections of @paddr at @vaddr, truncating both
 * addresses down to their section base first.  A zero @size is a
 * no-op.
 */
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
			      unsigned int size, unsigned int flags)
{
	if (size == 0)
		return;

	__add_section_mapping_init(paddr & ~SECTION_MASK,
				   vaddr & ~SECTION_MASK,
				   size, flags);
}
/*
 * Compose a pte value for mapping @paddr with @flags and store it
 * in *@ptep (no hardware table is touched here).
 *
 * The flags produced by space_flags_to_ptflags() are already in
 * v5 pte format, so only the pte type field is chosen here; ARMv7
 * needs an extra conversion step, v5 does not.
 */
void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
		      pte_t *ptep)
{
	u32 type;

	/* Addresses must arrive page-aligned */
	BUG_ON(!is_page_aligned(paddr));
	BUG_ON(!is_page_aligned(vaddr));

	type = (flags == __MAP_FAULT) ? PTE_TYPE_FAULT : PTE_TYPE_SMALL;
	*ptep = paddr | flags | type;
}
/*
 * Install @pte at @ptep, flushing caches and the TLB around the
 * write so that stale cache lines and translations for the old
 * mapping cannot survive.  Currently flushes everything globally;
 * the FIXMEs below describe the intended per-MVA operations.
 */
void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
{
	/* FIXME:
	 * Clean the dcache and invalidate the icache
	 * for the old translation first?
	 *
	 * The dcache is virtual, therefore the data
	 * in those entries should be cleaned first,
	 * before the translation of that virtual
	 * address is changed to a new physical address.
	 *
	 * Check that the entry was not faulty first.
	 */
	arm_clean_invalidate_cache();

	*ptep = pte;

	/* FIXME: Fix this!
	 * - Use vaddr to clean the dcache pte by MVA.
	 * - Use mapped area to invalidate the icache
	 * - Invalidate the tlb for mapped area
	 */
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
}
/*
 * Convenience wrapper: compose a pte for @paddr/@flags and install
 * it at @ptep with the full cache/TLB maintenance of
 * arch_write_pte().
 */
void arch_prepare_write_pte(u32 paddr, u32 vaddr,
			    unsigned int flags, pte_t *ptep)
{
	pte_t pte = 0;

	/* They must be aligned at this stage */
	BUG_ON(!is_page_aligned(paddr));
	BUG_ON(!is_page_aligned(vaddr));

	arch_prepare_pte(paddr, vaddr, flags, &pte);

	arch_write_pte(ptep, pte, vaddr);
}
/*
 * Return a pointer to the first-level (pgd) entry slot that covers
 * @vaddr in @pgd.
 */
pmd_t *
arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
{
	return &pgd->entry[PGD_INDEX(vaddr)];
}
/*
* v5 pmd writes
*/
/*
 * Point a first-level entry at the second-level table at physical
 * address @pmd_phys, then flush caches and TLB so the old entry
 * cannot be used again.
 */
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
{
	/* FIXME: Clean the dcache if there was a valid entry */
	*pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
	arm_clean_invalidate_cache(); /*FIXME: Write these properly! */
	arm_invalidate_tlb();
}
/*
 * Return non-zero when @pte's protection bits grant at least the
 * permissions requested in @flags, zero otherwise.
 */
int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
{
	return (pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK);
}
/*
* Tell if a pgd index is a common kernel index.
* This is used to distinguish common kernel entries
* in a pgd, when copying page tables.
*/
/*
 * Tell if pgd index @i belongs to a region shared by all tasks
 * (kernel area, IO area, KIP page, vector pages, console window).
 * Used to distinguish common kernel entries when copying page
 * tables.  Returns 1 for global indices, 0 otherwise.
 */
int is_global_pgdi(int i)
{
	return (i >= PGD_INDEX(KERNEL_AREA_START) &&
		i < PGD_INDEX(KERNEL_AREA_END)) ||
	       (i >= PGD_INDEX(IO_AREA_START) &&
		i < PGD_INDEX(IO_AREA_END)) ||
	       i == PGD_INDEX(USER_KIP_PAGE) ||
	       i == PGD_INDEX(ARM_HIGH_VECTOR) ||
	       i == PGD_INDEX(ARM_SYSCALL_VECTOR) ||
	       i == PGD_INDEX(USERSPACE_CONSOLE_VBASE);
}
extern pmd_table_t *pmd_array;
/*
 * Wipe every user-space entry from @pgd, releasing any second-level
 * tables found along the way.  Global (kernel-shared) entries are
 * left intact.
 */
void remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		pmd_table_t *pmd;

		/* Entries shared by all tasks stay */
		if (is_global_pgdi(i))
			continue;

		/* A user pmd hangs off this slot; free it */
		if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
			pmd = (pmd_table_t *)
			      phys_to_virt(pgd->entry[i] &
					   PMD_ALIGN_MASK);
			free_pmd(pmd);
		}

		/* Mark the slot as unmapped */
		pgd->entry[i] = PMD_TYPE_FAULT;
	}
}
/*
 * Count how many second-level tables the boot pgd currently
 * references.
 */
int pgd_count_boot_pmds()
{
	pgd_table_t *boot_pgd = &init_pgd;
	int count = 0;

	for (int idx = 0; idx < PGD_ENTRY_TOTAL; idx++)
		count += ((boot_pgd->entry[idx] & PMD_TYPE_MASK)
			  == PMD_TYPE_PMD);

	return count;
}
/*
* Jumps from boot pmd/pgd page tables to tables allocated from the cache.
*/
/*
 * Migrate from the static boot pgd/pmds to tables allocated from
 * the kernel cache: deep-copies the boot pgd and every pmd it
 * references, then switches TTB to the new pgd.  Returns the new
 * pgd.  BUGs on allocation failure (boot-time only).
 */
pgd_table_t *arch_realloc_page_tables(void)
{
	pgd_table_t *pgd_new = alloc_pgd();
	pgd_table_t *pgd_old = &init_pgd;
	pmd_table_t *orig, *pmd;

	/* Copy whole pgd entries */
	memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));

	/* Allocate and copy all pmds */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry */
		if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
			/* Allocate new pmd */
			if (!(pmd = alloc_pmd())) {
				printk("FATAL: PMD allocation "
				       "failed during system initialization\n");
				BUG();
			}

			/* Find original pmd */
			orig = (pmd_table_t *)
				phys_to_virt((pgd_old->entry[i] &
					      PMD_ALIGN_MASK));

			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));

			/* Replace original pmd entry in pgd with new */
			pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
			pgd_new->entry[i] |= PMD_TYPE_PMD;
		}
	}

	/* Switch the virtual memory system into new area */
	arm_clean_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd_new));
	arm_invalidate_tlb();

	printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
	       __KERNELNAME__, virt_to_phys(pgd_old),
	       virt_to_phys(pgd_new));

	return pgd_new;
}
/*
* Copies global kernel entries into another pgd. Even for
* sub-pmd ranges the associated pmd entries are copied,
* assuming any pmds copied are applicable to all tasks in
* the system.
*/
/*
 * Copies global kernel entries into another pgd. Even for
 * sub-pmd ranges the associated pmd entries are copied,
 * assuming any pmds copied are applicable to all tasks in
 * the system.
 *
 * @end == 0 is used throughout as "wrapped past the top of the
 * address space"; the checks below keep that convention when
 * aligning.
 */
void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
			       unsigned long start, unsigned long end)
{
	/* Extend sub-pmd ranges to their respective pmd boundaries */
	start = align(start, PMD_MAP_SIZE);

	/* An @end below @start means the range already wrapped */
	if (end < start)
		end = 0;

	/* Aligning would overflow if mapping the last virtual pmd */
	if (end < align(~0, PMD_MAP_SIZE) ||
	    start > end) /* end may have already overflown as input */
		end = align_up(end, PMD_MAP_SIZE);
	else
		end = 0;

	copy_pgds_by_vrange(to, from, start, end);
}
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end)
{
unsigned long start_i = PGD_INDEX(start);
unsigned long end_i = PGD_INDEX(end);
unsigned long irange = (end_i != 0) ? (end_i - start_i)
: (PGD_ENTRY_TOTAL - start_i);
memcpy(&to->entry[start_i], &from->entry[start_i],
irange * sizeof(pmd_t));
}
/*
 * Seed a freshly created pgd with every globally shared region
 * from the current task's pgd: kernel area, IO area, KIP page,
 * vector pages and the (temporary) userspace console window.
 */
void arch_copy_pgd_kernel_entries(pgd_table_t *to)
{
	pgd_table_t *from = TASK_PGD(current);

	copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
				  KERNEL_AREA_END);
	copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
	copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
				  USER_KIP_PAGE + PAGE_SIZE);
	copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
				  ARM_HIGH_VECTOR + PAGE_SIZE);
	copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
				  ARM_SYSCALL_VECTOR + PAGE_SIZE);

	/* We temporarily map uart registers to every process */
	copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
				  USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
}
/* Scheduler uses this to switch context */
/*
 * Scheduler uses this to switch context: flush caches/TLB and load
 * the destination task's translation table base.  The cache clean
 * must precede the TTB write; do not reorder.
 */
void arch_space_switch(struct ktcb *to)
{
	pgd_table_t *pgd = TASK_PGD(to);

	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd));
	arm_invalidate_tlb();
}
/*
 * Fallback task run when nothing else is runnable; announces
 * itself once and spins forever.
 */
void idle_task(void)
{
	printk("Idle task.\n");

	for (;;)
		;
}

View File

@@ -105,7 +105,7 @@ BEGIN_PROC(arm_invalidate_dcache)
END_PROC(arm_invalidate_dcache)
BEGIN_PROC(arm_clean_dcache)
mcr p15, 0 , pc, c7, c10, 3 @ Test/clean dcache line
mrc p15, 0 , pc, c7, c10, 3 @ Test/clean dcache line
bne arm_clean_dcache
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr

91
src/arch/arm/v6/mutex.c Normal file
View File

@@ -0,0 +1,91 @@
/*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <l4/macros.h>
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include INC_SUBARCH(mmu_ops.h)
#define MUTEX_UNLOCKED 0
#define MUTEX_LOCKED 1
/* Notes on ldrex/strex:
* ldrex rD, [rN, #imm] : loads rD with contents of at address (rN + imm)
* strex rD, rS, [rN, #imm]: pushes contents of rS to memory location (rN + imm)
* rD is 0 if operation is successful, 1 otherwise
*/
/*
 * Acquire a spinlock: loop on ldrex/strex until *s is atomically
 * changed from 0 to 1.  On SMP, wfene parks the core (when the
 * lock was seen held or the store failed) until __spin_unlock's
 * sev wakes it.  dsb on exit orders the acquire before subsequent
 * accesses.
 */
void __spin_lock(unsigned int *s)
{
	unsigned int tmp;

	__asm__ __volatile__ (
		"1:\n"
		"ldrex	%0, [%2]\n"	/* tmp = *s */
		"teq	%0, #0\n"	/* free? */
		"strexeq %0, %1, [%2]\n" /* if free, try *s = 1 */
		"teq	%0, #0\n"	/* acquired? */
#ifdef CONFIG_SMP
		"wfene\n"		/* held/lost race: wait for event */
#endif
		"bne	1b\n"		/* retry until acquired */
		: "=&r" (tmp)
		: "r"(1), "r"(s)
		: "cc", "memory"
	);
	dsb();
}
/*
 * Release a spinlock: a plain store of 0 suffices since only the
 * owner writes here.  On SMP, dsb makes the release visible before
 * sev wakes any cores waiting in __spin_lock's wfene.
 */
void __spin_unlock(unsigned int *s)
{
	__asm__ __volatile__ (
		"str	%0, [%1]\n"	/* *s = 0 */
		:
		: "r"(0), "r"(s)
		: "memory"
	);

#ifdef CONFIG_SMP
	dsb();
	__asm__ __volatile__ ("sev\n");
#endif
}
/*
* Current implementation uses __mutex_(un)lock within a protected
* spinlock, needs to be revisited in the future
*/
/*
 * Try to acquire the mutex word *m by atomically changing it from
 * 0 to 1.  Returns 1 if acquired, 0 if it was already held —
 * matching the v5 swp implementation's contract.
 *
 * Fix: the original used "tst %0, #0" / "tsteq %1, #0".  TST
 * performs an AND, and AND with #0 is always zero, so the Z flag
 * was set unconditionally: the lock value was never actually
 * tested and strexeq always stored.  The sequence below branches
 * out when the lock is held and retries only when the exclusive
 * store loses a race.
 */
unsigned int __mutex_lock(unsigned int *m)
{
	unsigned int tmp, res;

	__asm__ __volatile__ (
		"mov	%1, #0\n"	/* res = 0 (clean exit value) */
		"1:\n"
		"ldrex	%0, [%3]\n"	/* tmp = *m */
		"teq	%0, #0\n"	/* already locked? */
		"bne	2f\n"		/* yes -> fail with tmp != 0 */
		"strex	%1, %2, [%3]\n"	/* try *m = 1 */
		"teq	%1, #0\n"	/* store succeeded? */
		"bne	1b\n"		/* lost the race -> retry */
		"2:\n"
		: "=&r" (tmp), "=&r"(res)
		: "r"(1), "r"(m)
		: "cc", "memory"
	);

	/* Non-zero tmp means the mutex was held by someone else */
	if ((tmp | res) != 0)
		return 0;
	return 1;
}
/*
 * Release the mutex word by storing 0.  A plain store is
 * sufficient here because only the owner releases; callers are
 * expected to hold the protecting spinlock (see the note above
 * __mutex_lock).
 */
void __mutex_unlock(unsigned int *m)
{
	__asm__ __volatile__ (
		"str	%0, [%1] \n"	/* *m = 0 */
		:
		: "r"(0), "r"(m)
		: "memory"
	);
}

View File

@@ -1,726 +0,0 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/api/errno.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_API(kip.h)
/*
* These are indices into arrays with pgd_t or pmd_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) & 0x3FFC) / sizeof(pgd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) & 0x3FC) / sizeof (pmd_t))
/*
* Removes initial mappings needed for transition to virtual memory.
* Used one-time only.
*/
/*
 * Removes initial section mappings needed for the transition to
 * virtual memory.  Used one-time only; spins forever (early-boot
 * panic) if the entry is not a section mapping.
 */
void remove_section_mapping(unsigned long vaddr)
{
	pgd_table_t *pgd = &init_pgd;	/* fix: stray ";;" removed */
	pgd_t pgd_i = PGD_INDEX(vaddr);

	/*
	 * Fix: compare the type field for equality.  The previous
	 * bitwise test also accepted invalid type encodings that
	 * merely share a bit with PGD_TYPE_SECTION.
	 */
	if ((pgd->entry[pgd_i] & PGD_TYPE_MASK) != PGD_TYPE_SECTION)
		while (1)
			;

	pgd->entry[pgd_i] = 0;
	pgd->entry[pgd_i] |= PGD_TYPE_FAULT;

	arm_invalidate_tlb();
}
/*
* Maps given section-aligned @paddr to @vaddr using enough number
* of section-units to fulfill @size in sections. Note this overwrites
* a mapping if same virtual address was already mapped.
*/
void __add_section_mapping_init(unsigned int paddr,
unsigned int vaddr,
unsigned int size,
unsigned int flags)
{
pte_t *ppte;
unsigned int l1_ptab;
unsigned int l1_offset;
/* 1st level page table address */
l1_ptab = virt_to_phys(&init_pgd);
/* Get the section offset for this vaddr */
l1_offset = (vaddr >> 18) & 0x3FFC;
/* The beginning entry for mapping */
ppte = (unsigned int *)(l1_ptab + l1_offset);
for(int i = 0; i < size; i++) {
*ppte = 0; /* Clear out old value */
*ppte |= paddr; /* Assign physical address */
*ppte |= PGD_TYPE_SECTION; /* Assign translation type */
/* Domain is 0, therefore no writes. */
/* Only kernel access allowed */
*ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
/* Cacheability/Bufferability flags */
*ppte |= flags;
ppte++; /* Next section entry */
paddr += ARM_SECTION_SIZE; /* Next physical section */
}
return;
}
/*
 * Section-map @size sections of @paddr at @vaddr, truncating both
 * addresses down to their section base first.  A zero @size is a
 * no-op.
 */
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
			      unsigned int size, unsigned int flags)
{
	if (size == 0)
		return;

	__add_section_mapping_init(paddr & ~ARM_SECTION_MASK,
				   vaddr & ~ARM_SECTION_MASK,
				   size, flags);
}
/* TODO: Make sure to flush tlb entry and caches */
/*
 * Write a small-page pte for @paddr with @flags into the slot of
 * @pmd covering @vaddr, then flush caches and TLB.
 * TODO: Make sure to flush tlb entry and caches
 */
void __add_mapping(unsigned int paddr, unsigned int vaddr,
		   unsigned int flags, pmd_table_t *pmd)
{
	unsigned int pmd_i = PMD_INDEX(vaddr);
	pmd->entry[pmd_i] = paddr;
	pmd->entry[pmd_i] |= PMD_TYPE_SMALL;	/* Small page type */
	pmd->entry[pmd_i] |= flags;

	/* TODO: Is both required? Investigate */
	/* TEST:
	 * I think cleaning or invalidating the cache is not required,
	 * because the entries in the cache aren't for the new mapping anyway.
	 * It's required if a mapping is removed, but not when newly added.
	 */
	arm_clean_invalidate_cache();

	/* TEST: tlb must be flushed because a new mapping is present in page
	 * tables, and tlb is inconsistent with the page tables */
	arm_invalidate_tlb();
}
/* Return whether a pmd associated with @vaddr is mapped on a pgd or not. */
/*
 * Return whether a pmd associated with @vaddr is mapped on a pgd
 * or not: the virtual address of the coarse table if one exists,
 * NULL for a fault entry.  Section and fine-table entries are not
 * expected where a coarse mapping is attempted and BUG.
 */
pmd_table_t *pmd_exists(pgd_table_t *pgd, unsigned long vaddr)
{
	unsigned int pgd_i = PGD_INDEX(vaddr);

	/* Return true if non-zero pgd entry */
	switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
	case PGD_TYPE_COARSE:
		return (pmd_table_t *)
		       phys_to_virt((pgd->entry[pgd_i] &
			            PGD_COARSE_ALIGN_MASK));
		break;

	case PGD_TYPE_FAULT:
		return 0;
		break;

	case PGD_TYPE_SECTION:
		dprintk("Warning, a section is already mapped "
		        "where a coarse page mapping is attempted:",
		        (u32)(pgd->entry[pgd_i]
		              & PGD_SECTION_ALIGN_MASK));
		BUG();
		break;

	case PGD_TYPE_FINE:
		dprintk("Warning, a fine page table is already mapped "
		        "where a coarse page mapping is attempted:",
		        (u32)(pgd->entry[pgd_i]
		              & PGD_FINE_ALIGN_MASK));
		printk("Fine tables are unsupported. ");
		printk("What is this doing here?");
		BUG();
		break;

	default:
		dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
		BUG();
		break;
	}
	return 0;
}
/* Convert a virtual address to a pte if it exists in the page tables. */
/*
 * Convert a virtual address to its pte as found in @pgd's tables.
 * Returns a zero pte when no second-level table covers @virtual.
 */
pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd)
{
	pmd_table_t *pmd = pmd_exists(pgd, virtual);

	return pmd ? (pte_t)pmd->entry[PMD_INDEX(virtual)] : (pte_t)0;
}
/* Convert a virtual address to a pte if it exists in the page tables. */
/*
 * Convert a virtual address to a pte using the current task's page
 * tables; zero pte if unmapped.
 */
pte_t virt_to_pte(unsigned long virtual)
{
	return virt_to_pte_from_pgd(virtual, TASK_PGD(current));
}
/*
 * Translate @vaddr through @pgd's tables and return the physical
 * frame address (pte with its low flag bits stripped).
 */
unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
	return virt_to_pte_from_pgd(vaddr, pgd) & ~PAGE_MASK;
}
/*
 * Translate @vaddr through @task's page tables; returns the
 * physical frame address.
 */
unsigned long virt_to_phys_by_task(unsigned long vaddr, struct ktcb *task)
{
	return virt_to_phys_by_pgd(vaddr, TASK_PGD(task));
}
/*
 * Hook second-level table @pmd into @pgd at the slot covering
 * @vaddr, marking the entry as a coarse page table.
 */
void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
{
	u32 slot = PGD_INDEX(vaddr);

	/* Domain is 0, therefore no writes. */
	pgd->entry[slot] = (pgd_t)(virt_to_phys(pmd) | PGD_TYPE_COARSE);
}
/*
* Same as normal mapping but with some boot tweaks.
*/
/*
 * Same as normal mapping but with some boot tweaks: maps @size
 * bytes of @paddr at @vaddr in the boot pgd, allocating second-
 * level tables from the boot allocator as needed.  @size is in
 * bytes (must be at least a page); panics on misuse.
 */
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
		      unsigned int size, unsigned int flags)
{
	pmd_table_t *pmd;
	pgd_table_t *pgd = &init_pgd;
	unsigned int numpages = (size >> PAGE_BITS);

	if (size < PAGE_SIZE) {
		printascii("Error: Mapping size must be in bytes not pages.\n");
		while(1);
	}
	if (size & PAGE_MASK)
		numpages++;

	/* Convert generic map flags to pagetable-specific */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all consecutive pages that cover given size */
	for (int i = 0; i < numpages; i++) {
		/* Check if another mapping already has a pmd attached. */
		pmd = pmd_exists(pgd, vaddr);
		if (!pmd) {
			/*
			 * If this is the first vaddr in
			 * this pmd, allocate new pmd
			 */
			pmd = alloc_boot_pmd();

			/* Attach pmd to its entry in pgd */
			attach_pmd(pgd, pmd, vaddr);
		}

		/* Attach paddr to this pmd */
		__add_mapping(page_align(paddr),
			      page_align(vaddr), flags, pmd);

		/* Go to the next page to be mapped */
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}
/*
* Maps @paddr to @vaddr, covering @size bytes also allocates new pmd if
* necessary. This flavor explicitly supplies the pgd to modify. This is useful
* when modifying userspace of processes that are not currently running. (Only
* makes sense for userspace mappings since kernel mappings are common.)
*/
/*
 * Maps @paddr to @vaddr, covering @size bytes; also allocates a new
 * pmd if necessary.  This flavor explicitly supplies the pgd to
 * modify, which is useful when modifying the userspace of processes
 * that are not currently running (only makes sense for userspace
 * mappings since kernel mappings are common).  @size is in bytes;
 * panics on misuse.
 */
void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
		     unsigned int size, unsigned int flags,
		     pgd_table_t *pgd)
{
	pmd_table_t *pmd;
	unsigned int numpages = (size >> PAGE_BITS);

	if (size < PAGE_SIZE) {
		printascii("Error: Mapping size must be in bytes not pages.\n");
		while(1);
	}
	if (size & PAGE_MASK)
		numpages++;

	/* Convert generic map flags to pagetable-specific */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all consecutive pages that cover given size */
	for (int i = 0; i < numpages; i++) {
		/* Check if another mapping already has a pmd attached. */
		pmd = pmd_exists(pgd, vaddr);
		if (!pmd) {
			/*
			 * If this is the first vaddr in
			 * this pmd, allocate new pmd
			 */
			pmd = alloc_pmd();

			/* Attach pmd to its entry in pgd */
			attach_pmd(pgd, pmd, vaddr);
		}

		/* Attach paddr to this pmd */
		__add_mapping(page_align(paddr),
			      page_align(vaddr), flags, pmd);

		/* Go to the next page to be mapped */
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}
/*
 * Map @size bytes of @paddr at @vaddr in the current task's page
 * tables.
 */
void add_mapping(unsigned int paddr, unsigned int vaddr,
		 unsigned int size, unsigned int flags)
{
	add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
}
/*
* Checks if a virtual address range has same or more permissive
* flags than the given ones, returns 0 if not, and 1 if OK.
*/
/*
 * Checks if a virtual address range has same or more permissive
 * flags than the given ones; returns 0 if not, 1 if OK.
 */
int check_mapping_pgd(unsigned long vaddr, unsigned long size,
		      unsigned int flags, pgd_table_t *pgd)
{
	unsigned int npages = __pfn(align_up(size, PAGE_SIZE));

	/* Convert generic map flags to pagetable-specific */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	for (unsigned int i = 0; i < npages; i++) {
		pte_t pte = virt_to_pte_from_pgd(vaddr + i * PAGE_SIZE, pgd);

		/* Fail as soon as one page is less permissive */
		if ((pte & PTE_PROT_MASK) < (flags & PTE_PROT_MASK))
			return 0;
	}

	return 1;
}
/*
 * check_mapping_pgd() against the current task's page tables.
 */
int check_mapping(unsigned long vaddr, unsigned long size,
		  unsigned int flags)
{
	return check_mapping_pgd(vaddr, size, flags, TASK_PGD(current));
}
/* FIXME: Empty PMDs should be returned here !!! */
/*
 * Clear the pmd entry covering @vaddr.  Returns 0 on success,
 * -ENOENT if nothing was mapped there; BUGs on an unknown entry
 * type.
 *
 * FIXME: Empty PMDs should be returned here !!!
 */
int __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
{
	pmd_t idx = PMD_INDEX(vaddr);

	switch (pmd->entry[idx] & PMD_TYPE_MASK) {
	case PMD_TYPE_FAULT:
		/* Nothing mapped at this slot */
		return -ENOENT;
	case PMD_TYPE_LARGE:
		/* fallthrough: both page types clear the same way */
	case PMD_TYPE_SMALL:
		pmd->entry[idx] = 0;
		pmd->entry[idx] |= PMD_TYPE_FAULT;
		return 0;
	default:
		printk("Unknown page mapping in pmd. Assuming bug.\n");
		BUG();
	}

	return 0;	/* not reached */
}
/*
* Tell if a pgd index is a common kernel index. This is used to distinguish
* common kernel entries in a pgd, when copying page tables.
*/
/*
 * Tell if a pgd index is a common kernel index.  This is used to
 * distinguish common kernel entries in a pgd when copying page
 * tables.  Returns 1 for shared kernel indices, 0 otherwise.
 */
int is_kern_pgdi(int i)
{
	return (i >= PGD_INDEX(KERNEL_AREA_START) &&
		i < PGD_INDEX(KERNEL_AREA_END)) ||
	       (i >= PGD_INDEX(IO_AREA_START) &&
		i < PGD_INDEX(IO_AREA_END)) ||
	       i == PGD_INDEX(USER_KIP_PAGE) ||
	       i == PGD_INDEX(ARM_HIGH_VECTOR) ||
	       i == PGD_INDEX(ARM_SYSCALL_VECTOR) ||
	       i == PGD_INDEX(USERSPACE_UART_BASE);
}
/*
* Removes all userspace mappings from a pgd. Frees any pmds that it
* detects to be user pmds
*/
/*
 * Removes all userspace mappings from @pgd, freeing any pmds it
 * detects to be user pmds.  Kernel-shared entries are untouched.
 * Always returns 0.
 */
int remove_mapping_pgd_all_user(pgd_table_t *pgd)
{
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		pmd_table_t *pmd;

		/* Kernel entries are shared by all tasks; keep them */
		if (is_kern_pgdi(i))
			continue;

		/* A user pmd hangs off this slot; release it */
		if ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
			pmd = (pmd_table_t *)
			      phys_to_virt(pgd->entry[i] &
					   PGD_COARSE_ALIGN_MASK);
			free_pmd(pmd);
		}

		/* Mark the slot unmapped */
		pgd->entry[i] = PGD_TYPE_FAULT;
	}

	return 0;
}
/*
 * Unmap @vaddr from @pgd.  Handles coarse (per-page) and section
 * entries; BUGs on fine tables and unknown types.  Returns 0 on
 * success, -1 if nothing was mapped, -ENOENT from __remove_mapping
 * when the pmd slot was empty.
 */
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
	pgd_t pgd_i = PGD_INDEX(vaddr);
	pmd_table_t *pmd;
	pmd_t pmd_i;
	int ret;

	/*
	 * Clean the cache to main memory before removing the mapping. Otherwise
	 * entries in the cache for this mapping will cause tranlation faults
	 * if they're cleaned to main memory after the mapping is removed.
	 */
	arm_clean_invalidate_cache();

	/* TEST:
	 * Can't think of a valid reason to flush tlbs here, but keeping it just
	 * to be safe. REMOVE: Remove it if it's unnecessary.
	 */
	arm_invalidate_tlb();

	/* Return true if non-zero pgd entry */
	switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
	case PGD_TYPE_COARSE:
		// printk("Removing coarse mapping @ 0x%x\n", vaddr);
		pmd = (pmd_table_t *)
		      phys_to_virt((pgd->entry[pgd_i]
				    & PGD_COARSE_ALIGN_MASK));
		pmd_i = PMD_INDEX(vaddr);
		ret = __remove_mapping(pmd, vaddr);
		break;

	case PGD_TYPE_FAULT:
		ret = -1;
		break;

	case PGD_TYPE_SECTION:
		printk("Removing section mapping for 0x%lx",
		       vaddr);
		pgd->entry[pgd_i] = 0;
		pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
		ret = 0;
		break;

	case PGD_TYPE_FINE:
		printk("Table mapped is a fine page table.\n"
		       "Fine tables are unsupported. Assuming bug.\n");
		BUG();
		break;

	default:
		dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
		printk("Assuming bug.\n");
		BUG();
		break;
	}

	/* The tlb must be invalidated here because it might have cached the
	 * old translation for this mapping. */
	arm_invalidate_tlb();

	return ret;
}
/*
 * Unmap @vaddr from the current task's page tables.
 */
int remove_mapping(unsigned long vaddr)
{
	return remove_mapping_pgd(vaddr, TASK_PGD(current));
}
int delete_page_tables(struct address_space *space)
{
remove_mapping_pgd_all_user(space->pgd);
free_pgd(space->pgd);
return 0;
}
/*
* Copies userspace entries of one task to another. In order to do that,
* it allocates new pmds and copies the original values into new ones.
*/
int copy_user_tables(struct address_space *new, struct address_space *orig_space)
{
pgd_table_t *to = new->pgd, *from = orig_space->pgd;
pmd_table_t *pmd, *orig;
/* Allocate and copy all pmds that will be exclusive to new task. */
for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
/* Detect a pmd entry that is not a kernel pmd? */
if (!is_kern_pgdi(i) &&
((from->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
/* Allocate new pmd */
if (!(pmd = alloc_pmd()))
goto out_error;
/* Find original pmd */
orig = (pmd_table_t *)
phys_to_virt((from->entry[i] &
PGD_COARSE_ALIGN_MASK));
/* Copy original to new */
memcpy(pmd, orig, sizeof(pmd_table_t));
/* Replace original pmd entry in pgd with new */
to->entry[i] = (pgd_t)virt_to_phys(pmd);
to->entry[i] |= PGD_TYPE_COARSE;
}
}
return 0;
out_error:
/* Find all non-kernel pmds we have just allocated and free them */
for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
/* Non-kernel pmd that has just been allocated. */
if (!is_kern_pgdi(i) &&
(to->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
/* Obtain the pmd handle */
pmd = (pmd_table_t *)
phys_to_virt((to->entry[i] &
PGD_COARSE_ALIGN_MASK));
/* Free pmd */
free_pmd(pmd);
}
}
return -ENOMEM;
}
/*
 * Counts how many second-level (pmd) tables are attached to the
 * given pgd. A first-level entry of coarse type indicates one pmd.
 */
int pgd_count_pmds(pgd_table_t *pgd)
{
	int count = 0;

	for (int idx = 0; idx < PGD_ENTRY_TOTAL; idx++) {
		pgd_t entry_type = pgd->entry[idx] & PGD_TYPE_MASK;

		if (entry_type == PGD_TYPE_COARSE)
			count++;
	}
	return count;
}
/*
* Allocates and copies all levels of page tables from one task to another.
* Useful when forking.
*
* The copied page tables end up having shared pmds for kernel entries
* and private copies of same pmds for user entries.
*/
/*
 * Allocates and copies all levels of page tables from one task to
 * another. Useful when forking.
 *
 * The copied page tables end up having shared pmds for kernel entries
 * and private copies of same pmds for user entries.
 *
 * Returns the new pgd, or PTR_ERR(-ENOMEM) on allocation failure.
 */
pgd_table_t *copy_page_tables(pgd_table_t *from)
{
	pmd_table_t *pmd, *orig;
	pgd_table_t *pgd;
	int i;

	/* Allocate and copy pgd. This includes all kernel entries */
	if (!(pgd = alloc_pgd()))
		return PTR_ERR(-ENOMEM);

	/* First copy whole pgd entries */
	memcpy(pgd, from, sizeof(pgd_table_t));

	/* Allocate and copy all pmds that will be exclusive to new task. */
	for (i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry that is not a kernel pmd */
		if (!is_kern_pgdi(i) &&
		    ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
			/* Allocate new pmd */
			if (!(pmd = alloc_pmd()))
				goto out_error;

			/* Find original pmd */
			orig = (pmd_table_t *)
				phys_to_virt((pgd->entry[i] &
					      PGD_COARSE_ALIGN_MASK));

			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));

			/* Replace original pmd entry in pgd with new */
			pgd->entry[i] = (pgd_t)virt_to_phys(pmd);
			pgd->entry[i] |= PGD_TYPE_COARSE;
		}
	}
	return pgd;

out_error:
	/*
	 * Free only pmds allocated by this call, i.e. entries with
	 * index below the failure point. Entries at or beyond it still
	 * hold the ORIGINAL task's pmd addresses (copied by the memcpy
	 * above); freeing those would release tables still owned by
	 * 'from' and corrupt the running task.
	 */
	for (int j = 0; j < i; j++) {
		if (!is_kern_pgdi(j) &&
		    (pgd->entry[j] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
			/* Obtain the pmd handle allocated above */
			pmd = (pmd_table_t *)
				phys_to_virt((pgd->entry[j] &
					      PGD_COARSE_ALIGN_MASK));
			/* Free pmd */
			free_pmd(pmd);
		}
	}
	/* Free the pgd */
	free_pgd(pgd);
	return PTR_ERR(-ENOMEM);
}
extern pmd_table_t *pmd_array;
/*
* Jumps from boot pmd/pgd page tables to tables allocated from the cache.
*/
/*
 * Moves the system from the boot-time page tables (init_pgd and the
 * boot pmds) onto tables allocated from the regular cache allocator:
 * copies the pgd and every pmd it references into new allocations,
 * then switches the TTB to the new pgd.
 *
 * Returns the newly active pgd.
 *
 * NOTE(review): the alloc_pgd() result is not checked for NULL before
 * the memcpy below — presumably allocation cannot fail this early in
 * boot; confirm or add a check like the pmd path has.
 */
pgd_table_t *realloc_page_tables(void)
{
	pgd_table_t *pgd_new = alloc_pgd();
	pgd_table_t *pgd_old = &init_pgd;
	pmd_table_t *orig, *pmd;
	/* Copy whole pgd entries */
	memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));
	/* Allocate and copy all pmds */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry */
		if ((pgd_old->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
			/* Allocate new pmd; fatal if it fails this early */
			if (!(pmd = alloc_pmd())) {
				printk("FATAL: PMD allocation "
				       "failed during system initialization\n");
				BUG();
			}
			/* Find original pmd */
			orig = (pmd_table_t *)
				phys_to_virt((pgd_old->entry[i] &
					      PGD_COARSE_ALIGN_MASK));
			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));
			/* Replace original pmd entry in pgd with new */
			pgd_new->entry[i] = (pgd_t)virt_to_phys(pmd);
			pgd_new->entry[i] |= PGD_TYPE_COARSE;
		}
	}
	/*
	 * Switch the virtual memory system into new area. The order
	 * matters: caches must be clean and the write buffer drained
	 * before the TTB change so the new tables are visible to the
	 * table walker, and stale translations must be flushed both
	 * before and after the switch.
	 */
	arm_clean_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd_new));
	arm_invalidate_tlb();
	printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
	       __KERNELNAME__, virt_to_phys(pgd_old),
	       virt_to_phys(pgd_new));
	return pgd_new;
}
/*
 * Useful for upgrading to page-grained control over a section mapping:
 * Remaps a section mapping in pages. It allocates a pmd (always, because
 * there can't be an already existing pmd for a section mapping), fills
 * in the page information, and replaces the direct section physical
 * translation with the address of the pmd. Flushes the caches/tlbs.
 */
/*
 * Converts the section mapping covering [vstart, vend) into small-page
 * mappings backed by a freshly allocated boot pmd, so the region can
 * later be controlled at page granularity.
 *
 * NOTE(review): only a single pgd entry (for vstart) is replaced, and
 * pmd indices pmd_i..pmd_i+numpages-1 must stay within one pmd table —
 * this assumes the whole range lies inside one 1MB section; confirm
 * callers never pass a larger or section-crossing range.
 */
void remap_as_pages(void *vstart, void *vend)
{
	unsigned long pstart = virt_to_phys(vstart);
	unsigned long pend = virt_to_phys(vend);
	unsigned long paddr = pstart;
	pgd_t pgd_i = PGD_INDEX(vstart);
	pmd_t pmd_i = PMD_INDEX(vstart);
	pgd_table_t *pgd = &init_pgd;
	pmd_table_t *pmd = alloc_boot_pmd();
	u32 pmd_phys = virt_to_phys(pmd);
	int numpages = __pfn(pend - pstart);
	/* Fill in the pmd first: one small-page entry per page,
	 * identity-following the section's physical layout */
	for (int n = 0; n < numpages; n++) {
		pmd->entry[pmd_i + n] = paddr;
		pmd->entry[pmd_i + n] |= PMD_TYPE_SMALL; /* Small page type */
		pmd->entry[pmd_i + n] |= space_flags_to_ptflags(MAP_SVC_DEFAULT_FLAGS);
		paddr += PAGE_SIZE;
	}
	/* Fill in the type to produce a complete pmd translator information */
	pmd_phys |= PGD_TYPE_COARSE;
	/* Make sure memory is coherent first. */
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	/* Replace the direct section physical address with pmd's address */
	pgd->entry[pgd_i] = (pgd_t)pmd_phys;
	printk("%s: Kernel area 0x%lx - 0x%lx remapped as %d pages\n", __KERNELNAME__,
	       (unsigned long)vstart, (unsigned long)vend, numpages);
}
/*
 * Copies the first-level (pgd) entries covering the virtual range
 * [start, end) from one pgd to another. An end index of 0 means the
 * range wraps to the top of the address space, so everything from the
 * start index to the last entry is copied.
 */
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
			 unsigned long start, unsigned long end)
{
	unsigned long first = PGD_INDEX(start);
	unsigned long last = PGD_INDEX(end);
	unsigned long nentries;

	/* end wrapped to 0: copy through the final pgd entry */
	if (last == 0)
		nentries = PGD_ENTRY_TOTAL - first;
	else
		nentries = last - first;

	memcpy(&to->entry[first], &from->entry[first],
	       nentries * sizeof(pgd_t));
}
/* Scheduler uses this to switch context */
/*
 * Scheduler uses this to switch context: flushes caches and TLBs,
 * then points the translation table base at the next task's pgd.
 * The order is significant — the cache must be cleaned/invalidated
 * and stale translations flushed before the TTB changes, and the
 * TLB flushed again afterwards so no old-address-space entries
 * survive into the new one.
 */
void arch_hardware_flush(pgd_table_t *pgd)
{
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(pgd));
	arm_invalidate_tlb();
}

View File

@@ -1,155 +0,0 @@
/*
* low-level mmu operations
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_ARCH(asm.h)
#define C15_id c0
#define C15_control c1
#define C15_ttb c2
#define C15_dom c3
#define C15_fsr c5
#define C15_far c6
#define C15_tlb c8
#define C15_C0_M 0x0001 /* MMU */
#define C15_C0_A 0x0002 /* Alignment */
#define C15_C0_C 0x0004 /* (D) Cache */
#define C15_C0_W 0x0008 /* Write buffer */
#define C15_C0_B 0x0080 /* Endianness */
#define C15_C0_S 0x0100 /* System */
#define C15_C0_R 0x0200 /* ROM */
#define C15_C0_Z 0x0800 /* Branch Prediction */
#define C15_C0_I 0x1000 /* I cache */
#define C15_C0_V 0x2000 /* High vectors */
/* FIXME: Make sure the ops that need r0 dont trash r0, or if they do,
* save it on stack before these operations.
*/
/*
* In ARM terminology, flushing the cache means invalidating its contents.
* Cleaning the cache means, writing the contents of the cache back to
* main memory. In write-back caches the cache must be cleaned before
* flushing otherwise in-cache data is lost.
*/
BEGIN_PROC(arm_set_ttb)
mcr p15, 0, r0, C15_ttb, c0, 0
mov pc, lr
END_PROC(arm_set_ttb)
BEGIN_PROC(arm_get_domain)
mrc p15, 0, r0, C15_dom, c0, 0
mov pc, lr
END_PROC(arm_get_domain)
BEGIN_PROC(arm_set_domain)
mcr p15, 0, r0, C15_dom, c0, 0
mov pc, lr
END_PROC(arm_set_domain)
BEGIN_PROC(arm_enable_mmu)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_M
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_mmu)
BEGIN_PROC(arm_enable_icache)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_I
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_icache)
BEGIN_PROC(arm_enable_dcache)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_C
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_dcache)
BEGIN_PROC(arm_enable_wbuffer)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_W
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_wbuffer)
BEGIN_PROC(arm_enable_high_vectors)
mrc p15, 0, r0, C15_control, c0, 0
orr r0, r0, #C15_C0_V
mcr p15, 0, r0, C15_control, c0, 0
mov pc, lr
END_PROC(arm_enable_high_vectors)
BEGIN_PROC(arm_invalidate_cache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c7 @ Flush I cache and D cache
mov pc, lr
END_PROC(arm_invalidate_cache)
BEGIN_PROC(arm_invalidate_icache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c5, 0 @ Flush I cache
mov pc, lr
END_PROC(arm_invalidate_icache)
BEGIN_PROC(arm_invalidate_dcache)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c6, 0 @ Flush D cache
mov pc, lr
END_PROC(arm_invalidate_dcache)
/*
 * Cleans (writes back) the entire data cache to main memory using
 * the ARMv5 test-and-clean operation, then drains the write buffer.
 *
 * Fix: the test-and-clean dcache line op is a read (MRC) that
 * returns status in the condition flags — the original used MCR.
 * The sibling arm_clean_invalidate_dcache uses MRC correctly.
 * Also loop via a local label like the sibling routines.
 */
BEGIN_PROC(arm_clean_dcache)
1:
	mrc p15, 0, pc, c7, c10, 3 @ Test/clean dcache line (flags set from result)
	bne 1b @ Repeat until whole dcache reports clean
	mcr p15, 0, ip, c7, c10, 4 @ Drain WB
	mov pc, lr
END_PROC(arm_clean_dcache)
BEGIN_PROC(arm_clean_invalidate_dcache)
1:
mrc p15, 0, pc, c7, c14, 3 @ Test/clean/flush dcache line
@ COMMENT: Why use PC?
bne 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
END_PROC(arm_clean_invalidate_dcache)
BEGIN_PROC(arm_clean_invalidate_cache)
1:
mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
@ COMMENT: Why use PC?
bne 1b
mcr p15, 0, ip, c7, c5, 0 @ Flush icache
mcr p15, 0, ip, c7, c10, 4 @ Drain WB
mov pc, lr
END_PROC(arm_clean_invalidate_cache)
BEGIN_PROC(arm_drain_writebuffer)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c7, c10, 4
mov pc, lr
END_PROC(arm_drain_writebuffer)
BEGIN_PROC(arm_invalidate_tlb)
mcr p15, 0, ip, c8, c7
mov pc, lr
END_PROC(arm_invalidate_tlb)
BEGIN_PROC(arm_invalidate_itlb)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c8, c5, 0
mov pc, lr
END_PROC(arm_invalidate_itlb)
BEGIN_PROC(arm_invalidate_dtlb)
mov r0, #0 @ FIX THIS
mcr p15, 0, r0, c8, c6, 0
mov pc, lr
END_PROC(arm_invalidate_dtlb)

View File

@@ -1,90 +0,0 @@
/*
* ARM v5 Binary semaphore (mutex) implementation.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include INC_ARCH(asm.h)
/* Recap on swp:
* swp rx, ry, [rz]
* In one instruction:
* 1) Stores the value in ry into location pointed by rz.
* 2) Loads the value in the location of rz into rx.
* By doing so, in one instruction one can attempt to lock
* a word, and discover whether it was already locked.
*/
#define MUTEX_UNLOCKED 0
#define MUTEX_LOCKED 1
@ Spins until the word at [r0] is acquired.
@ Uses swp to atomically store 1 and read back the previous value;
@ a non-zero previous value means the lock was held, so retry.
BEGIN_PROC(__spin_lock)
mov r1, #1
__spin:
swp r2, r1, [r0]
cmp r2, #0
bne __spin
mov pc, lr
END_PROC(__spin_lock)
BEGIN_PROC(__spin_unlock)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1 @ Debug check.
1:
bne 1b
mov pc, lr
END_PROC(__spin_unlock)
/*
* @r0: Address of mutex location.
*/
@ Attempts to take the mutex at [r0] without blocking.
@ Atomically stores 1 and examines the previous value:
@ returns 1 in r0 if the lock was free (now taken), 0 if it was held.
BEGIN_PROC(__mutex_lock)
mov r1, #1
swp r2, r1, [r0]
cmp r2, #0
movne r0, #0
moveq r0, #1
mov pc, lr
END_PROC(__mutex_lock)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_unlock)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1
1: @ Debug check.
bne 1b
mov pc, lr
END_PROC(__mutex_unlock)
/*
* @r0: Address of mutex location.
*/
@ NOTE(review): the first swp executes before r1 is initialized, so an
@ undefined value is stored to [r0] and immediately overwritten by the
@ second swp; the result in r2 also comes from the first (garbage)
@ exchange's predecessor. This looks like a bug — the first swp appears
@ redundant and the routine should probably match __mutex_lock. Confirm
@ the intended semantics before relying on this.
BEGIN_PROC(__mutex_inc)
swp r2, r1, [r0]
mov r1, #1
swp r2, r1, [r0]
cmp r2, #0
movne r0, #0
moveq r0, #1
mov pc, lr
END_PROC(__mutex_inc)
/*
* @r0: Address of mutex location.
*/
BEGIN_PROC(__mutex_dec)
mov r1, #0
swp r2, r1, [r0]
cmp r2, #1
1: @ Debug check.
bne 1b
mov pc, lr
END_PROC(__mutex_dec)

View File

@@ -5,6 +5,7 @@
*/
#include INC_ARCH(asm.h)
#include INC_ARCH(asm-macros.S)
.balign 4096
.section .data.vectors
@@ -39,30 +40,8 @@ END_PROC(arm_high_vector)
BEGIN_PROC(arm_reset_exception)
END_PROC(arm_reset_exception)
/*
* vect_undef
*
* Upon Entry:
* - R14: Address of next instruction after undefined instruction
* - PC: 0x00000004
* - IRQs are disabled (CPSR[7] = 1)
*
*
* PURPOSE:
* A co-processor instruction not supported by the core can be
* emulated here. Also unrecognised/invalid instructions are handled.
*/
BEGIN_PROC(arm_undef_exception)
sub lr, lr, #4
mov r0, lr @ Get undefined abort address
mrs r1, spsr @ Get previous abort state
mov r5, lr @ Save it in r5 in case r0 is trashed
mov lr, pc @ Save return address
ldr pc, =undef_handler
1:
b 1b
END_PROC(arm_undef_exception)
#if defined(CONFIG_SUBARCH_V5)
.macro disable_irqs rx
mrs \rx, cpsr_fc
orr \rx, #ARM_IRQ_BIT
@@ -73,6 +52,26 @@ END_PROC(arm_undef_exception)
bic \rx, #ARM_IRQ_BIT
msr cpsr_fc, \rx
.endm
#endif
#if defined (CONFIG_SUBARCH_V7) || defined(CONFIG_SUBARCH_V6)
.macro disable_irqs rx
cpsid ia
.endm
.macro enable_irqs rx
cpsie ia
.endm
#endif
#if defined (CONFIG_SUBARCH_V7)
.macro clear_exclusive
clrex
.endm
#else
.macro clear_exclusive
.endm
#endif
/* Only works in SVC MODE. Know what you are doing! */
.macro get_current rx
bic \rx, sp, #0xFF0
@@ -114,14 +113,61 @@ END_PROC(arm_undef_exception)
cmp \rx, #ARM_MODE_USR
.endm
/* These really both read the same unified FSR and FAR registers */
#if defined (CONFIG_SUBARCH_V5)
.macro cp15_read_ifsr rx
mrc p15, 0, \rx, c5, c0, 0 @ Read FSR (Tells why the fault occured)
.endm
.macro cp15_read_ifar rx
mrc p15, 0, \rx, c6, c0, 0 @ Read FAR (Contains the faulted data address)
.endm
.macro cp15_read_dfsr rx
mrc p15, 0, \rx, c5, c0, 0 @ Read FSR (Tells why the fault occured)
.endm
.macro cp15_read_dfar rx
mrc p15, 0, \rx, c6, c0, 0 @ Read FAR (Contains the faulted data address)
.endm
#endif
/* These read the distinguished IFSR, IFAR, DFSR and DFAR registers */
#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
.macro cp15_read_ifsr rx
mrc p15, 0, \rx, c5, c0, 1 @ Read IFSR (Tells why the fault occured)
.endm
.macro cp15_read_ifar rx
mrc p15, 0, \rx, c6, c0, 2 @ Read IFAR (Contains the faulted data address)
.endm
.macro cp15_read_dfsr rx
mrc p15, 0, \rx, c5, c0, 0 @ Read DFSR (Tells why the fault occured)
.endm
.macro cp15_read_dfar rx
mrc p15, 0, \rx, c6, c0, 0 @ Read DFAR (Contains the faulted data address)
.endm
#endif
#define UNDEF_R0 0
#define UNDEF_SPSR -4
#define UNDEF_R14 -8
/*
* vect_undef
*
* Upon Entry:
* - R14: Address of next instruction after undefined instruction
* - PC: 0x00000004
* - IRQs are disabled (CPSR[7] = 1)
*
*
* PURPOSE:
* A co-processor instruction not supported by the core can be
* emulated here. Also unrecognised/invalid instructions are handled.
*/
BEGIN_PROC(arm_undef_exception_reentrant)
@ lr contains address of instruction following the one caused undef
@ sub lr, lr, #4
clear_exclusive
str lr, [sp, #UNDEF_R14] @ Store undef address
mrs lr, spsr @ Get SPSR
str lr, [sp, #UNDEF_SPSR] @ Store SPSR
@@ -142,8 +188,9 @@ BEGIN_PROC(arm_undef_exception_reentrant)
ldr r0, [r0, #UNDEF_R0]
stmfd sp!, {r0-r3,r12,lr}
@ Stack state: |R0<-|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_DUNDEF|{original SP_SVC}|
push_user_sp_lr sp
push_user_sp_lr sp @ NOTE: These must be pushed to avoid trashing them if preempted
@ Stack state: |SP_USR<-|LR_USR|R0<-|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_DUNDEF|{original SP_SVC}|
@ All undef state saved. Can safely enable irqs here, if need be.
ldr r3, [sp, #28] @ Load UNDEF_SPSR
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
@@ -151,7 +198,7 @@ BEGIN_PROC(arm_undef_exception_reentrant)
enable_irqs r3
1:
/* Now check in what mode exception occured, and return that mode's LR in R4
* Also poplulate r0,r1,r2 parameters for undef_handler
* Also poplulate r0,r1,r2 parameters for undefined_instr_handler
*/
ldr r1, [sp, #28] @ Load UNDEF_SPSR
is_psr_usr r0 @ Test if UNDEF_SPSR was user mode.
@@ -159,7 +206,7 @@ BEGIN_PROC(arm_undef_exception_reentrant)
ldreq r2, [sp, #4] @ Abort occured in user, load LR_USR
ldr r0, [sp, #36] @ Load LR_UNDEF saved previously.
mov lr, pc
ldr pc, =undef_handler @ Jump to function outside this page.
ldr pc, =undefined_instr_handler @ Jump to function outside this page.
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
@ (i.e. an interrupt could overwrite spsr with current psr)
ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
@@ -173,7 +220,6 @@ BEGIN_PROC(arm_undef_exception_reentrant)
@ down from where svc stack had left.
END_PROC(arm_undef_exception_reentrant)
/*
* vect_swi
*
@@ -210,6 +256,7 @@ END_PROC(arm_undef_exception_reentrant)
* determine the correct system call, rather than [7:0] bits of the SWI.
*/
BEGIN_PROC(arm_swi_exception)
clear_exclusive
sub lr, lr, #4 @ Get address of swi instruction user executed.
stmfd sp, {r0-r12,sp,lr}^ @ Push arguments, LR_USR and SP_USR to stack.
nop
@@ -278,7 +325,7 @@ END_PROC(arm_swi_exception)
* vect_pabt
*
* Upon Entry:
* - R14_svc: Address of next instruction after aborted instruction
* - R14_abt: Address of next instruction after aborted instruction
* - R14_usr: Address of return instruction in last function call**
* - PC: 0x0000000c
* - IRQs are disabled (CPSR[7] = 1)
@@ -300,6 +347,7 @@ END_PROC(arm_swi_exception)
* where this call came from.
*/
BEGIN_PROC(arm_prefetch_abort_exception_reentrant)
clear_exclusive
sub lr, lr, #4 @ lr-4 points at aborted instruction
str lr, [r13, #ABT_R14] @ Store abort address.
mrs lr, spsr @ Get SPSR
@@ -320,23 +368,19 @@ transfer_pabt_state_to_svc: @ Move data saved on PABT stack to SVC stack.
ldr r0, [r0, #ABT_R0]
stmfd sp!, {r0-r3,r12,lr}
@ Stack state: |R0<-|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
push_user_sp_lr sp
push_user_sp_lr sp @ NOTE: These must be pushed to avoid trashing if preempted
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
read_pabt_state:
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (Tells why the fault occured) FIXME: Do we need this in pabt?
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (Contains the faulted data address) Do we need this in pabt?
cp15_read_ifsr r1 @ Reads FSR on ARMv5, IFSR on ARMv6-v7. Fault status information
cp15_read_ifar r2 @ Reads FAR on ARMv5, IFAR on ARMv6-v7. Fault address information
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
ldr r3, [sp, #28] @ Load PABT_SPSR
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
bne 1f @ Branch here based on previous irq judgement.
enable_irqs r3
1:
/* Now check in what mode abort occured, and return that mode's LR in R4 */
ldr r0, [sp, #28] @ Load PABT_SPSR
is_psr_usr r0 @ Test if PABT_SPSR was user mode.
ldrne r3, [sp, #32] @ Abort occured in kernel, load LR_SVC
ldreq r3, [sp, #4] @ Abort occured in user, load LR_USR
ldr r0, [sp, #36] @ Load LR_PABT saved previously.
ldr r3, [sp, #28] @ Load PABT_SPSR to r3, the spsr for the aborted mode
ldr r0, [sp, #36] @ Load LR_PABT - 4 saved previously. (Address that aborted)
mov lr, pc
ldr pc, =prefetch_abort_handler @ Jump to function outside this page.
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
@@ -381,12 +425,14 @@ BEGIN_PROC(arm_data_abort_exception)
1:
b 1b
END_PROC(arm_data_abort_exception)
/*
* The method of saving abort state to svc stack is identical with that of
* reentrant irq vector. Natural to this, Restoring of the previous state
* is also identical.
*/
BEGIN_PROC(arm_data_abort_exception_reentrant)
clear_exclusive
sub lr, lr, #8 @ Get abort address
str lr, [r13, #ABT_R14] @ Store abort address
mrs lr, spsr @ Get SPSR
@@ -411,8 +457,8 @@ transfer_dabt_state_to_svc:
push_user_sp_lr sp
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
read_dabt_state:
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (Tells why the fault occured)
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (Contains the faulted data address)
cp15_read_dfsr r1 @ Read DFSR (Tells why the fault occured)
cp15_read_dfar r2 @ Read DFAR (Contains the faulted data address)
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
ldr r3, [sp, #28] @ Load DABT_SPSR
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
@@ -493,12 +539,6 @@ BEGIN_PROC(arm_irq_exception_reentrant)
@ down from where svc stack had left.
END_PROC(arm_irq_exception_reentrant)
.macro was_irq_mode rx
mrs rx, spsr_fc
and rx, rx, 0x1F
cmp rx, #ARM_MODE_IRQ
.endm
.macro need_resched rx, ry
get_current \rx
ldr \ry, =need_resched_offset
@@ -514,20 +554,39 @@ END_PROC(arm_irq_exception_reentrant)
.global preempted_psr;
preempted_psr:
.word 0
.word 0
.word 0
.word 0
/* Keeps track of how many nests of irqs have happened. */
.global current_irq_nest_count;
current_irq_nest_count:
.word 0
.word 0
.word 0
.word 0
#if defined (CONFIG_SMP)
@ Rx contains the address of per cpu variable
.macro per_cpu adr, temp, varname
get_cpuid \temp
ldr \adr, =\varname
add \adr, \adr, \temp, lsl #2
.endm
#else
.macro per_cpu adr, temp, varname
ldr \adr, =\varname
.endm
#endif
/*
* FIXME: current_irq_nest_count also counts for any preempt_disable() calls.
* However this nesting check assumes all nests come from real irqs.
* We should make this check just the real ones.
*/
#define IRQ_NESTING_MAX 15
#define IRQ_NESTING_MAX 32
.macro inc_irq_cnt_with_overnest_check rx, ry
ldr \rx, =current_irq_nest_count @ Load the irq nest status word.
per_cpu \rx, \ry, current_irq_nest_count @ Get per-cpu address of variable
ldr \ry, [\rx]
add \ry, \ry, #1 @ No need for atomic inc since irqs are disabled.
str \ry, [\rx]
@@ -541,21 +600,37 @@ current_irq_nest_count:
@ it back to the original value as it was when we read it, before it returns. So effectively
@ anything that runs during the decrement does not affect the value of the count.
.macro dec_irq_nest_cnt rx, ry
ldr \ry, =current_irq_nest_count
per_cpu \ry, \rx, current_irq_nest_count
ldr \rx, [\ry]
sub \rx, \rx, #1
str \rx, [\ry]
.endm
.macro in_process_context rx
ldr \rx, =current_irq_nest_count
.macro in_process_context rx, ry
per_cpu \rx, \ry, current_irq_nest_count
ldr \rx, [\rx]
cmp \rx, #0
.endm
/* If interrupted a process (as opposed to another irq), saves spsr value to preempted_psr */
.macro cmp_and_save_process_psr rx, process_psr
in_process_context \rx @ If nest count is 0, a running process is preempted.
ldreq \rx, =preempted_psr
streq \process_psr, [\rx]
.macro cmp_and_save_process_psr rx, ry
in_process_context \rx, \ry @ If nest count is 0, a running process is preempted.
bne 9999f @ Branch ahead if not a process
per_cpu \rx, \ry, preempted_psr @ Get per-cpu preempted psr
mrs \ry, SPSR @ Re-read spsr since register was trashed
str \ry, [\rx] @ Store it in per-cpu preempted psr
9999:
.endm
/*
* Clear irq bits on register.
*
* If ARMv5, only I-bit is cleared, but if ARMv6-v7,
* A-bit is also cleared.
*/
.macro clr_irq_bits_on_reg rx
bic \rx, #ARM_IRQ_BIT
#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
bic \rx, #ARM_A_BIT
#endif
.endm
#define CONTEXT_PSR 0
@@ -576,7 +651,14 @@ current_irq_nest_count:
#define CONTEXT_R14 60
#define CONTEXT_PC 64
/*
* TODO: Optimization:
* May use SRS/RFE on irq exception _only_. But not
* yet aware of its implications. Only irq handler can
* do it because RFE enables interrupts unconditionally.
*/
BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
clear_exclusive
sub lr, lr, #4
str lr, [r13, #IRQ_R14] @ Save lr_irq
mrs r14, spsr @ Copy spsr
@@ -600,12 +682,22 @@ BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
mov lr, pc
ldr pc, =do_irq @ Read irq number etc. Free to re-enable irqs here.
@ stack state: (Low) r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
ldr r0, =current_irq_nest_count
/*
* Decision point for taking the preemption path
*/
#if !defined(CONFIG_PREEMPT_DISABLE)
per_cpu r0, r1, current_irq_nest_count
ldr r0, [r0]
cmp r0, #1 @ Expect 1 as lowest since each irq increase preempt cnt by 1.
bgt return_to_prev_context @ if (irq_nest > 1) return_to_prev_context();
need_resched r0, r1 @ if (irq_nest == 1 && need_resched) schedule();
beq preemption_path @ if (irq_nest == 1 && !need_resched) return_to_prev_context();
#endif
/*
* Return to previous context path
*/
return_to_prev_context:
dec_irq_nest_cnt r0, r1
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
@@ -615,6 +707,11 @@ return_to_prev_context:
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
@ and pc gets lr_irq. Saved at #4 and #8 offsets
@ down from where svc stack had left.
/*
* Preemption path
*/
#if !defined(CONFIG_PREEMPT_DISABLE)
preemption_path:
disable_irqs r0 @ Interrupts can corrupt stack state.
get_current r0 @ Get the interrupted process
@@ -661,6 +758,8 @@ prepare_schedule:
ldr pc, =schedule
1:
b 1b /* To catch if schedule returns in irq mode */
#endif /* End of !CONFIG_PREEMPT_DISABLE */
END_PROC(arm_irq_exception_reentrant_with_schedule)
/*
@@ -688,8 +787,9 @@ END_PROC(arm_irq_exception_reentrant_with_schedule)
* Furthermore, irqs are also disabled shortly before calling switch_to() from both contexts.
* This happens at points where stack state would be irrecoverable if an irq occured.
*/
BEGIN_PROC(arch_switch)
in_process_context r2 @ Note this depends on preempt count being 0.
BEGIN_PROC(arch_context_switch)
clear_exclusive
in_process_context r2, r3 @ Note this depends on preempt count being 0.
beq save_process_context @ Voluntary switch needs explicit saving of current state.
dec_irq_nest_cnt r2, r3 @ Soon leaving irq context, so reduce preempt count here.
b load_next_context @ Interrupted context already saved by irq handler.
@@ -702,7 +802,7 @@ load_next_context:
@ stack state: (Low) |..|..|..|..|..|..|..|..|..|->(Original)| (High)
mov sp, r1
ldr r0, [sp, #CONTEXT_PSR] @ Load r0 with SPSR
bic r0, r0, #ARM_IRQ_BIT @ Enable irqs on will-be-restored context.
clr_irq_bits_on_reg r0 @ Enable irqs on will-be-restored context.
msr spsr_fcxs, r0 @ Restore spsr from r0.
is_psr_usr r0
bne load_next_context_svc @ Loading user context is different than svc.
@@ -715,7 +815,7 @@ load_next_context_usr:
load_next_context_svc:
ldmib sp, {r0-r15}^ @ Switch to svc context and jump, loading R13 and R14 from stack.
@ This is OK since the jump is to current context.
END_PROC(arch_switch)
END_PROC(arch_context_switch)
/*
@@ -788,11 +888,13 @@ END_PROC(arm_fiq_exception)
* (use a macro, e.g. ____arm_asm_cache_aligned)
*/
.balign 4
__stacks_end: .space 256
__abt_stack: .space 256
__irq_stack: .space 256
__fiq_stack: .space 256
__und_stack: .space 256
/* 16 bytes each per-cpu, up to 8 cpus */
__stacks_end: .space 128
__abt_stack: .space 128
__irq_stack: .space 128
__fiq_stack: .space 128
__und_stack: .space 128
.balign 4096

View File

@@ -1,19 +1,32 @@
import glob
import os, sys, glob
Import("env", "symbols")
PROJRELROOT = '../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import("env", "symbols", "platform", "bdir")
src_local = []
objs = []
for name, val in symbols:
if "CONFIG_DRIVER_UART_PL011" == name:
src_local.append(glob.glob("uart/pl011/*.c"))
objs += SConscript("uart/pl011/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'pl011')
if "CONFIG_DRIVER_TIMER_SP804" == name:
src_local.append(glob.glob("timer/sp804/*.c"))
objs += SConscript("timer/sp804/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'timer')
if "CONFIG_DRIVER_IRQ_PL190" == name:
src_local.append(glob.glob("irq/pl190/*.c"))
objs += SConscript("irq/pl190/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'vic')
if "CONFIG_DRIVER_IRQ_GIC" == name:
src_local.append(glob.glob("irq/gic/*.c"))
objs += SConscript("irq/gic/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'gic')
if "CONFIG_DRIVER_INTC_OMAP" == name:
objs += SConscript("irq/omap3/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/intc')
if "CONFIG_DRIVER_UART_OMAP" == name:
objs += SConscript("uart/omap/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/uart')
if "CONFIG_DRIVER_TIMER_OMAP" == name:
objs += SConscript("timer/omap/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/timer')
obj = env.Object(src_local)
Return('obj')
Return('objs')

View File

@@ -0,0 +1,17 @@
# Inherit global environment
#import os, sys
#PROJRELROOT = '../../../'
#sys.path.append(PROJRELROOT)
#from config.projpaths import *
#from configure import *
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['gic.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -1,106 +1,222 @@
/*
* PL190 Vectored irq controller support.
* PLXXX Generic Interrupt Controller support.
*
* This is more pb926 specific as it also touches the SIC, a partial irq
* controller.Normally, irq controller must be independent and singular. Later
* other generic code should make thlongwork in cascaded setup.
*
* Copyright (C) 2007 Bahadir Balban
* This is more ARM Realview EB/PB
* Copyright (C) 2009-2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
#include <l4/lib/bit.h>
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/lib/printk.h>
#include <l4/generic/irq.h>
#include INC_PLAT(irq.h)
#include INC_SUBARCH(mmu_ops.h) /* for dmb/dsb() */
#include <l4/drivers/irq/gic/gic.h>
/* FIXME: Fix the stupid uart driver and change to single definition of this! */
#if defined(read)
#undef read
#endif
#if defined(write)
#undef write
#endif
volatile struct gic_data gic_data[IRQ_CHIPS_MAX];
#define read(a) *((volatile unsigned int *)(a))
#define write(v, a) (*((volatile unsigned int *)(a)) = v)
#define setbit(bitvect, a) write(read(a) | (bitvect), a)
#define clrbit(bitvect, a) write(read(a) & ~(bitvect), a)
#define devio(base, reg, bitvect, setclr) \
((setclr) ? setbit(bitvect, (base + reg)) \
: clrbit(bitvect, (base + reg)))
static inline struct gic_data *get_gic_data(l4id_t irq)
{
struct irq_chip *chip = irq_desc_array[irq].chip;
if (chip)
return (struct gic_data *)irq_desc_array[irq].chip->data;
else
return 0;
}
#if 0
/* Returns the irq number on this chip converting the irq bitvector */
int pl190_read_irq(void)
l4id_t gic_read_irq(void *data)
{
/* This also correctly returns a negative value for a spurious irq. */
return 31 - __clz(read(PL190_VIC_IRQSTATUS));
int irq;
volatile struct gic_data *gic = (struct gic_data *)data;
irq = gic->cpu->ack & 0x1ff;
if (irq == 1023)
return -1023; /* Spurious */
return irq;
}
void pl190_mask_irq(int irq)
void gic_mask_irq(l4id_t irq)
{
/* Reading WO registers blows QEMU/PB926.
* setbit((1 << irq), PL190_VIC_INTENCLEAR); */
write(1 << irq, PL190_VIC_INTENCLEAR);
u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
volatile struct gic_data *gic = get_gic_data(irq);
gic->dist->clr_en[offset] = 1 << (irq % 32);
}
/* Ack is same as mask */
void pl190_ack_irq(int irq)
void gic_unmask_irq(l4id_t irq)
{
pl190_mask_irq(irq);
volatile struct gic_data *gic = get_gic_data(irq);
u32 offset = irq >> 5 ; /* offset = irq / 32 */
gic->dist->set_en[offset] = 1 << (irq % 32);
}
void pl190_unmask_irq(int irq)
void gic_ack_irq(l4id_t irq)
{
setbit(1 << irq, PL190_VIC_INTENABLE);
u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
volatile struct gic_data *gic = get_gic_data(irq);
gic->dist->clr_en[offset] = 1 << (irq % 32);
gic->cpu->eoi = irq;
}
int pl190_sic_read_irq(void)
void gic_ack_and_mask(l4id_t irq)
{
return 32 - __clz(read(PL190_SIC_STATUS));
gic_ack_irq(irq);
gic_mask_irq(irq);
}
void pl190_sic_mask_irq(int irq)
void gic_set_pending(l4id_t irq)
{
write(1 << irq, PL190_SIC_ENCLR);
u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
volatile struct gic_data *gic = get_gic_data(irq);
gic->dist->set_pending[offset] = 1 << (irq % 32);
}
void pl190_sic_ack_irq(int irq)
void gic_clear_pending(l4id_t irq)
{
pl190_sic_mask_irq(irq);
u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
volatile struct gic_data *gic = get_gic_data(irq);
gic->dist->clr_pending[offset] = 1 << (irq % 32);
}
void pl190_sic_unmask_irq(int irq)
void gic_cpu_init(int idx, unsigned long base)
{
setbit(1 << irq, PL190_SIC_ENSET);
struct gic_cpu *cpu;
cpu = gic_data[idx].cpu = (struct gic_cpu *)base;
/* Disable */
cpu->control = 0;
cpu->prio_mask = 0xf0;
cpu->bin_point = 3;
cpu->control = 1;
}
/* Initialises the primary and secondary interrupt controllers */
void pl190_vic_init(void)
void gic_dist_init(int idx, unsigned long base)
{
int i, irqs_per_word; /* Interrupts per word */
struct gic_dist *dist;
dist = gic_data[idx].dist = (struct gic_dist *)(base);
/* Surely disable GIC */
dist->control = 0;
/* 32*(N+1) interrupts supported */
int nirqs = 32 * ((dist->type & 0x1f) + 1);
if (nirqs > IRQS_MAX)
nirqs = IRQS_MAX;
/* Clear all interrupts */
write(0, PL190_VIC_INTENABLE);
write(0xFFFFFFFF, PL190_VIC_INTENCLEAR);
irqs_per_word = 32;
for(i = 0; i < nirqs ; i+=irqs_per_word) {
dist->clr_en[i/irqs_per_word] = 0xffffffff;
}
/* Set all irqs as normal IRQs (i.e. not FIQ) */
write(0, PL190_VIC_INTSELECT);
/* TODO: Is there a SIC_IRQ_SELECT for irq/fiq ??? */
/* Clear all pending interrupts */
for(i = 0; i < nirqs ; i+=irqs_per_word) {
dist->clr_pending[i/irqs_per_word] = 0xffffffff;
}
/* Disable user-mode access to VIC registers */
write(1, PL190_VIC_PROTECTION);
/* Set all irqs as normal priority, 8 bits per interrupt */
irqs_per_word = 4;
for(i = 32; i < nirqs ; i+=irqs_per_word) {
dist->priority[i/irqs_per_word] = 0xa0a0a0a0;
}
/* Clear software interrupts */
write(0xFFFFFFFF, PL190_VIC_SOFTINTCLEAR);
/* Set all target to cpu0, 8 bits per interrupt */
for(i = 32; i < nirqs ; i+=irqs_per_word) {
dist->target[i/irqs_per_word] = 0x01010101;
}
/* At this point, all interrupts are cleared and disabled.
* the controllers are ready to receive interrupts, if enabled. */
return;
/* Configure all to be level-sensitive, 2 bits per interrupt */
irqs_per_word = 16;
for(i = 32; i < nirqs ; i+=irqs_per_word) {
dist->config[i/irqs_per_word] = 0x00000000;
}
/* Enable GIC Distributor */
dist->control = 1;
}
void pl190_sic_init(void)
/* Some functions, may be helpful */
void gic_set_target(u32 irq, u32 cpu)
{
write(0, PL190_SIC_ENABLE);
write(0xFFFFFFFF, PL190_SIC_ENCLR);
/* Disable SIC-to-PIC direct routing of individual irq lines on SIC */
write(0xFFFFFFFF, PL190_SIC_PICENCLR);
/* cpu is a mask, not cpu number */
cpu &= 0xF;
irq &= 0xFF;
volatile struct gic_data *gic = get_gic_data(irq);
u32 offset = irq >> 2; /* offset = irq / 4 */
gic->dist->target[offset] |= (cpu << ((irq % 4) * 8));
}
#endif
/*
 * Read back the CPU target mask for an irq.
 * Four 8-bit target fields are packed into each 32-bit word of the
 * distributor target register bank.
 */
u32 gic_get_target(u32 irq)
{
	volatile struct gic_data *gic;
	u32 word, shift;

	irq &= 0xFF;
	gic = get_gic_data(irq);
	shift = (irq & 0x3) * 8;	/* byte lane within the word */
	word = gic->dist->target[irq >> 2];

	/* Returned value is a CPU mask, not a cpu number */
	return (word >> shift) & 0xFF;
}
/*
 * Set the priority field for an irq.
 *
 * Fix over previous version: priorities live in the distributor
 * priority register bank (8-bit field per irq, 4 fields per 32-bit
 * word -- see the 0xa0a0a0a0 fill in gic_dist_init()), not in the
 * target registers, and the old field must be cleared before the
 * new value is OR-ed in.
 */
void gic_set_priority(u32 irq, u32 prio)
{
	volatile struct gic_data *gic;
	u32 offset, shift;

	prio &= 0xFF;
	irq &= 0xFF;
	offset = irq >> 2;		/* offset = irq / 4 */
	shift = (irq & 0x3) * 8;	/* 8 bits per priority field */

	gic = get_gic_data(irq);
	gic->dist->priority[offset] =
		(gic->dist->priority[offset] & ~(0xFF << shift)) |
		(prio << shift);
}
/*
 * Read back the priority field for an irq.
 *
 * Fix over previous version: read from the priority register bank
 * (not the target bank) and extract the 8-bit field; the old code
 * masked the register with a value derived from the irq number
 * itself, which returned garbage.
 */
u32 gic_get_priority(u32 irq)
{
	volatile struct gic_data *gic;
	u32 offset, shift;

	irq &= 0xFF;
	offset = irq >> 2;		/* offset = irq / 4 */
	shift = (irq & 0x3) * 8;	/* 8 bits per priority field */

	gic = get_gic_data(irq);
	return (gic->dist->priority[offset] >> shift) & 0xFF;
}
/* Software interrupt (SGI) routing modes: TYPE field of the soft_int register */
#define TO_MANY 0 /* to all specified in a CPU mask */
#define TO_OTHERS 1 /* all but me */
#define TO_SELF 2 /* just to the requesting CPU */
/* Bit positions of the CPU target list and the routing type field */
#define CPU_MASK_BIT 16
#define TYPE_MASK_BIT 24

/*
 * Send a software-generated interrupt (IPI).
 *
 * @cpu: CPU target bitmask (bit per core), not a cpu number.
 * @ipi_cmd: software interrupt id (masked to 0-15).
 *
 * if cpu is 0, then ipi is sent to self
 * if cpu has exactly 1 bit set, the ipi to just that core
 * if cpu has a mask, sent to all but current
 */
void gic_send_ipi(int cpu, int ipi_cmd)
{
	/* NOTE(review): always uses gic_data[0] -- assumes a single
	 * distributor handles all SGIs; confirm for multi-GIC setups */
	struct gic_dist *dist = gic_data[0].dist;

	ipi_cmd &= 0xf;		/* SGI ids are 0-15 */
	cpu &= 0xff;		/* 8-bit CPU target list */

	/* Barrier: make prior memory writes visible before raising the IPI */
	dsb();

	if (cpu == 0) /* Self */
		dist->soft_int = (TO_SELF << 24) | ipi_cmd;
	else if ((cpu & (cpu-1)) == 0) /* Exactly to one CPU */
		dist->soft_int = (TO_MANY << 24) | (cpu << 16) | ipi_cmd;
	else /* All but me */
		dist->soft_int = (TO_OTHERS << 24) | (cpu << 16) | ipi_cmd;
}
/*
 * No-op init hook to satisfy the generic irq controller interface
 * on configurations that need no extra setup.
 * (Fix: declare as (void) -- an empty () parameter list is an
 * unprototyped K&R-style declaration.)
 */
void gic_dummy_init(void)
{
}

View File

@@ -0,0 +1,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = Glob('*.c')
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,70 @@
/*
* Interrupt Controller - Beagleboard
*
* Copyright 2010 B Labs Ltd.
*/
#include <l4/drivers/irq/omap3/omap3_intc.h>
#include INC_PLAT(offsets.h)
#include INC_PLAT(irq.h)
/*
 * Acknowledge the current interrupt: writing 1 to the INTC control
 * register allows new IRQ generation.
 * Note: @irq is unused -- the INTCPS ack is global, not per-line.
 */
void omap3_intc_ack_irq(l4id_t irq)
{
	write(0x1, PLATFORM_INTC_VBASE + OMAP3_INTC_CONTROL);
}

/* Mask (disable) one irq line via the MIR set register */
void omap3_intc_mask_irq(l4id_t irq)
{
	omap3_intc_set_irq_status(PLATFORM_INTC_VBASE, OMAP3_INTC_MIR_SET, irq);
}

/* Unmask (enable) one irq line via the MIR clear register */
void omap3_intc_unmask_irq(l4id_t irq)
{
	omap3_intc_set_irq_status(PLATFORM_INTC_VBASE, OMAP3_INTC_MIR_CLR, irq);
}

/* End of Interrupt: mask the line first, then ack the controller */
void omap3_intc_ack_and_mask(l4id_t irq)
{
	omap3_intc_mask_irq(irq);
	omap3_intc_ack_irq(irq);
}
/*
 * Return the currently active irq number, or -1 if none.
 * SIR_IRQ holds the active irq in its low 7 bits.
 */
l4id_t omap3_intc_read_irq(void *data)
{
	unsigned int active;

	active = read(PLATFORM_INTC_VBASE + OMAP3_INTC_SIR_IRQ) & 0x7F;
	if (active)
		return active;

	return -1;
}
/*
 * Soft-reset the interrupt controller and busy-wait until the
 * reset-done bit (bit 0) of SYSSTATUS is asserted.
 */
void omap3_intc_reset(unsigned long base)
{
	/* Assert Reset */
	write(OMAP_INTC_SOFTRESET, (base + OMAP3_INTC_SYSCONFIG));

	/* Busy-wait for completion */
	while (!(read((base + OMAP3_INTC_SYSSTATUS)) & 0x1));
}
/*
 * Bring the controller to a known state: soft-reset, set every line
 * to IRQ type with a default priority, then mask all lines.
 */
void omap3_intc_init(void)
{
	int i;

	/* Do Soft-Reset */
	omap3_intc_reset(PLATFORM_INTC_VBASE);

	/*
	 * Set All IRQ to IRQ type and
	 * Priority as 0x0A - an arbitrary mid-range value,
	 * shifted into the ILR priority field
	 */
	for (i = 0; i < IRQS_MAX; i++)
		omap3_intc_set_ilr(PLATFORM_INTC_VBASE, i, (0x0A << 2));

	/* Mask(set mask) all interrupts */
	for (i = 0; i < IRQS_MAX; i++)
		omap3_intc_set_irq_status(PLATFORM_INTC_VBASE,
					  OMAP3_INTC_MIR_SET, i);
}

View File

@@ -1,10 +1,8 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['mm.c', 'mmu_ops.S', 'mutex.S']
src_local = ['pl190_vic.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -12,24 +12,8 @@
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/generic/irq.h>
/* FIXME: Fix the stupid uart driver and change to single definition of this! */
#if defined(read)
#undef read
#endif
#if defined(write)
#undef write
#endif
#define read(a) *((volatile unsigned int *)(a))
#define write(v, a) (*((volatile unsigned int *)(a)) = v)
#define setbit(bitvect, a) write(read(a) | (bitvect), a)
#define clrbit(bitvect, a) write(read(a) & ~(bitvect), a)
#define devio(base, reg, bitvect, setclr) \
((setclr) ? setbit(bitvect, (base + reg)) \
: clrbit(bitvect, (base + reg)))
/* Returns the irq number on this chip converting the irq bitvector */
l4id_t pl190_read_irq(void)
l4id_t pl190_read_irq(void *nil)
{
l4id_t irq;
@@ -54,10 +38,10 @@ void pl190_ack_irq(l4id_t irq)
void pl190_unmask_irq(l4id_t irq)
{
setbit(1 << irq, PL190_VIC_INTENABLE);
setbit((unsigned int *)PL190_VIC_INTENABLE, (1 << irq));
}
l4id_t pl190_sic_read_irq(void)
l4id_t pl190_sic_read_irq(void *nil)
{
l4id_t irq;
@@ -79,7 +63,7 @@ void pl190_sic_ack_irq(l4id_t irq)
void pl190_sic_unmask_irq(l4id_t irq)
{
setbit(1 << irq, PL190_SIC_ENSET);
setbit((unsigned int *)PL190_SIC_ENSET, (1 << irq));
}
/* Initialises the primary and secondary interrupt controllers */

View File

@@ -0,0 +1,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['timer.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,133 @@
/*
* omap GP timer driver honoring generic
* timer library API
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include <l4/drivers/timer/omap/timer.h>
#include INC_ARCH(io.h)
/* TISR interrupt status flags: match, overflow, capture */
#define OMAP_TIMER_MAT_IT_FLAG (1 << 0)
#define OMAP_TIMER_OVR_IT_FLAG (1 << 1)
#define OMAP_TIMER_TCAR_IT_FLAG (1 << 2)

/*
 * Acknowledge a pending overflow interrupt by writing the overflow
 * flag back to TISR.
 * NOTE(review): assumes TISR status bits are write-1-to-clear, so
 * OR-ing the read-back value clears only the set flags -- confirm
 * against the OMAP GP timer TRM.
 */
void timer_irq_clear(unsigned long timer_base)
{
	volatile u32 reg = read(timer_base + OMAP_TIMER_TISR);
	reg |= OMAP_TIMER_OVR_IT_FLAG;
	write(reg, timer_base + OMAP_TIMER_TISR);
}

/* Return non-zero if an overflow (periodic tick) interrupt is pending */
u32 timer_periodic_intr_status(unsigned long timer_base)
{
	volatile u32 reg = read(timer_base + OMAP_TIMER_TISR);
	return (reg & OMAP_TIMER_OVR_IT_FLAG);
}
#define OMAP_TIMER_SOFT_RESET (1 << 1)

/* Soft-reset the timer and busy-wait until TSTAT reports completion */
void timer_reset(unsigned long timer_base)
{
	/* Reset Timer */
	write(OMAP_TIMER_SOFT_RESET, timer_base + OMAP_TIMER_TIOCP);

	/* Wait for reset completion */
	while (!read(timer_base + OMAP_TIMER_TSTAT));
}
/*
 * Program the autoreload value (TLDR) used after each overflow, and
 * also load the running counter (TCRR) so the first period matches.
 */
void timer_load(unsigned long timer_base, u32 value)
{
	write(value, timer_base + OMAP_TIMER_TLDR);
	write(value, timer_base + OMAP_TIMER_TCRR);
}

/* Return the current counter value */
u32 timer_read(unsigned long timer_base)
{
	return read(timer_base + OMAP_TIMER_TCRR);
}
#define OMAP_TIMER_START (1 << 0)

/* Start the counter by setting the ST bit in TCLR */
void timer_start(unsigned long timer_base)
{
	u32 ctrl = read(timer_base + OMAP_TIMER_TCLR);

	write(ctrl | OMAP_TIMER_START, timer_base + OMAP_TIMER_TCLR);
}

/* Stop the counter by clearing the ST bit in TCLR */
void timer_stop(unsigned long timer_base)
{
	u32 ctrl = read(timer_base + OMAP_TIMER_TCLR);

	write(ctrl & ~OMAP_TIMER_START, timer_base + OMAP_TIMER_TCLR);
}
/*
* Beagle Board RevC manual:
* overflow period = (0xffffffff - TLDR + 1)*PS*(1/TIMER_FCLK)
* where,
* PS: Prescaler divisor (we are not using this)
*
* Beagle board manual says, 26MHz oscillator present on board.
* U-Boot divides the sys_clock by 2 if sys_clk is >19MHz,
* so,we have sys_clk frequency = 13MHz
*
* TIMER_FCLK = 13MHz
*
*/
/*
 * Configure the timer for one-shot use: reset, select compare mode
 * and set the match value to the maximum.
 * NOTE(review): interrupts are not enabled here and TIER/TMAR are
 * presumably reprogrammed by the caller before use -- confirm
 * against users of this call.
 */
void timer_init_oneshot(unsigned long timer_base)
{
	volatile u32 reg;

	/* Reset the timer */
	timer_reset(timer_base);

	/* Set timer to compare mode */
	reg = read(timer_base + OMAP_TIMER_TCLR);
	reg |= (1 << OMAP_TIMER_MODE_COMPARE);
	write(reg, timer_base + OMAP_TIMER_TCLR);

	write(0xffffffff, timer_base + OMAP_TIMER_TMAR);

	/* Clear pending Interrupts, if any (all 3 TISR status flags) */
	write(7, timer_base + OMAP_TIMER_TISR);
}
/*
 * Configure the timer for a 1 msec periodic tick: reset, enable
 * autoreload, program the reload value and unmask the overflow
 * interrupt.
 */
void timer_init_periodic(unsigned long timer_base)
{
	volatile u32 reg;

	/* Reset the timer */
	timer_reset(timer_base);

	/* Set timer to autoreload mode */
	reg = read(timer_base + OMAP_TIMER_TCLR);
	reg |= (1 << OMAP_TIMER_MODE_AUTORELAOD);
	write(reg, timer_base + OMAP_TIMER_TCLR);

	/*
	 * Beagle Board RevC manual:
	 * overflow period = (0xffffffff - TLDR + 1)*PS*(1/TIMER_FCLK)
	 * where,
	 * PS: Prescaler divisor (we are not using this)
	 *
	 * Beagle board manual says a 26MHz oscillator is present on board.
	 * U-Boot divides the sys_clock by 2 if sys_clk is >19MHz,
	 * so we have sys_clk frequency = 13MHz
	 *
	 * TIMER_FCLK = 13MHz
	 * So, for a 1ms period, TLDR = 0xffffcd38
	 */
	timer_load(timer_base, 0xffffcd38);

	/* Clear pending interrupts, if any (all 3 TISR status flags) */
	write(7, timer_base + OMAP_TIMER_TISR);

	/* Enable the overflow interrupt */
	write((1 << OMAP_TIMER_INTR_OVERFLOW), timer_base + OMAP_TIMER_TIER);
}
/* Generic timer init entry point: default to a 1 msec periodic tick */
void timer_init(unsigned long timer_base)
{
	timer_init_periodic(timer_base);
}

View File

@@ -0,0 +1,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['timer.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,78 @@
/*
* SP804 primecell driver honoring generic
* timer library API
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include INC_ARCH(io.h)
#include <l4/drivers/timer/sp804/timer.h>
/* Return the base address of the second timer of an SP804 pair */
unsigned long timer_secondary_base(unsigned long timer_base)
{
	return timer_base + SP804_SECONDARY_OFFSET;
}

/* Acknowledge a pending timer interrupt via the interrupt clear register */
void timer_irq_clear(unsigned long timer_base)
{
	write(1, timer_base + SP804_INTCLR);
}
/* Enable timer with its current configuration */
void timer_start(unsigned long timer_base)
{
	volatile u32 reg = read(timer_base + SP804_CTRL);
	reg |= SP804_ENABLE;
	write(reg, timer_base + SP804_CTRL);
}

/*
 * Load the timer with ticks value.
 * NOTE(review): argument order (value first) is the reverse of the
 * omap driver's timer_load() -- callers are per-platform, but keep
 * this in mind when unifying the timer API.
 */
void timer_load(u32 loadval, unsigned long timer_base)
{
	write(loadval, timer_base + SP804_LOAD);
}

/* Return the current countdown value */
u32 timer_read(unsigned long timer_base)
{
	return read(timer_base + SP804_VALUE);
}

/*
 * Stop the timer. Writing 0 clears the whole control register,
 * which also drops mode/irq configuration, not just the enable bit.
 */
void timer_stop(unsigned long timer_base)
{
	write(0, timer_base + SP804_CTRL);
}
/*
 * Configure a periodic tick: 32-bit periodic wraparound mode with
 * irq on wraparound, then program the reload value (default 1000
 * ticks: one irq per msec at 1 tick per usec).
 */
void timer_init_periodic(unsigned long timer_base, unsigned int load_value)
{
	/* Periodic, wraparound, 32 bit, irq on wraparound */
	write(SP804_PERIODIC | SP804_32BIT | SP804_IRQEN,
	      timer_base + SP804_CTRL);

	/* 1 tick per usec, 1 irq per msec */
	timer_load(load_value ? load_value : 1000, timer_base);
}
/* Configure for one-shot use: 32-bit, one-shot mode, interrupts left off */
void timer_init_oneshot(unsigned long timer_base)
{
	volatile u32 reg = read(timer_base + SP804_CTRL);

	/* One shot, 32 bits, no irqs */
	reg |= SP804_32BIT | SP804_ONESHOT;
	write(reg, timer_base + SP804_CTRL);
}
/* Generic timer init entry point: default to periodic mode */
void timer_init(unsigned long timer_base, unsigned int load_value)
{
	timer_init_periodic(timer_base, load_value);
}

View File

@@ -0,0 +1,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['uart.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,214 @@
/*
* UART driver used by OMAP devices
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/drivers/uart/omap/uart.h>
#include INC_ARCH(io.h)
#define OMAP_UART_FIFO_ENABLE (1 << 0)
#define OMAP_UART_RX_FIFO_CLR (1 << 1)
#define OMAP_UART_TX_FIFO_CLR (1 << 2)

/* Enable the FIFOs, flushing any stale rx/tx contents */
static inline void uart_enable_fifo(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_FCR);
	reg |= (OMAP_UART_FIFO_ENABLE | OMAP_UART_RX_FIFO_CLR |
		OMAP_UART_TX_FIFO_CLR);
	write(reg, uart_base + OMAP_UART_FCR);
}

/*
 * Disable the FIFOs.
 *
 * Fix: the old mask `(~A | B | C)` negated only the first flag due
 * to operator precedence, so the intent of clearing all three FCR
 * bits was not met; use `~(A | B | C)`.
 */
static inline void uart_disable_fifo(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_FCR);
	reg &= ~(OMAP_UART_FIFO_ENABLE | OMAP_UART_RX_FIFO_CLR |
		 OMAP_UART_TX_FIFO_CLR);
	write(reg, uart_base + OMAP_UART_FCR);
}
#define OMAP_UART_TX_ENABLE (1 << 0)

/*
 * NOTE(review): the four helpers below toggle MCR bits 0 and 1,
 * which on 16550-compatible UARTs are conventionally the DTR/RTS
 * modem-control lines rather than tx/rx enables -- confirm against
 * the OMAP UART TRM.
 */
static inline void uart_enable_tx(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_MCR);
	reg |= OMAP_UART_TX_ENABLE;
	write(reg, uart_base + OMAP_UART_MCR);
}

static inline void uart_disable_tx(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_MCR);
	reg &= ~OMAP_UART_TX_ENABLE;
	write(reg, uart_base + OMAP_UART_MCR);
}

#define OMAP_UART_RX_ENABLE (1 << 1)

static inline void uart_enable_rx(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_MCR);
	reg |= OMAP_UART_RX_ENABLE;
	write(reg, uart_base + OMAP_UART_MCR);
}

static inline void uart_disable_rx(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_MCR);
	reg &= ~OMAP_UART_RX_ENABLE;
	write(reg, uart_base + OMAP_UART_MCR);
}
#define OMAP_UART_STOP_BITS_MASK (1 << 2)

/*
 * Select the number of stop bits.
 * @bits is shifted straight into LCR bit 2, so only 0 or 1 are
 * meaningful values (e.g. OMAP_UART_STOP_BITS_1).
 */
static inline void uart_set_stop_bits(unsigned long uart_base, int bits)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg &= ~OMAP_UART_STOP_BITS_MASK;
	reg |= (bits << 2);
	write(reg, uart_base + OMAP_UART_LCR);
}

#define OMAP_UART_DATA_BITS_MASK (0x3)

/*
 * Select the character length.
 * @bits is the raw 2-bit LCR encoding (e.g. OMAP_UART_DATA_BITS_8),
 * not the bit count itself.
 */
static inline void uart_set_data_bits(unsigned long uart_base, int bits)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg &= ~OMAP_UART_DATA_BITS_MASK;
	reg |= bits;
	write(reg, uart_base + OMAP_UART_LCR);
}
#define OMAP_UART_PARITY_ENABLE (1 << 3)

/* Turn parity generation/checking on */
static inline void uart_enable_parity(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg |= OMAP_UART_PARITY_ENABLE;
	write(reg, uart_base + OMAP_UART_LCR);
}

/* Turn parity generation/checking off */
static inline void uart_disable_parity(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg &= ~OMAP_UART_PARITY_ENABLE;
	write(reg, uart_base + OMAP_UART_LCR);
}

#define OMAP_UART_PARITY_EVEN (1 << 4)

/* Select even parity (only meaningful when parity is enabled) */
static inline void uart_set_even_parity(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg |= OMAP_UART_PARITY_EVEN;
	write(reg, uart_base + OMAP_UART_LCR);
}

/* Select odd parity (only meaningful when parity is enabled) */
static inline void uart_set_odd_parity(unsigned long uart_base)
{
	volatile u32 reg = read(uart_base + OMAP_UART_LCR);
	reg &= ~OMAP_UART_PARITY_EVEN;
	write(reg, uart_base + OMAP_UART_LCR);
}
/* Program the UART operating mode via the mode definition register */
static inline void uart_select_mode(unsigned long uart_base, int mode)
{
	write(mode, uart_base + OMAP_UART_MDR1);
}

#define OMAP_UART_INTR_EN 1

/* Enable the receive-data interrupt */
static inline void uart_enable_interrupt(unsigned long uart_base)
{
	write(OMAP_UART_INTR_EN, uart_base + OMAP_UART_IER);
}

/*
 * Disable all UART interrupts.
 *
 * Fix: the old code wrote ~OMAP_UART_INTR_EN (0xFFFFFFFE) to the
 * interrupt enable register, which actually *set* every other
 * enable bit; writing zero disables all interrupt sources.
 */
static inline void uart_disable_interrupt(unsigned long uart_base)
{
	write(0, uart_base + OMAP_UART_IER);
}

/* Write the line control register (also selects banked register modes) */
static inline void uart_set_link_control(unsigned long uart_base, int mode)
{
	write(mode, uart_base + OMAP_UART_LCR);
}
#define OMAP_UART_TXFE 0x20

/* Blocking single-character transmit */
void uart_tx_char(unsigned long uart_base, char c)
{
	/* Spin until the transmit holding register/FIFO is empty */
	while (!(read(uart_base + OMAP_UART_LSR) & OMAP_UART_TXFE))
		;

	write(c, uart_base + OMAP_UART_THR);
}
#define OMAP_UART_RXFNE 0x1
#define OMAP_UART_RX_FIFO_STATUS 0x8

/*
 * Blocking single-character receive: spin until LSR reports pending
 * data, then pull one character from the receive holding register.
 */
char uart_rx_char(unsigned long uart_base)
{
	volatile u32 reg;

	/* Check if pending data is there */
	do {
		reg = read(uart_base + OMAP_UART_LSR);
	} while(!(reg & OMAP_UART_RXFNE));

#if 0
	/* Check if there is some error in receive */
	if(reg & OMAP_UART_RX_FIFO_STATUS)
		return -1;
#endif
	return (char)read(uart_base + OMAP_UART_RHR);
}
/*
 * Program the 16x baud divisor registers.
 * The caller must already have switched to a banked configuration
 * mode (see uart_init()) so that DLL/DLH are accessible.
 */
void uart_set_baudrate(unsigned long uart_base, u32 baudrate, u32 clkrate)
{
	u32 clk_div = 0;
	/* 48Mhz clock fixed on beagleboard */
	const u32 uartclk = 48000000;

	if(clkrate == 0)
		clkrate = uartclk;

	/* If baud out of range, set default rate */
	if(baudrate > 3686400 || baudrate < 300)
		baudrate = 115200;

	/* Standard 16x divisor: clk / (16 * baud) */
	clk_div = clkrate/(16 * baudrate);

	/* Set clockrate in DLH and DLL */
	write((clk_div & 0xff), uart_base + OMAP_UART_DLL);
	write(((clk_div >> 8) & 0xff ), uart_base + OMAP_UART_DLH);
}
/*
 * One-time port setup: 115200 baud, 8N1, FIFOs off, rx/tx enabled,
 * UART16x operating mode.
 */
void uart_init(unsigned long uart_base)
{
	/* Disable UART */
	uart_select_mode(uart_base, OMAP_UART_MODE_DEFAULT);

	/* Disable interrupts */
	uart_disable_interrupt(uart_base);

	/* Change to config mode, to set baud divisor */
	uart_set_link_control(uart_base, OMAP_UART_BANKED_MODE_CONFIG_A);

	/* Set the baud rate */
	uart_set_baudrate(uart_base, 115200, 48000000);

	/* Switch to operational mode */
	uart_set_link_control(uart_base, OMAP_UART_BANKED_MODE_OPERATIONAL);

	/* Set up the link - parity, data bits, stop bits to 8N1 */
	uart_disable_parity(uart_base);
	uart_set_data_bits(uart_base, OMAP_UART_DATA_BITS_8);
	uart_set_stop_bits(uart_base, OMAP_UART_STOP_BITS_1);

	/* Disable Fifos */
	uart_disable_fifo(uart_base);

	/* Enable modem Rx/Tx */
	uart_enable_tx(uart_base);
	uart_enable_rx(uart_base);

	/* Enable UART in 16x mode */
	uart_select_mode(uart_base, OMAP_UART_MODE_UART16X);
}

View File

@@ -0,0 +1,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['uart.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -0,0 +1,298 @@
/*
* PL011 UART driver
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <l4/drivers/uart/pl011/uart.h>
#include INC_ARCH(io.h)
/* Error status bits in receive status register */
#define PL011_FE (1 << 0)
#define PL011_PE (1 << 1)
#define PL011_BE (1 << 2)
#define PL011_OE (1 << 3)
/* Status bits in flag register */
#define PL011_TXFE (1 << 7)
#define PL011_RXFF (1 << 6)
#define PL011_TXFF (1 << 5)
#define PL011_RXFE (1 << 4)
#define PL011_BUSY (1 << 3)
#define PL011_DCD (1 << 2)
#define PL011_DSR (1 << 1)
#define PL011_CTS (1 << 0)
#define PL011_UARTEN (1 << 0)

/* Set the UART enable bit in the control register */
static inline void pl011_uart_enable(unsigned long base)
{
	write(read(base + PL011_UARTCR) | PL011_UARTEN, base + PL011_UARTCR);
}

/* Clear the UART enable bit in the control register */
static inline void pl011_uart_disable(unsigned long base)
{
	write(read(base + PL011_UARTCR) & ~PL011_UARTEN, base + PL011_UARTCR);
}

#define PL011_TXE (1 << 8)

/* Enable the transmitter */
static inline void pl011_tx_enable(unsigned long base)
{
	write(read(base + PL011_UARTCR) | PL011_TXE, base + PL011_UARTCR);
}

/* Disable the transmitter */
static inline void pl011_tx_disable(unsigned long base)
{
	write(read(base + PL011_UARTCR) & ~PL011_TXE, base + PL011_UARTCR);
}

#define PL011_RXE (1 << 9)

/* Enable the receiver */
static inline void pl011_rx_enable(unsigned long base)
{
	write(read(base + PL011_UARTCR) | PL011_RXE, base + PL011_UARTCR);
}

/* Disable the receiver */
static inline void pl011_rx_disable(unsigned long base)
{
	write(read(base + PL011_UARTCR) & ~PL011_RXE, base + PL011_UARTCR);
}
#define PL011_TWO_STOPBITS_SELECT (1 << 3)

/* Select one (default) or two stop bits per frame */
static inline void pl011_set_stopbits(unsigned long base, int stopbits)
{
	unsigned int lcr = read(base + PL011_UARTLCR_H);

	if (stopbits == 2)
		lcr |= PL011_TWO_STOPBITS_SELECT;
	else	/* Default is one stop bit */
		lcr &= ~PL011_TWO_STOPBITS_SELECT;

	write(lcr, base + PL011_UARTLCR_H);
}
#define PL011_PARITY_ENABLE (1 << 1)

/* Turn parity generation/checking on */
static inline void pl011_parity_enable(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) | PL011_PARITY_ENABLE,
	      base + PL011_UARTLCR_H);
}

/* Turn parity generation/checking off */
static inline void pl011_parity_disable(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) & ~PL011_PARITY_ENABLE,
	      base + PL011_UARTLCR_H);
}

#define PL011_PARITY_EVEN (1 << 2)

/* Select even parity (when parity is enabled) */
static inline void pl011_set_parity_even(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) | PL011_PARITY_EVEN,
	      base + PL011_UARTLCR_H);
}

/* Select odd parity (when parity is enabled) */
static inline void pl011_set_parity_odd(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) & ~PL011_PARITY_EVEN,
	      base + PL011_UARTLCR_H);
}

#define PL011_ENABLE_FIFOS (1 << 4)

/* Enable the rx/tx FIFOs */
static inline void pl011_enable_fifos(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) | PL011_ENABLE_FIFOS,
	      base + PL011_UARTLCR_H);
}

/* Disable the rx/tx FIFOs (single-character mode) */
static inline void pl011_disable_fifos(unsigned long base)
{
	write(read(base + PL011_UARTLCR_H) & ~PL011_ENABLE_FIFOS,
	      base + PL011_UARTLCR_H);
}
/*
 * Sets the transfer word width (5-8 bits) for the data register.
 *
 * Improvement: the old code wrote UARTLCR_H twice (once with the
 * width field cleared, once with the new width), leaving a transient
 * 5-bit configuration between the two writes; a single read-modify-
 * write avoids that window and halves the bus accesses.
 *
 * WLEN field encoding (bits 6:5): value = size - 5, so
 *	11 = 8 bits, 10 = 7 bits, 01 = 6 bits, 00 = 5 bits
 */
static inline void pl011_set_word_width(unsigned long base, int size)
{
	unsigned int val;

	if (size < 5 || size > 8) /* Default is 8 */
		size = 8;

	val = read(base + PL011_UARTLCR_H);
	val &= ~(0x3 << 5);		/* Clear size field */
	val |= (size - 5) << 5;		/* Encode new size */
	write(val, base + PL011_UARTLCR_H);
}
/*
 * Defines at which level of fifo fullness an irq will be generated.
 * @xfer: tx fifo = 0, rx fifo = 1
 * @level: Generate irq if:
 *	0	rxfifo >= 1/8 full	txfifo <= 1/8 full
 *	1	rxfifo >= 1/4 full	txfifo <= 1/4 full
 *	2	rxfifo >= 1/2 full	txfifo <= 1/2 full
 *	3	rxfifo >= 3/4 full	txfifo <= 3/4 full
 *	4	rxfifo >= 7/8 full	txfifo <= 7/8 full
 *	5-7	reserved		reserved
 *
 * Fix: UARTIFLS holds both select fields (tx in bits 2:0, rx in
 * bits 5:3); the old code wrote the register outright, resetting
 * the other direction's level to 0. Read-modify-write preserves it.
 */
static inline void pl011_set_irq_fifolevel(unsigned long base,
					   unsigned int xfer, unsigned int level)
{
	unsigned int val;

	if (xfer != 1 && xfer != 0)	/* Invalid fifo */
		return;
	if (level > 4)			/* Invalid level */
		return;

	val = read(base + PL011_UARTIFLS);
	val &= ~(0x7 << (xfer * 3));	/* Clear this direction's field */
	val |= level << (xfer * 3);
	write(val, base + PL011_UARTIFLS);
}
/* Blocking single-character transmit */
void uart_tx_char(unsigned long base, char c)
{
	/* Spin while the TX FIFO is full */
	while (read(base + PL011_UARTFR) & PL011_TXFF)
		;

	write(c, base + PL011_UARTDR);
}

/* Blocking single-character receive */
char uart_rx_char(unsigned long base)
{
	/* Spin while the RX FIFO is empty */
	while (read(base + PL011_UARTFR) & PL011_RXFE)
		;

	return (char)read(base + PL011_UARTDR);
}
/*
 * Sets the baud rate. It is recommended to use
 * standard rates such as: 1200, 2400, 3600, 4800, 7200,
 * 9600, 14400, 19200, 28800, 38400, 57600, 76800, 115200.
 *
 * Improvement: the old code hardcoded the integer divisor for
 * 38400 baud at 24MHz regardless of the requested rate. The PL011
 * divisor is now computed from the actual clock and baud:
 *	divisor = clkrate / (16 * baud)
 * with the fractional part expressed in 1/64ths, per the PL011 TRM
 * baud rate equation (divisor scaled by 64 = clk * 4 / baud).
 */
void pl011_set_baudrate(unsigned long base, unsigned int baud,
			unsigned int clkrate)
{
	const unsigned int uartclk = 24000000; /* 24Mhz clock fixed on pb926 */
	unsigned int val = 0, ipart = 0, fpart = 0, divx64;

	/* Use default pb926 rate if no rate is supplied */
	if (clkrate == 0)
		clkrate = uartclk;

	if (baud > 115200 || baud < 1200)
		baud = 38400;	/* Default rate. */

	/* 64 * clk / (16 * baud) == clk * 4 / baud */
	divx64 = (clkrate * 4) / baud;
	ipart = divx64 >> 6;	/* Integer divisor */
	fpart = divx64 & 0x3f;	/* Fractional divisor, in 1/64ths */

	write(ipart, base + PL011_UARTIBRD);
	write(fpart, base + PL011_UARTFBRD);

	/*
	 * For the IBAUD and FBAUD to update, we need to
	 * write to UARTLCR_H because the 3 registers are
	 * actually part of a single register in hardware
	 * which only updates by a write to UARTLCR_H
	 */
	val = read(base + PL011_UARTLCR_H);
	write(val, base + PL011_UARTLCR_H);
}
/*
 * One-time port setup: 8 data bits, FIFOs off (so typed characters
 * appear immediately on a terminal), 38400 baud, 1 stop bit, no
 * parity, then rx/tx and the UART itself enabled.
 */
void uart_init(unsigned long uart_base)
{
	/* Initialise data register for 8 bit data read/writes */
	pl011_set_word_width(uart_base, 8);

	/*
	 * Fifos are disabled because by default it is assumed the port
	 * will be used as a user terminal, and in that case the typed
	 * characters will only show up when fifos are flushed, rather than
	 * when each character is typed. We avoid this by not using fifos.
	 */
	pl011_disable_fifos(uart_base);

	/* Set default baud rate of 38400 */
	pl011_set_baudrate(uart_base, 38400, 24000000);

	/* Set default settings of 1 stop bit, no parity, no hw flow ctrl */
	pl011_set_stopbits(uart_base, 1);
	pl011_parity_disable(uart_base);

	/* Enable rx, tx, and uart chip */
	pl011_tx_enable(uart_base);
	pl011_rx_enable(uart_base);
	pl011_uart_enable(uart_base);
}

View File

@@ -4,7 +4,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c', 'bootmem.c', 'resource.c', 'container.c', 'capability.c', 'cinfo.c']
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c', 'bootmem.c', 'resource.c', 'container.c', 'capability.c', 'cinfo.c', 'debug.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -15,10 +15,8 @@
* Increase this size if bootmem allocations fail.
*/
#define BOOTMEM_SIZE (SZ_4K * 4)
SECTION(".init.pgd") pgd_table_t init_pgd;
SECTION(".init.bootmem") char bootmem[BOOTMEM_SIZE];
__initdata struct address_space init_space;
struct address_space init_space;
static unsigned long cursor = (unsigned long)&bootmem;

View File

@@ -16,6 +16,7 @@
#include <l4/api/exregs.h>
#include <l4/api/ipc.h>
#include <l4/api/irq.h>
#include <l4/api/cache.h>
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
@@ -584,6 +585,7 @@ struct capability *cap_match_mem(struct capability *cap,
{
struct sys_map_args *args = args_ptr;
struct ktcb *target = args->task;
unsigned long long start, end, pfn_point;
unsigned long pfn;
unsigned int perms;
@@ -593,27 +595,45 @@ struct capability *cap_match_mem(struct capability *cap,
else
pfn = __pfn(args->virt);
/* Check range */
if (cap->start > pfn || cap->end < pfn + args->npages)
/* Long long range check to avoid overflow */
start = cap->start;
end = cap->end;
pfn_point = pfn;
if (start > pfn_point || cap->end < pfn_point + args->npages)
return 0;
/* Check permissions */
switch (args->flags) {
case MAP_USR_RW_FLAGS:
case MAP_USR_RW:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_RO_FLAGS:
case MAP_USR_RWX:
perms = CAP_MAP_READ | CAP_MAP_WRITE |
CAP_MAP_EXEC | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_RO:
perms = CAP_MAP_READ | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_IO_FLAGS:
case MAP_USR_RX:
perms = CAP_MAP_READ | CAP_MAP_EXEC | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_IO:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_UNCACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_UNMAP: /* Check for unmap syscall */
if (!(cap->access & CAP_MAP_UNMAP))
return 0;
break;
default:
/* Anything else is an invalid/unrecognised argument */
return 0;
@@ -760,6 +780,53 @@ struct capability *cap_match_irqctrl(struct capability *cap,
return cap;
}
struct sys_cache_args {
unsigned long start;
unsigned long npages;
unsigned int flags;
};
struct capability *cap_match_cache(struct capability *cap, void *args_ptr)
{
struct sys_cache_args *args = args_ptr;
unsigned long pfn = __pfn(args->start);
unsigned long long start, end, pfn_point;
unsigned int perms;
/* Long long range check to avoid overflow */
start = cap->start;
end = cap->end;
pfn_point = pfn;
if (start > pfn_point || end < pfn_point + args->npages)
return 0;
/* Check permissions */
switch (args->flags) {
/* check for cache functionality flags */
case L4_INVALIDATE_DCACHE:
case L4_INVALIDATE_ICACHE:
case L4_INVALIDATE_TLB:
perms = CAP_CACHE_INVALIDATE;
if ((cap->access & perms) != perms)
return 0;
break;
case L4_CLEAN_DCACHE:
case L4_CLEAN_INVALIDATE_DCACHE:
perms = CAP_CACHE_CLEAN;
if ((cap->access & perms) != perms)
return 0;
break;
default:
/* Anything else is an invalid/unrecognised argument */
return 0;
}
return cap;
}
#if defined(CONFIG_CAPABILITIES)
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
{
@@ -813,6 +880,26 @@ int cap_map_check(struct ktcb *target, unsigned long phys, unsigned long virt,
return 0;
}
int cap_unmap_check(struct ktcb *target, unsigned long virt,
unsigned long npages)
{
struct capability *virtmem;
/* Unmap check also uses identical struct as map check */
struct sys_map_args args = {
.task = target,
.virt = virt,
.npages = npages,
.flags = MAP_UNMAP,
};
if (!(virtmem = cap_find(current, cap_match_mem,
&args, CAP_TYPE_MAP_VIRTMEM)))
return -ENOCAP;
return 0;
}
/*
* Limitation: We currently only check from sender's
* perspective. This is because sender always targets a
@@ -906,6 +993,30 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
return 0;
}
/*
* This is just a wrapper call for l4_cache_control
* system call sanity check
*/
int cap_cache_check(unsigned long start, unsigned long end, unsigned int flags)
{
struct capability *virtmem;
struct sys_cache_args args = {
.start = start,
.npages = __pfn(end) - __pfn(start),
.flags = flags,
};
/*
* We just want to check if the virtual memory region
* concerned here has
* appropriate permissions for cache calls
*/
if (!(virtmem = cap_find(current, cap_match_cache,
&args, CAP_TYPE_MAP_VIRTMEM)))
return -ENOCAP;
return 0;
}
#else /* Meaning !CONFIG_CAPABILITIES */
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
@@ -930,6 +1041,12 @@ int cap_map_check(struct ktcb *task, unsigned long phys, unsigned long virt,
return 0;
}
int cap_unmap_check(struct ktcb *target, unsigned long virt,
unsigned long npages)
{
return 0;
}
int cap_exregs_check(struct ktcb *task, struct exregs_data *exregs)
{
return 0;
@@ -948,4 +1065,9 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
return 0;
}
int cap_cache_check(unsigned long start, unsigned long end,
unsigned int flags)
{
return 0;
}
#endif /* End of !CONFIG_CAPABILITIES */

View File

@@ -8,8 +8,10 @@
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/thread.h>
#include <l4/api/errno.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mm.h)
#include INC_ARCH(linker.h)
@@ -83,22 +85,16 @@ struct container *container_find(struct kernel_resources *kres, l4id_t cid)
* This involves setting up pager's ktcb, space, utcb,
* all ids, registers, and mapping its (perhaps) first
* few pages in order to make it runnable.
*
* The first pager initialization is a special-case
* since it uses the current kernel pgd.
*/
int init_pager(struct pager *pager,
struct container *cont,
pgd_table_t *current_pgd)
int init_pager(struct pager *pager, struct container *cont)
{
struct ktcb *task;
struct address_space *space;
int first = !!current_pgd;
/*
* Set up dummy current cap_list so that cap accounting
* can be done to this pager. Note, that we're still on
* bootstack.
* idle task stack.
*/
cap_list_move(&current->cap_list, &pager->cap_list);
@@ -108,25 +104,8 @@ int init_pager(struct pager *pager,
/* New ktcb allocation is needed */
task = tcb_alloc_init(cont->cid);
/* If first, manually allocate/initalize space */
if (first) {
if (!(space = alloc_space()))
return -ENOMEM;
/* Set up space id */
space->spid = id_new(&kernel_resources.space_ids);
/* Initialize space structure */
link_init(&space->list);
mutex_init(&space->lock);
cap_list_init(&space->cap_list);
space->pgd = current_pgd;
address_space_attach(task, space);
} else {
/* Otherwise allocate conventionally */
space = address_space_create(0);
address_space_attach(task, space);
}
space = address_space_create(0);
address_space_attach(task, space);
/* Initialize ktcb */
task_init_registers(task, pager->start_address);
@@ -136,6 +115,9 @@ int init_pager(struct pager *pager,
task->tgid = task->tid;
task->container = cont;
/* Set cpu affinity */
thread_setup_affinity(task);
/* Add the address space to container space list */
address_space_add(task->space);
@@ -148,7 +130,7 @@ int init_pager(struct pager *pager,
/* Map the task's space */
add_mapping_pgd(pager->start_lma, pager->start_vma,
page_align_up(pager->memsize),
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
MAP_USR_RWX, TASK_PGD(task));
/* Move capability list from dummy to task's space cap list */
cap_list_move(&task->space->cap_list, &current->cap_list);
@@ -162,6 +144,7 @@ int init_pager(struct pager *pager,
/* Container list that keeps all tasks */
tcb_add(task);
return 0;
}
@@ -256,24 +239,15 @@ int update_dynamic_capids(struct kernel_resources *kres)
* Initialize all containers with their initial set of tasks,
* spaces, scheduler parameters such that they can be started.
*/
int container_init_pagers(struct kernel_resources *kres,
pgd_table_t *current_pgd)
int container_init_pagers(struct kernel_resources *kres)
{
struct container *cont;
struct pager *pager;
int first = 1;
list_foreach_struct(cont, &kres->containers.list, list) {
for (int i = 0; i < cont->npagers; i++) {
pager = &cont->pager[i];
/* First pager initializes specially */
if (first) {
init_pager(pager, cont, current_pgd);
first = 0;
} else {
init_pager(pager, cont, 0);
}
init_pager(pager, cont);
}
}

126
src/generic/debug.c Normal file
View File

@@ -0,0 +1,126 @@
/*
* Basic debug information about the kernel
*
* Copyright (C) 2010 B Labs Ltd.
*
* Written by Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/generic/debug.h>
#include INC_SUBARCH(cpu.h)
#include <l4/generic/platform.h>
#if defined (CONFIG_DEBUG_ACCOUNTING)
struct system_accounting system_accounting;
/*
 * Dump the per-system-call, per-exception and task-operation event
 * counters gathered under CONFIG_DEBUG_ACCOUNTING to the kernel log.
 *
 * @sys_acc: accounting structure to print (normally the global
 *           system_accounting instance defined above).
 *
 * Read-only; no locking is taken, so counts may be mid-update while
 * printing — acceptable for a debug dump.
 */
void system_accounting_print(struct system_accounting *sys_acc)
{
	printk("System Operations Accounting:\n\n");
	printk("System calls:\n");
	printk("=============\n");
	printk("IPC: %llu\n", sys_acc->syscalls.ipc);
	printk("Thread Switch: %llu\n", sys_acc->syscalls.tswitch);
	printk("Thread Control: %llu\n", sys_acc->syscalls.tctrl);
	printk("Exchange Registers: %llu\n", sys_acc->syscalls.exregs);
	printk("Unmap: %llu\n", sys_acc->syscalls.unmap);
	printk("Irq Control: %llu\n", sys_acc->syscalls.irqctrl);
	printk("Map: %llu\n", sys_acc->syscalls.map);
	printk("Getid: %llu\n", sys_acc->syscalls.getid);
	printk("Capability Control: %llu\n", sys_acc->syscalls.capctrl);
	printk("Time: %llu\n", sys_acc->syscalls.time);
	printk("Mutex Control: %llu\n", sys_acc->syscalls.mutexctrl);
	printk("Cache Control: %llu\n", sys_acc->syscalls.cachectrl);
	printk("\nExceptions:\n");
	printk("===========\n");
	printk("System call: %llu\n", sys_acc->exceptions.syscall);
	printk("Data Abort: %llu\n", sys_acc->exceptions.data_abort);
	printk("Prefetch Abort: %llu\n", sys_acc->exceptions.prefetch_abort);
	printk("Irq: %llu\n", sys_acc->exceptions.irq);
	printk("Undef Abort: %llu\n", sys_acc->exceptions.undefined_abort);
	/* NOTE(review): the two task_ops lines print under the
	 * "Exceptions" heading — presumably intentional grouping,
	 * but confirm. */
	printk("Context Switch: %llu\n", sys_acc->task_ops.context_switch);
	printk("Space Switch: %llu\n", sys_acc->task_ops.space_switch);
	/* NOTE(review): heading printed but no cache counters follow;
	 * looks like a placeholder for future statistics. */
	printk("\nCache operations:\n");
}
#endif
/*
* For spinlock debugging
*/
#if defined (CONFIG_DEBUG_SPINLOCKS)
#include <l4/lib/bit.h>
#define DEBUG_SPINLOCK_TOTAL 10
DECLARE_PERCPU(static unsigned long, held_lock_array[DEBUG_SPINLOCK_TOTAL]);
DECLARE_PERCPU(static u32, held_lock_bitmap);
/*
 * Record that the current cpu is acquiring @lock_addr.
 *
 * Triggers BUG() if this cpu already holds the lock (a recursive
 * acquire, i.e. guaranteed deadlock on a non-recursive spinlock).
 * Otherwise the lock address is stored in the first free slot of this
 * cpu's held-lock table so spin_unlock_delete_check() can verify the
 * matching release.
 */
void spin_lock_record_check(void *lock_addr)
{
	int bit = 0;

	/*
	 * Check if we already hold this lock
	 */
	for (int i = 0; i < DEBUG_SPINLOCK_TOTAL; i++) {
		if (per_cpu(held_lock_array[i]) == (unsigned long)lock_addr) {
			print_early("Spinlock already held.\n");
			printk("lock_addr=%p\n", lock_addr);
			BUG();
		}
	}

	/*
	 * Add it as a new lock
	 */
	bit = find_and_set_first_free_bit(&per_cpu(held_lock_bitmap),
					  DEBUG_SPINLOCK_TOTAL);

	/*
	 * Fix: the original used the returned slot unchecked. If all
	 * DEBUG_SPINLOCK_TOTAL slots are in use the allocator cannot
	 * return a valid index, and indexing held_lock_array with it
	 * would corrupt memory. Catch that loudly instead.
	 */
	BUG_ON(bit < 0 || bit >= DEBUG_SPINLOCK_TOTAL);

	per_cpu(held_lock_array[bit]) = (unsigned long)lock_addr;
}
/*
 * Validate and un-record a spinlock release on the current cpu.
 *
 * BUG()s on a double unlock (lock word already zero) or on releasing
 * a lock that was never recorded by spin_lock_record_check(). On
 * success the lock's slot in the per-cpu held-lock table is cleared.
 */
void spin_unlock_delete_check(void *lock_addr)
{
	unsigned long entry = (unsigned long)lock_addr;

	/* A zero lock word means it was already released: double unlock */
	if (*((unsigned int *)lock_addr) == 0) {
		print_early("Spinlock already unlocked.");
		BUG();
	}

	/* Locate the slot recorded at acquire time and clear it */
	for (int slot = 0; slot < DEBUG_SPINLOCK_TOTAL; slot++) {
		if (per_cpu(held_lock_array[slot]) != entry)
			continue;

		per_cpu(held_lock_array[slot]) = 0;
		BUG_ON(check_and_clear_bit(&per_cpu(held_lock_bitmap),
					   slot) < 0);
		return;
	}

	/* Releasing a lock this cpu never recorded as held: caller bug */
	BUG();
}
#endif

View File

@@ -6,6 +6,7 @@
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/debug.h>
#include <l4/generic/platform.h>
#include <l4/generic/tcb.h>
#include <l4/generic/irq.h>
@@ -57,7 +58,8 @@ static inline void cascade_irq_chip(struct irq_chip *this_chip)
{
if (this_chip->cascade >= 0) {
BUG_ON(IRQ_CHIPS_MAX == 1);
this_chip->ops.unmask(this_chip->cascade);
if(this_chip->ops.unmask)
this_chip->ops.unmask(this_chip->cascade);
}
}
@@ -69,7 +71,8 @@ void irq_controllers_init(void)
this_chip = irq_chip_array + i;
/* Initialise the irq chip (e.g. reset all registers) */
this_chip->ops.init();
if (this_chip->ops.init)
this_chip->ops.init();
/* Enable cascaded irq on this chip if it exists */
cascade_irq_chip(this_chip);
@@ -98,8 +101,10 @@ l4id_t global_irq_index(void)
this_chip = irq_chip_array + i;
/* Find local irq that is triggered on this chip */
BUG_ON((irq_index =
this_chip->ops.read_irq()) == IRQ_NIL);
if (this_chip->ops.read_irq) {
irq_index = this_chip->ops.read_irq(this_chip->data);
BUG_ON(irq_index == IRQ_NIL);
}
/* See if this irq is a cascaded irq */
if (irq_index == this_chip->cascade)
@@ -127,6 +132,8 @@ void do_irq(void)
l4id_t irq_index = global_irq_index();
struct irq_desc *this_irq = irq_desc_array + irq_index;
system_account_irq();
/*
* Note, this can be easily done a few instructions
* quicker by some immediate read/disable/enable_all().

View File

@@ -8,9 +8,11 @@
#include <l4/generic/container.h>
#include <l4/generic/resource.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/platform.h>
#include <l4/lib/math.h>
#include <l4/lib/memcache.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)
#include INC_PLAT(platform.h)
#include <l4/api/errno.h>
@@ -132,22 +134,28 @@ void free_pmd(void *addr)
BUG_ON(mem_cache_free(kernel_resources.pmd_cache, addr) < 0);
}
void free_space(void *addr)
void free_space(void *addr, struct ktcb *task)
{
struct capability *cap;
BUG_ON(!(cap = capability_find_by_rtype(current,
BUG_ON(!(cap = capability_find_by_rtype(task,
CAP_RTYPE_SPACEPOOL)));
capability_free(cap, 1);
BUG_ON(mem_cache_free(kernel_resources.space_cache, addr) < 0);
}
void free_ktcb(void *addr)
/*
* Account it to pager, but if it doesn't exist,
* to current idle task
*/
void free_ktcb(void *addr, struct ktcb *acc_task)
{
struct capability *cap;
BUG_ON(!(cap = capability_find_by_rtype(current,
/* Account it to task's pager if it exists */
BUG_ON(!(cap = capability_find_by_rtype(acc_task,
CAP_RTYPE_THREADPOOL)));
capability_free(cap, 1);
@@ -449,8 +457,8 @@ void init_kernel_resources(struct kernel_resources *kres)
/* Set up total physical memory as single capability */
physmem = alloc_bootmem(sizeof(*physmem), 0);
physmem->start = __pfn(PHYS_MEM_START);
physmem->end = __pfn(PHYS_MEM_END);
physmem->start = __pfn(PLATFORM_PHYS_MEM_START);
physmem->end = __pfn(PLATFORM_PHYS_MEM_END);
link_init(&physmem->list);
cap_list_insert(physmem, &kres->physmem_free);
@@ -599,14 +607,7 @@ void setup_kernel_resources(struct boot_resources *bootres,
{
struct capability *cap;
struct container *container;
pgd_table_t *current_pgd;
/*
* See how many containers we have. Assign next
* unused container id for kernel resources
*/
kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0
//pgd_table_t *current_pgd;
/* First initialize the list of non-memory capabilities */
cap = boot_capability_create();
@@ -646,11 +647,34 @@ void setup_kernel_resources(struct boot_resources *bootres,
* since we want to avoid allocating an uncertain
* amount of memory from the boot allocators.
*/
current_pgd = realloc_page_tables();
// current_pgd = arch_realloc_page_tables();
/* Move it back */
cap_list_move(&kres->non_memory_caps, &current->cap_list);
/*
* Setting up ids used internally.
*
* See how many containers we have. Assign next
* unused container id for kernel resources
*/
kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0
/*
* Assign thread and space ids to current which will later
* become the idle task
*/
current->tid = id_new(&kres->ktcb_ids);
current->space->spid = id_new(&kres->space_ids);
/*
* Init per-cpu zombie lists
*/
for (int i = 0; i < CONFIG_NCPU; i++)
init_ktcb_list(&per_cpu_byid(kres->zombie_list, i));
/*
* Create real containers from compile-time created
* cinfo structures
@@ -667,8 +691,7 @@ void setup_kernel_resources(struct boot_resources *bootres,
}
/* Initialize pagers */
container_init_pagers(kres, current_pgd);
container_init_pagers(kres);
}
/*
@@ -703,11 +726,11 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
add_boot_mapping(__pfn_to_addr(cap->start),
virtual,
page_align_up(bufsize),
MAP_SVC_RW_FLAGS);
MAP_KERN_RW);
} else {
add_mapping_pgd(__pfn_to_addr(cap->start),
virtual, page_align_up(bufsize),
MAP_SVC_RW_FLAGS, &init_pgd);
MAP_KERN_RW, &init_pgd);
}
/* Unmap area from memcap */
memcap_unmap_range(cap, &kres->physmem_free,
@@ -791,12 +814,11 @@ void init_resource_allocators(struct boot_resources *bootres,
kres, 0);
/* Count boot pmds used so far and add them */
bootres->nkpmds += pgd_count_pmds(&init_pgd);
bootres->nkpmds += pgd_count_boot_pmds();
/*
* Calculate maximum possible pmds
* that may be used during this pmd
* cache init and add them.
* Calculate maximum possible pmds that may be used
* during this pmd cache initialization and add them.
*/
bootres->nkpmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
if (!is_aligned(bootres->npmds * PMD_SIZE,

View File

@@ -15,60 +15,71 @@
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/generic/thread.h>
#include <l4/generic/debug.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include INC_SUBARCH(irq.h)
struct scheduler scheduler;
DECLARE_PERCPU(struct scheduler, scheduler);
/* This is incremented on each irq or voluntarily by preempt_disable() */
extern unsigned int current_irq_nest_count;
DECLARE_PERCPU(extern unsigned int, current_irq_nest_count);
/* This ensures no scheduling occurs after voluntary preempt_disable() */
static int voluntary_preempt = 0;
DECLARE_PERCPU(static int, voluntary_preempt);
void sched_lock_runqueues(unsigned long *irqflags)
void sched_lock_runqueues(struct scheduler *sched, unsigned long *irqflags)
{
spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
spin_lock(&scheduler.sched_rq[1].lock);
spin_lock_irq(&sched->sched_rq[0].lock, irqflags);
spin_lock(&sched->sched_rq[1].lock);
BUG_ON(irqs_enabled());
}
void sched_unlock_runqueues(unsigned long irqflags)
void sched_unlock_runqueues(struct scheduler *sched, unsigned long irqflags)
{
spin_unlock(&scheduler.sched_rq[1].lock);
spin_unlock_irq(&scheduler.sched_rq[0].lock, irqflags);
spin_unlock(&sched->sched_rq[1].lock);
spin_unlock_irq(&sched->sched_rq[0].lock, irqflags);
}
int preemptive()
{
return current_irq_nest_count == 0;
return per_cpu(current_irq_nest_count) == 0;
}
int preempt_count()
{
return current_irq_nest_count;
return per_cpu(current_irq_nest_count);
}
#if !defined(CONFIG_PREEMPT_DISABLE)
void preempt_enable(void)
{
voluntary_preempt--;
current_irq_nest_count--;
per_cpu(voluntary_preempt)--;
per_cpu(current_irq_nest_count)--;
}
/* A positive irq nest count implies current context cannot be preempted. */
void preempt_disable(void)
{
current_irq_nest_count++;
voluntary_preempt++;
per_cpu(current_irq_nest_count)++;
per_cpu(voluntary_preempt)++;
}
#else /* End of !CONFIG_PREEMPT_DISABLE */
void preempt_enable(void) { }
void preempt_disable(void) { }
#endif /* CONFIG_PREEMPT_DISABLE */
int in_irq_context(void)
{
/*
@@ -76,13 +87,15 @@ int in_irq_context(void)
* one more than all preempt_disable()'s which are
* counted by voluntary_preempt.
*/
return (current_irq_nest_count == (voluntary_preempt + 1));
return (per_cpu(current_irq_nest_count) ==
(per_cpu(voluntary_preempt) + 1));
}
int in_nested_irq_context(void)
{
/* Deducing voluntary preemptions we get real irq nesting */
return (current_irq_nest_count - voluntary_preempt) > 1;
return (per_cpu(current_irq_nest_count) -
per_cpu(voluntary_preempt)) > 1;
}
int in_process_context(void)
@@ -90,36 +103,24 @@ int in_process_context(void)
return !in_irq_context();
}
/*
* In current implementation, if all task are asleep it is considered
* a bug. We use idle_task() to investigate.
*
* In the future, it will be natural that all tasks may be asleep,
* so this will change to something such as a Wait-for-Interrupt
* routine.
*/
void idle_task(void)
void sched_init_runqueue(struct scheduler *sched, struct runqueue *rq)
{
printk("Idle task.\n");
while(1);
}
void sched_init_runqueue(struct runqueue *rq)
{
memset(rq, 0, sizeof(struct runqueue));
link_init(&rq->task_list);
spin_lock_init(&rq->lock);
rq->sched = sched;
}
void sched_init(struct scheduler *scheduler)
void sched_init()
{
for (int i = 0; i < SCHED_RQ_TOTAL; i++)
sched_init_runqueue(&scheduler->sched_rq[i]);
struct scheduler *sched = &per_cpu(scheduler);
scheduler->rq_runnable = &scheduler->sched_rq[0];
scheduler->rq_expired = &scheduler->sched_rq[1];
scheduler->prio_total = TASK_PRIO_TOTAL;
for (int i = 0; i < SCHED_RQ_TOTAL; i++)
sched_init_runqueue(sched, &sched->sched_rq[i]);
sched->rq_runnable = &sched->sched_rq[0];
sched->rq_expired = &sched->sched_rq[1];
sched->prio_total = TASK_PRIO_TOTAL;
sched->idle_task = current;
}
/* Swap runnable and expired runqueues. */
@@ -127,12 +128,12 @@ static void sched_rq_swap_runqueues(void)
{
struct runqueue *temp;
BUG_ON(list_empty(&scheduler.rq_expired->task_list));
BUG_ON(list_empty(&per_cpu(scheduler).rq_expired->task_list));
/* Queues are swapped and expired list becomes runnable */
temp = scheduler.rq_runnable;
scheduler.rq_runnable = scheduler.rq_expired;
scheduler.rq_expired = temp;
temp = per_cpu(scheduler).rq_runnable;
per_cpu(scheduler).rq_runnable = per_cpu(scheduler).rq_expired;
per_cpu(scheduler).rq_expired = temp;
}
/* Set policy on where to add tasks in the runqueue */
@@ -143,39 +144,45 @@ static void sched_rq_swap_runqueues(void)
static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
{
unsigned long irqflags;
struct scheduler *sched =
&per_cpu_byid(scheduler, task->affinity);
BUG_ON(!list_empty(&task->rq_list));
sched_lock_runqueues(&irqflags);
/* Lock that particular cpu's runqueue set */
sched_lock_runqueues(sched, &irqflags);
if (front)
list_insert(&task->rq_list, &rq->task_list);
else
list_insert_tail(&task->rq_list, &rq->task_list);
rq->total++;
task->rq = rq;
sched_unlock_runqueues(irqflags);
/* Unlock that particular cpu's runqueue set */
sched_unlock_runqueues(sched, irqflags);
}
/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task)
{
struct runqueue *rq;
unsigned long irqflags;
struct scheduler *sched =
&per_cpu_byid(scheduler, task->affinity);
sched_lock_runqueues(&irqflags);
sched_lock_runqueues(sched, &irqflags);
/*
* We must lock both, otherwise rqs may swap and
* we may get the wrong rq.
*/
rq = task->rq;
BUG_ON(list_empty(&task->rq_list));
list_remove_init(&task->rq_list);
task->rq = 0;
rq->total--;
BUG_ON(rq->total < 0);
sched_unlock_runqueues(irqflags);
task->rq->total--;
BUG_ON(task->rq->total < 0);
task->rq = 0;
sched_unlock_runqueues(sched, irqflags);
}
@@ -209,7 +216,8 @@ void sched_resume_sync(struct ktcb *task)
BUG_ON(task == current);
task->state = TASK_RUNNABLE;
sched_rq_add_task(task,
scheduler.rq_runnable,
per_cpu_byid(scheduler,
task->affinity).rq_runnable,
RQ_ADD_FRONT);
schedule();
}
@@ -224,38 +232,10 @@ void sched_resume_async(struct ktcb *task)
{
task->state = TASK_RUNNABLE;
sched_rq_add_task(task,
scheduler.rq_runnable,
per_cpu_byid(scheduler,
task->affinity).rq_runnable,
RQ_ADD_FRONT);
}
/* Same as suspend, task state and flags are different */
void sched_exit_sync(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_DEAD;
current->flags &= ~TASK_EXITING;
if (current->pagerid != current->tid)
wake_up(&current->wqh_pager, 0);
preempt_enable();
schedule();
}
void sched_exit_async(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_DEAD;
current->flags &= ~TASK_EXITING;
if (current->pagerid != current->tid)
wake_up(&current->wqh_pager, 0);
preempt_enable();
need_resched = 1;
// printk("CPU%d: Resuming task %d with affinity %d\n", smp_get_cpuid(), task->tid, task->affinity);
}
/*
@@ -291,23 +271,25 @@ void sched_suspend_async(void)
}
extern void arch_switch(struct ktcb *cur, struct ktcb *next);
extern void arch_context_switch(struct ktcb *cur, struct ktcb *next);
static inline void context_switch(struct ktcb *next)
{
struct ktcb *cur = current;
//printk("(%d) to (%d)\n", cur->tid, next->tid);
// printk("Core:%d (%d) to (%d)\n", smp_get_cpuid(), cur->tid, next->tid);
system_account_context_switch();
/* Flush caches and everything */
arch_hardware_flush(TASK_PGD(next));
if (current->space->spid != next->space->spid)
arch_space_switch(next);
/* Update utcb region for next task */
task_update_utcb(next);
/* Switch context */
arch_switch(cur, next);
arch_context_switch(cur, next);
// printk("Returning from yield. Tid: (%d)\n", cur->tid);
}
@@ -321,7 +303,7 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
BUG_ON(prio_total < task->priority);
BUG_ON(prio_total == 0);
return task->ticks_assigned =
SCHED_TICKS * task->priority / prio_total;
CONFIG_SCHED_TICKS * task->priority / prio_total;
}
@@ -360,11 +342,11 @@ void schedule()
/* Should not schedule with preemption
* disabled or in nested irq */
BUG_ON(voluntary_preempt);
BUG_ON(per_cpu(voluntary_preempt));
BUG_ON(in_nested_irq_context());
/* Should not have more ticks than SCHED_TICKS */
BUG_ON(current->ticks_left > SCHED_TICKS);
BUG_ON(current->ticks_left > CONFIG_SCHED_TICKS);
/* If coming from process path, cannot have
* any irqs that schedule after this */
@@ -378,16 +360,17 @@ void schedule()
sched_rq_remove_task(current);
if (current->ticks_left)
sched_rq_add_task(current,
scheduler.rq_runnable,
per_cpu(scheduler).rq_runnable,
RQ_ADD_BEHIND);
else
sched_rq_add_task(current,
scheduler.rq_expired,
per_cpu(scheduler).rq_expired,
RQ_ADD_BEHIND);
}
/*
* FIXME: Are these smp-safe?
* FIXME: Are these smp-safe? BB: On first glance they
* should be because runqueues are per-cpu right now.
*
* If task is about to sleep and
* it has pending events, wake it up.
@@ -406,31 +389,36 @@ void schedule()
TASK_IN_USER(current)) {
if (current->flags & TASK_SUSPENDING)
sched_suspend_async();
else if (current->flags & TASK_EXITING)
sched_exit_async();
}
/* Determine the next task to be run */
do {
if (scheduler.rq_runnable->total > 0) {
next = link_to_struct(scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
/* Simpler task pick up loop. May put in sched_pick_next() */
for (;;) {
struct scheduler *sched = &per_cpu(scheduler);
/* If we or a child has just exited, run idle task once for clean up */
if (current->flags & TASK_EXITED) {
current->flags &= ~TASK_EXITED;
next = sched->idle_task;
break;
} else if (sched->rq_runnable->total > 0) {
/* Get a runnable task, if available */
next = link_to_struct(sched->rq_runnable->task_list.next,
struct ktcb, rq_list);
break;
} else if (sched->rq_expired->total > 0) {
/* Swap queues and retry if not */
sched_rq_swap_runqueues();
continue;
} else if (in_process_context()) {
/* Do idle task if no runnable tasks and in process */
next = sched->idle_task;
break;
} else {
if (scheduler.rq_expired->total > 0) {
sched_rq_swap_runqueues();
next = link_to_struct(
scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
/* Irq preemptions return to current task
* if no runnable tasks are available */
next = current;
}
/* Irq calls must return to interrupted current process */
next = current;
break;
}
/* If process context, poll forever for new tasks */
} while (scheduler.rq_runnable->total == 0 &&
scheduler.rq_expired->total == 0 &&
in_process_context());
}
/* New tasks affect runqueue total priority. */
if (next->flags & TASK_RESUMING)
@@ -443,7 +431,7 @@ void schedule()
* becomes runnable rather than all at once. It is done
* every runqueue swap
*/
sched_recalc_ticks(next, scheduler.prio_total);
sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
next->ticks_left = next->ticks_assigned;
}
@@ -462,7 +450,7 @@ void schedule()
*/
void scheduler_start()
{
timer_start();
platform_timer_start();
switch_to_user(current);
}

View File

@@ -4,6 +4,7 @@
* Copyright (C) 2008 Bahadir Balban
*/
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
@@ -15,7 +16,6 @@
#include <l4/api/kip.h>
#include <l4/lib/idpool.h>
void init_address_space_list(struct address_space_list *space_list)
{
memset(space_list, 0, sizeof(*space_list));
@@ -47,15 +47,16 @@ void address_space_add(struct address_space *space)
BUG_ON(!++curcont->space_list.count);
}
void address_space_remove(struct address_space *space)
void address_space_remove(struct address_space *space, struct container *cont)
{
BUG_ON(list_empty(&space->list));
BUG_ON(--curcont->space_list.count < 0);
BUG_ON(--cont->space_list.count < 0);
list_remove_init(&space->list);
}
/* Assumes address space reflock is already held */
void address_space_delete(struct address_space *space)
void address_space_delete(struct address_space *space,
struct ktcb *task_accounted)
{
BUG_ON(space->ktcb_refs);
@@ -66,7 +67,7 @@ void address_space_delete(struct address_space *space)
id_del(&kernel_resources.space_ids, space->spid);
/* Deallocate the space structure */
free_space(space);
free_space(space, task_accounted);
}
struct address_space *address_space_create(struct address_space *orig)
@@ -81,7 +82,7 @@ struct address_space *address_space_create(struct address_space *orig)
/* Allocate pgd */
if (!(pgd = alloc_pgd())) {
free_space(space);
free_space(space, current);
return PTR_ERR(-ENOMEM);
}
@@ -92,7 +93,7 @@ struct address_space *address_space_create(struct address_space *orig)
space->pgd = pgd;
/* Copy all kernel entries */
copy_pgd_kern_all(pgd);
arch_copy_pgd_kernel_entries(pgd);
/*
* Set up space id: Always allocate a new one. Specifying a space id
@@ -106,7 +107,7 @@ struct address_space *address_space_create(struct address_space *orig)
/* Copy its user entries/tables */
if ((err = copy_user_tables(space, orig)) < 0) {
free_pgd(pgd);
free_space(space);
free_space(space, current);
return PTR_ERR(err);
}
}

View File

@@ -16,6 +16,8 @@
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mmu_ops.h)
void init_ktcb_list(struct ktcb_list *ktcb_list)
{
@@ -30,6 +32,8 @@ void tcb_init(struct ktcb *new)
link_init(&new->task_list);
mutex_init(&new->thread_control_lock);
spin_lock_init(&new->thread_lock);
init_ktcb_list(&new->child_exit_list);
cap_list_init(&new->cap_list);
@@ -65,32 +69,52 @@ struct ktcb *tcb_alloc_init(l4id_t cid)
void tcb_delete(struct ktcb *tcb)
{
struct ktcb *pager, *acc_task;
/* Sanity checks first */
BUG_ON(!is_page_aligned(tcb));
BUG_ON(tcb->wqh_pager.sleepers > 0);
BUG_ON(tcb->wqh_send.sleepers > 0);
BUG_ON(tcb->wqh_recv.sleepers > 0);
BUG_ON(!list_empty(&tcb->task_list) &&
!(tcb->flags & TASK_EXITING));
BUG_ON(!list_empty(&tcb->rq_list) && tcb != current);
BUG_ON(tcb->rq && tcb != current);
BUG_ON(tcb->affinity != current->affinity);
BUG_ON(tcb->state != TASK_INACTIVE);
BUG_ON(!list_empty(&tcb->rq_list));
BUG_ON(tcb->rq);
BUG_ON(tcb == current);
BUG_ON(tcb->nlocks);
BUG_ON(tcb->waiting_on);
BUG_ON(tcb->wq);
mutex_lock(&curcont->space_list.lock);
/* Remove from zombie list */
list_remove(&tcb->task_list);
/* Determine task to account deletions */
if (!(pager = tcb_find(tcb->pagerid)))
acc_task = current;
else
acc_task = pager;
/*
* NOTE: This protects single threaded space
* deletion against space modification.
*
* If space deletion were multi-threaded, list
* traversal would be needed to ensure list is
* still there.
*/
mutex_lock(&tcb->container->space_list.lock);
mutex_lock(&tcb->space->lock);
BUG_ON(--tcb->space->ktcb_refs < 0);
/* No refs left for the space, delete it */
if (tcb->space->ktcb_refs == 0) {
address_space_remove(tcb->space);
address_space_remove(tcb->space, tcb->container);
mutex_unlock(&tcb->space->lock);
address_space_delete(tcb->space);
mutex_unlock(&curcont->space_list.lock);
address_space_delete(tcb->space, acc_task);
mutex_unlock(&tcb->container->space_list.lock);
} else {
mutex_unlock(&tcb->space->lock);
mutex_unlock(&curcont->space_list.lock);
mutex_unlock(&tcb->container->space_list.lock);
}
/* Clear container id part */
@@ -100,7 +124,7 @@ void tcb_delete(struct ktcb *tcb)
id_del(&kernel_resources.ktcb_ids, tcb->tid);
/* Free the tcb */
free_ktcb(tcb);
free_ktcb(tcb, acc_task);
}
struct ktcb *tcb_find_by_space(l4id_t spid)
@@ -133,6 +157,22 @@ struct ktcb *container_find_tcb(struct container *c, l4id_t tid)
return 0;
}
/*
 * Search container @c's thread list for the thread with id @tid and,
 * if found, return it with its thread_lock held.
 *
 * Returns the locked ktcb, or 0 if no thread with @tid exists in @c.
 *
 * Lock ordering is deliberate hand-over-hand: the thread_lock is
 * acquired *before* the list lock is dropped, so the thread cannot be
 * torn down between being found and being returned. Callers must
 * release task->thread_lock when done.
 */
struct ktcb *container_find_lock_tcb(struct container *c, l4id_t tid)
{
	struct ktcb *task;

	/* Hold the list lock while walking so entries cannot vanish */
	spin_lock(&c->ktcb_list.list_lock);
	list_foreach_struct(task, &c->ktcb_list.list, task_list) {
		if (task->tid == tid) {
			/* Pin the thread before letting go of the list */
			spin_lock(&task->thread_lock);
			spin_unlock(&c->ktcb_list.list_lock);
			return task;
		}
	}
	spin_unlock(&c->ktcb_list.list_lock);
	return 0;
}
/*
* Threads are the only resource where inter-container searches are
* allowed. This is because on other containers, only threads can be
@@ -158,6 +198,26 @@ struct ktcb *tcb_find(l4id_t tid)
}
}
/*
 * Find a thread anywhere in the system by global thread id and return
 * it with its thread_lock held; 0 if the thread (or its container)
 * does not exist. Callers must drop the thread_lock when finished.
 */
struct ktcb *tcb_find_lock(l4id_t tid)
{
	struct container *target;

	/* Fast path: looking up ourselves needs no list traversal */
	if (tid == current->tid) {
		spin_lock(&current->thread_lock);
		return current;
	}

	/*
	 * Thread ids encode their container id; resolve the container
	 * first, then do the locked search within it.
	 */
	if (tid_to_cid(tid) == curcont->cid)
		target = curcont;
	else
		target = container_find(&kernel_resources, tid_to_cid(tid));

	if (!target)
		return 0;

	return container_find_lock_tcb(target, tid);
}
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)
{
spin_lock(&ktcb_list->list_lock);
@@ -178,13 +238,45 @@ void tcb_add(struct ktcb *new)
spin_unlock(&c->ktcb_list.list_lock);
}
void tcb_remove(struct ktcb *new)
/*
 * It's important that this runs per-cpu: it must be guaranteed that
 * a zombie task is no longer runnable before it is deleted, and
 * running from the idle task on that cpu guarantees exactly that.
 */
/*
 * Reap all dead threads queued on this cpu's zombie list, deleting
 * each one via tcb_delete(). Uses the removable-safe list iterator
 * because tcb_delete() unlinks the entry from the list.
 *
 * NOTE(review): tcb_delete() is invoked while holding the zombie
 * list spinlock, and (per its visible body) it takes space-list
 * mutexes — confirm sleeping under this spinlock is safe in this
 * kernel's locking model.
 */
void tcb_delete_zombies(void)
{
	struct ktcb *zombie, *n;
	/* Zombie list is per-cpu; only this cpu's idle task reaps it */
	struct ktcb_list *ktcb_list =
		&per_cpu(kernel_resources.zombie_list);

	/* Traverse the per-cpu zombie list */
	spin_lock(&ktcb_list->list_lock);
	list_foreach_removable_struct(zombie, n,
				      &ktcb_list->list,
				      task_list)
		/* Delete all zombies one by one */
		tcb_delete(zombie);
	spin_unlock(&ktcb_list->list_lock);
}
/*
* It's enough to lock list and thread without
* traversing the list, because we're only
* protecting against thread modification.
* Deletion is a single-threaded operation
*/
void tcb_remove(struct ktcb *task)
{
/* Lock list */
spin_lock(&curcont->ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(list_empty(&task->task_list));
BUG_ON(--curcont->ktcb_list.count < 0);
list_remove_init(&new->task_list);
spin_lock(&task->thread_lock);
list_remove_init(&task->task_list);
spin_unlock(&curcont->ktcb_list.list_lock);
spin_unlock(&task->thread_lock);
}
void ktcb_list_remove(struct ktcb *new, struct ktcb_list *ktcb_list)
@@ -207,8 +299,7 @@ unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);
*/
void task_update_utcb(struct ktcb *task)
{
/* Update the KIP pointer */
kip.utcb = task->utcb_address;
arch_update_utcb(task->utcb_address);
}
/*
@@ -251,7 +342,7 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
if (current == task) {
/* Check own utcb, if not there, page it in */
if ((ret = check_access(task->utcb_address, UTCB_SIZE,
MAP_SVC_RW_FLAGS, page_in)) < 0)
MAP_KERN_RW, page_in)) < 0)
return -EFAULT;
else
return 0;
@@ -259,7 +350,7 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
/* Check another's utcb, but don't try to map in */
if ((ret = check_access_task(task->utcb_address,
UTCB_SIZE,
MAP_SVC_RW_FLAGS, 0,
MAP_KERN_RW, 0,
task)) < 0) {
return -EFAULT;
} else {
@@ -268,18 +359,18 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
* unless they're identical
*/
if ((phys =
virt_to_phys_by_pgd(task->utcb_address,
TASK_PGD(task))) !=
virt_to_phys_by_pgd(task->utcb_address,
TASK_PGD(current))) {
virt_to_phys_by_pgd(TASK_PGD(task),
task->utcb_address)) !=
virt_to_phys_by_pgd(TASK_PGD(current),
task->utcb_address)) {
/*
* We have none or an old reference.
* Update it with privileged flags,
* so that only kernel can access.
*/
add_mapping_pgd(phys, task->utcb_address,
add_mapping_pgd(phys, page_align(task->utcb_address),
page_align_up(UTCB_SIZE),
MAP_SVC_RW_FLAGS,
MAP_KERN_RW,
TASK_PGD(current));
}
BUG_ON(!phys);

View File

@@ -58,7 +58,7 @@ void update_system_time(void)
* TODO: Investigate: how do we make sure timer_irq is
* called SCHED_TICKS times per second?
*/
if (systime.thz == SCHED_TICKS) {
if (systime.thz == CONFIG_SCHED_TICKS) {
systime.thz = 0;
systime.sec++;
}
@@ -71,7 +71,7 @@ int sys_time(struct timeval *tv, int set)
int err;
if ((err = check_access((unsigned long)tv, sizeof(*tv),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;
/* Get time */
@@ -79,7 +79,7 @@ int sys_time(struct timeval *tv, int set)
while(retries > 0) {
systime.reader = 1;
tv->tv_sec = systime.sec;
tv->tv_usec = 1000000 * systime.thz / SCHED_TICKS;
tv->tv_usec = 1000000 * systime.thz / CONFIG_SCHED_TICKS;
retries--;
if (systime.reader)

View File

@@ -1,10 +1,22 @@
# Inherit global environment
Import('env')
import os, sys, glob
PROJRELROOT = '../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c']
src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c']
for name, val in symbols:
if 'CONFIG_SMP' == name:
src_local += ['smp.c', 'ipi.c', 'smp_test.c']
obj = env.Object(src_local)
Return('obj')

49
src/glue/arm/cache.c Normal file
View File

@@ -0,0 +1,49 @@
/*
* Wrapper for arm specific cache related functions
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include INC_GLUE(cache.h)
/*
 * NOTE(review): every routine below is an empty stub. Presumably the
 * real cache/TLB maintenance lives in subarch-specific code and these
 * exist to satisfy the generic glue interface — confirm that callers
 * do not depend on these performing actual hardware maintenance.
 */

/* Stub: invalidate both I and D caches entirely. */
void invalidate_cache(void)
{
}

/* Stub: invalidate the entire instruction cache. */
void invalidate_icache(void)
{
}

/* Stub: invalidate the entire data cache (discard dirty lines). */
void invalidate_dcache(void)
{
}

/* Stub: write back dirty data-cache lines to memory. */
void clean_dcache(void)
{
}

/* Stub: write back dirty lines, then invalidate the data cache. */
void clean_invalidate_dcache(void)
{
}

/* Stub: clean and invalidate both caches. */
void clean_invalidate_cache(void)
{
}

/* Stub: drain the cpu write buffer. */
void drain_writebuffer(void)
{
}

/* Stub: invalidate the unified TLB. */
void invalidate_tlb(void)
{
}

/* Stub: invalidate the instruction TLB. */
void invalidate_itlb(void)
{
}

/* Stub: invalidate the data TLB. */
void invalidate_dtlb(void)
{
}

43
src/glue/arm/debug.c Normal file
View File

@@ -0,0 +1,43 @@
#include <l4/generic/preempt.h>
#include <l4/generic/debug.h>
#include INC_SUBARCH(perfmon.h)
#include INC_GLUE(debug.h)
#if defined (CONFIG_DEBUG_PERFMON_KERNEL)
#define CYCLES_PER_COUNTER_TICKS 64
/*
 * Record end-of-syscall timing under CONFIG_DEBUG_PERFMON_KERNEL.
 *
 * @swi_address: address of the SWI instruction that entered the
 *               kernel; its low byte, divided by the 4-byte entry
 *               stride, indexes the per-syscall counter/timing slots.
 *
 * Updates min/max/total/average cycle counts for the syscall and the
 * grand total across all syscalls.
 */
void system_measure_syscall_end(unsigned long swi_address)
{
	/* Counter ticks once per CYCLES_PER_COUNTER_TICKS cpu cycles */
	volatile u64 cnt = perfmon_read_cyccnt() * CYCLES_PER_COUNTER_TICKS;
	unsigned int call_offset = (swi_address & 0xFF) >> 2;

	/* Number of times this syscall has been counted so far */
	u64 call_count =
		*(((u64 *)&system_accounting.syscalls) + call_offset);

	/* Timing slot laid out parallel to the counter slots */
	struct syscall_timing *st =
		(struct syscall_timing *)
		&system_accounting.syscall_timings + call_offset;

	/* Set min; zero means "not yet measured" */
	if (st->min == 0 || st->min > cnt)
		st->min = cnt;

	/* Set max */
	if (st->max < cnt)
		st->max = cnt;

	st->total += cnt;

	/*
	 * Fix: guard the division. If the per-syscall counter has not
	 * been incremented yet for this call, call_count is 0 and the
	 * original unconditional division was undefined behavior.
	 */
	if (call_count)
		st->avg = st->total / call_count;

	/* Update total */
	system_accounting.syscall_timings.all_total += cnt;
}
#endif

View File

@@ -1,7 +1,7 @@
/*
* Main initialisation code for the ARM kernel
*
* Copyright (C) 2007 Bahadir Balban
* Copyright (C) 2007 - 2010 B Labs Ltd.
*/
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
@@ -17,36 +17,23 @@
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(cpu.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_SUBARCH(perfmon.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(message.h)
#include INC_GLUE(syscall.h)
#include INC_GLUE(init.h)
#include INC_GLUE(smp.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(printascii.h)
#include INC_API(syscall.h)
#include INC_API(kip.h)
#include INC_API(mutex.h)
unsigned int kernel_mapping_end;
/* Maps the early memory regions needed to bootstrap the system */
void init_kernel_mappings(void)
{
	/* Start from a clean initial page directory */
	memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));

	/* Map kernel area to its virtual region (cacheable + bufferable) */
	add_section_mapping_init(align(virt_to_phys(_start_text),SZ_1MB),
				 align((unsigned int)_start_text, SZ_1MB), 1,
				 cacheable | bufferable);

	/*
	 * Map kernel one-to-one to its physical region, so execution can
	 * continue at the same address across the MMU switch-on; removed
	 * later by remove_section_mapping() once running virtually.
	 */
	add_section_mapping_init(align(virt_to_phys(_start_text),SZ_1MB),
				 align(virt_to_phys(_start_text),SZ_1MB),
				 1, 0);
}
void print_sections(void)
{
dprintk("_start_kernel: ",(unsigned int)_start_kernel);
@@ -59,89 +46,20 @@ void print_sections(void)
dprintk("_end_vectors: ",(unsigned int)_end_vectors);
dprintk("_start_kip: ", (unsigned int) _start_kip);
dprintk("_end_kip: ", (unsigned int) _end_kip);
dprintk("_bootstack: ", (unsigned int)_bootstack);
dprintk("_start_syscalls: ", (unsigned int) _start_syscalls);
dprintk("_end_syscalls: ", (unsigned int) _end_syscalls);
dprintk("_start_bootstack: ", (unsigned int)_start_bootstack);
dprintk("_end_bootstack: ", (unsigned int)_end_bootstack);
dprintk("_start_bootstack: ", (unsigned int)_start_bootstack);
dprintk("_end_bootstack: ", (unsigned int)_end_bootstack);
dprintk("_start_init_pgd: ", (unsigned int)_start_init_pgd);
dprintk("_end_init_pgd: ", (unsigned int)_end_init_pgd);
dprintk("_end_kernel: ", (unsigned int)_end_kernel);
dprintk("_start_init: ", (unsigned int)_start_init);
dprintk("_end_init: ", (unsigned int)_end_init);
dprintk("_end: ", (unsigned int)_end);
}
/*
* Enable virtual memory using kernel's pgd
* and continue execution on virtual addresses.
*/
/*
 * Switch the MMU on using the kernel's initial pgd and relocate
 * execution to virtual addresses. The caller must never return
 * through the normal call chain (see the LR note at the bottom).
 */
void start_vm()
{
	/*
	 * TTB must be 16K aligned. This is because first level tables are
	 * sized 16K.
	 */
	if ((unsigned int)&init_pgd & 0x3FFF)
		dprintk("kspace not properly aligned for ttb:",
			(u32)&init_pgd);
	// memset((void *)&kspace, 0, sizeof(pgd_table_t));
	arm_set_ttb(virt_to_phys(&init_pgd));

	/*
	 * This sets all 16 domains to zero and domain 0 to 1. The outcome
	 * is that page table access permissions are in effect for domain 0.
	 * All other domains have no access whatsoever.
	 */
	arm_set_domain(1);

	/* Enable everything before mmu permissions are in place */
	arm_enable_caches();
	arm_enable_wbuffer();

	/*
	 * Leave the past behind. Tlbs are invalidated, write buffer is drained.
	 * The whole of I + D caches are invalidated unconditionally. This is
	 * important to ensure that the cache is free of previously loaded
	 * values. Otherwise unpredictable data aborts may occur at arbitrary
	 * times, each time a load/store operation hits one of the invalid
	 * entries and those entries are cleaned to main memory.
	 */
	arm_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_enable_mmu();

	/* Jump to virtual memory addresses */
	__asm__ __volatile__ (
		"add sp, sp, %0 \n" /* Update stack pointer */
		"add fp, fp, %0 \n" /* Update frame pointer */
		/* On the next instruction below, r0 gets
		 * current PC + KOFFSET + 2 instructions after itself. */
		"add r0, pc, %0 \n"
		/* Special symbol that is extracted and included in the loader.
		 * Debuggers can break on it to load the virtual symbol table */
		".global break_virtual;\n"
		"break_virtual:\n"
		"mov pc, r0 \n" /* (r0 has next instruction) */
		:
		: "r" (KERNEL_OFFSET)
		: "r0"
	);

	/* At this point, execution is on virtual addresses. */
	remove_section_mapping(virt_to_phys(_start_kernel));

	/*
	 * Restore link register (LR) for this function.
	 *
	 * NOTE: LR values are pushed onto the stack at each function call,
	 * which means the restored return values will be physical for all
	 * functions in the call stack except this function. So the caller
	 * of this function must never return but initiate scheduling etc.
	 */
	__asm__ __volatile__ (
		"add %0, %0, %1 \n"
		"mov pc, %0 \n"
		:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
	);
	/* Not reached */
	while(1);
}
/* This calculates what address the kip field would have in userspace. */
#define KIP_USR_OFFSETOF(kip, field) ((void *)(((unsigned long)&kip.field - \
(unsigned long)&kip) + USER_KIP_PAGE))
@@ -171,128 +89,117 @@ void kip_init()
utcb_ref = (struct utcb **)((unsigned long)&kip + UTCB_KIP_OFFSET);
add_boot_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE,
MAP_USR_RO_FLAGS);
MAP_USR_RO);
printk("%s: Kernel built on %s, %s\n", __KERNELNAME__,
kip.kdesc.date, kip.kdesc.time);
}
void vectors_init()
{
unsigned int size = ((u32)_end_vectors - (u32)arm_high_vector);
/* Map the vectors in high vector page */
add_boot_mapping(virt_to_phys(arm_high_vector),
ARM_HIGH_VECTOR, size, 0);
arm_enable_high_vectors();
ARM_HIGH_VECTOR, size, MAP_KERN_RWX);
/* Kernel memory trapping is enabled at this point. */
}
void abort()
#include <l4/generic/cap-types.h>
#include <l4/api/capability.h>
#include <l4/generic/capability.h>
/* This is what an idle task needs */
static DECLARE_PERCPU(struct capability, pmd_cap);
/*
* FIXME: Add this when initializing kernel resources
* This is a hack.
*/
void setup_idle_caps()
{
printk("Aborting on purpose to halt system.\n");
#if 0
/* Prefetch abort */
__asm__ __volatile__ (
"mov pc, #0x0\n"
::
);
#endif
/* Data abort */
__asm__ __volatile__ (
"mov r0, #0 \n"
"ldr r0, [r0] \n"
::
);
struct capability *cap = &per_cpu(pmd_cap);
cap_list_init(&current->cap_list);
cap->type = CAP_RTYPE_MAPPOOL | CAP_TYPE_QUANTITY;
cap->size = 50;
link_init(&cap->list);
cap_list_insert(cap, &current->cap_list);
}
void jump(struct ktcb *task)
/*
* Set up current stack's beginning, and initial page tables
* as a valid task environment for idle task for current cpu
*/
void setup_idle_task()
{
__asm__ __volatile__ (
"mov lr, %0\n" /* Load pointer to context area */
"ldr r0, [lr]\n" /* Load spsr value to r0 */
"msr spsr, r0\n" /* Set SPSR as ARM_MODE_USR */
"add sp, lr, %1\n" /* Reset SVC stack */
"sub sp, sp, %2\n" /* Align to stack alignment */
"ldmib lr, {r0-r14}^\n" /* Load all USR registers */
"nop \n" /* Spec says dont touch banked registers
* right after LDM {no-pc}^ for one instruction */
"add lr, lr, #64\n" /* Manually move to PC location. */
"ldr lr, [lr]\n" /* Load the PC_USR to LR */
"movs pc, lr\n" /* Jump to userspace, also switching SPSR/CPSR */
:
: "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
);
}
void switch_to_user(struct ktcb *task)
{
arm_clean_invalidate_cache();
arm_invalidate_tlb();
arm_set_ttb(virt_to_phys(TASK_PGD(task)));
arm_invalidate_tlb();
jump(task);
}
void setup_dummy_current()
{
/*
* Temporarily iInitialize the beginning of
* last page of stack as the current ktcb
*/
memset(current, 0, sizeof(struct ktcb));
current->space = &init_space;
TASK_PGD(current) = &init_pgd;
/* Initialize space caps list */
cap_list_init(&current->space->cap_list);
#if 0
/*
* Unneeded stuff
*/
/*
* Set up idle context.
*/
current->context.spsr = ARM_MODE_SVC;
current->context.pc = (u32)idle_task;
current->context.sp = (u32)align((unsigned long)current + PAGE_SIZE,
STACK_ALIGNMENT);
#endif
/*
* FIXME: This must go to kernel resources init.
*/
/*
* If using split page tables, kernel
* resources must point at the global pgd
* TODO: We may need this for V6, in the future
*/
#if defined(CONFIG_SUBARCH_V7)
kernel_resources.pgd_global = &init_global_pgd;
#endif
}
void init_finalize(struct kernel_resources *kres)
void remove_initial_mapping(void)
{
volatile register unsigned int stack;
volatile register unsigned int newstack;
struct ktcb *first_task;
struct container *c;
/* At this point, execution is on virtual addresses. */
remove_section_mapping(virt_to_phys(_start_kernel));
}
/* Get the first container */
c = link_to_struct(kres->containers.list.next,
struct container, list);
void init_finalize(void)
{
/* Set up idle task capabilities */
setup_idle_caps();
/* Get the first pager in container */
first_task = link_to_struct(c->ktcb_list.list.next,
struct ktcb, task_list);
platform_timer_start();
/* Calculate first stack address */
newstack = align((unsigned long)first_task + PAGE_SIZE - 1,
STACK_ALIGNMENT);
#if defined (CONFIG_SMP)
/* Tell other cores to continue */
secondary_run_signal = 1;
dmb();
#endif
/* Switch to new stack */
stack = newstack;
asm("mov sp, %0\n\t"::"r"(stack));
/* -- Point of no stack unwinding -- */
/*
* Unmap boot memory, and add it as
* an unused kernel memcap
*/
free_boot_memory(&kernel_resources);
/*
* Set up initial KIP UTCB ref
*/
kip.utcb = (u32)current->utcb_address;
/*
* Start the scheduler, jumping to task
*/
scheduler_start();
idle_task();
}
void start_kernel(void)
{
printascii("\n"__KERNELNAME__": start kernel...\n");
print_early("\n"__KERNELNAME__": start kernel...\n");
// print_sections();
/* Early cpu initialization */
cpu_startup();
/*
* Initialise section mappings
@@ -300,17 +207,19 @@ void start_kernel(void)
*/
init_kernel_mappings();
print_early("\n"__KERNELNAME__": Init kernel mappings...\n");
/*
* Enable virtual memory
* and jump to virtual addresses
*/
start_vm();
start_virtual_memory();
/*
* Set up a dummy current ktcb on
* boot stack with initial pgd
* Set up initial page tables and ktcb
* as a valid environment for idle task
*/
setup_dummy_current();
setup_idle_task();
/*
* Initialise platform-specific
@@ -322,6 +231,17 @@ void start_kernel(void)
printk("%s: Virtual memory enabled.\n",
__KERNELNAME__);
/* Identify CPUs and system */
system_identify();
sched_init();
/* Try to initialize secondary cores if there are any */
smp_start_cores();
/* Remove one-to-one kernel mapping */
remove_initial_mapping();
/*
* Map and enable high vector page.
* Faults can be handled after here.
@@ -341,8 +261,8 @@ void start_kernel(void)
/* Initialise system call page */
syscall_init();
/* Init scheduler */
sched_init(&scheduler);
/* Init performance monitor, if enabled */
perfmon_init();
/*
* Evaluate system resources
@@ -354,7 +274,7 @@ void start_kernel(void)
* Free boot memory, switch to first
* task's stack and start scheduler
*/
init_finalize(&kernel_resources);
init_finalize();
BUG();
}

18
src/glue/arm/ipi.c Normal file
View File

@@ -0,0 +1,18 @@
/*
* Copyright 2010 B Labs.Ltd.
*
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*
* Description: IPI handler for all ARM SMP cores
*/
#include INC_GLUE(ipi.h)
#include INC_GLUE(smp.h)
#include INC_SUBARCH(cpu.h)
#include <l4/lib/printk.h>
/* This should be in a file something like exception.S */
/*
 * IPI entry point for ARM SMP cores.
 *
 * NOTE(review): currently a stub — no per-ipi action is taken yet;
 * always reports success (0).
 */
int ipi_handler(struct irq_desc *desc)
{
	return 0;
}

View File

@@ -8,34 +8,51 @@
#include <l4/lib/printk.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/platform.h>
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_GLUE(mapping.h)
#include INC_PLAT(offsets.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
/*
* Conversion from generic protection flags to arch-specific
* pte flags.
* Return arch-specific pte flags from generic space flags.
*/
unsigned int space_flags_to_ptflags(unsigned int flags)
{
switch (flags) {
case MAP_USR_RW_FLAGS:
return __MAP_USR_RW_FLAGS;
case MAP_USR_RO_FLAGS:
return __MAP_USR_RO_FLAGS;
case MAP_SVC_RW_FLAGS:
return __MAP_SVC_RW_FLAGS;
case MAP_USR_IO_FLAGS:
return __MAP_USR_IO_FLAGS;
case MAP_SVC_IO_FLAGS:
return __MAP_SVC_IO_FLAGS;
case MAP_FAULT:
return __MAP_FAULT;
case MAP_USR_RW:
return __MAP_USR_RW;
case MAP_USR_RO:
return __MAP_USR_RO;
case MAP_KERN_RW:
return __MAP_KERN_RW;
case MAP_USR_IO:
return __MAP_USR_IO;
case MAP_KERN_IO:
return __MAP_KERN_IO;
case MAP_USR_RWX:
return __MAP_USR_RWX;
case MAP_KERN_RWX:
return __MAP_KERN_RWX;
case MAP_USR_RX:
return __MAP_USR_RX;
case MAP_KERN_RX:
return __MAP_KERN_RX;
/*
* Don't remove this, if a flag with
* same value is introduced, compiler will warn us
*/
case MAP_INVALID_FLAGS:
default:
BUG();
return MAP_INVALID_FLAGS;
}
BUG(); return 0;
return 0;
}
void task_init_registers(struct ktcb *task, unsigned long pc)
@@ -46,45 +63,12 @@ void task_init_registers(struct ktcb *task, unsigned long pc)
/*
* Copies global kernel entries into another pgd. Even for sub-pmd ranges
* the associated pmd entries are copied, assuming any pmds copied are
* applicable to all tasks in the system.
* Copies all global kernel entries that a user process
* should have in its pgd. In split page table setups
* this is a noop.
*/
void copy_pgd_kern_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end)
void copy_pgd_kernel_entries(pgd_table_t *to)
{
/* Extend sub-pmd ranges to their respective pmd boundaries */
start = align(start, PMD_MAP_SIZE);
if (end < start)
end = 0;
/* Aligning would overflow if mapping the last virtual pmd */
if (end < align(~0, PMD_MAP_SIZE) ||
start > end) /* end may have already overflown as input */
end = align_up(end, PMD_MAP_SIZE);
else
end = 0;
copy_pgds_by_vrange(to, from, start, end);
}
/* Copies all standard bits that a user process should have in its pgd */
void copy_pgd_kern_all(pgd_table_t *to)
{
pgd_table_t *from = TASK_PGD(current);
copy_pgd_kern_by_vrange(to, from, KERNEL_AREA_START, KERNEL_AREA_END);
copy_pgd_kern_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
copy_pgd_kern_by_vrange(to, from, USER_KIP_PAGE,
USER_KIP_PAGE + PAGE_SIZE);
copy_pgd_kern_by_vrange(to, from, ARM_HIGH_VECTOR,
ARM_HIGH_VECTOR + PAGE_SIZE);
copy_pgd_kern_by_vrange(to, from, ARM_SYSCALL_VECTOR,
ARM_SYSCALL_VECTOR + PAGE_SIZE);
/* We temporarily map uart registers to every process */
copy_pgd_kern_by_vrange(to, from, USERSPACE_CONSOLE_VIRTUAL,
USERSPACE_CONSOLE_VIRTUAL + PAGE_SIZE);
arch_copy_pgd_kernel_entries(to);
}

134
src/glue/arm/smp.c Normal file
View File

@@ -0,0 +1,134 @@
/*
* Copyright (C) 2010 B Labs Ltd.
*
* Authors: Prem Mallappa, Bahadir Balban
*
* SMP Initialization of cores.
*/
#include <l4/generic/platform.h>
#include INC_GLUE(smp.h)
#include INC_GLUE(init.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(cpu.h)
#include INC_SUBARCH(proc.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(io.h)
#include <l4/drivers/irq/gic/gic.h>
unsigned long secondary_run_signal;
void __smp_start(void);
/*
 * Bring up all secondary cores, one at a time. Each secondary starts
 * executing __smp_start at its physical address (MMU still off there).
 */
void smp_start_cores(void)
{
	void (*smp_start_func)(int) =
		(void (*)(int))virt_to_phys(__smp_start);

	/* FIXME: Check why this high-level version doesn't work */
	// v7_up_dcache_op_setway(CACHE_SETWAY_CLEAN);
	v7_clean_invalidate_setway();

	/* We dont probably need this, it is not listed as a requirement */
	arm_smp_inval_icache_entirely();

	/* Start other cpus; cpu 0 is the boot cpu and is skipped */
	for (int i = 1; i < CONFIG_NCPU; i++) {
		printk("%s: Bringing up CPU%d\n", __KERNELNAME__, i);
		if ((platform_smp_start(i, smp_start_func)) < 0) {
			printk("FATAL: Could not start secondary cpu. "
			       "cpu=%d\n", i);
			BUG();
		}
		wfi(); /* wait for other cpu send IPI to core0 */
	}
	scu_print_state();
}
/*
 * Early SMP setup: hand the configured cpu count to the platform
 * layer so it can prepare secondary-core bring-up (at bare minimum
 * the IPI function pointer). Uniprocessor builds need nothing here.
 */
void init_smp(void)
{
	if (CONFIG_NCPU <= 1)
		return;

	platform_smp_init(CONFIG_NCPU);
}
/*
 * Give this secondary core's idle context a valid address space and
 * thread id, reusing the shared initial space/pgd set up by cpu 0.
 */
void secondary_setup_idle_task(void)
{
	/* This also has its spid allocated by primary */
	current->space = &init_space;
	TASK_PGD(current) = &init_pgd;

	/* We need a thread id of our own */
	current->tid = id_new(&kernel_resources.ktcb_ids);
}
/*
* Idle wait before any tasks become available for running.
*
* FIXME: This should be changed such that tasks running on other
* cpus can be killed and secondaries wait on an idle task.
*
* Currently the tasks are held in wfi() even if asked to be killed
* until a new runnable task becomes runnable. This may be problematic
* for a pager who issued a kill request and is waiting for it to finish.
*/
void sched_secondary_start(void)
{
	/* Spin until the boot cpu signals that global init has finished */
	while (!secondary_run_signal)
		dmb();

	/* Set up a minimal idle environment for this core, then idle */
	secondary_setup_idle_task();
	setup_idle_caps();

	idle_task();

	/* idle_task() must not return */
	BUG();
}
/*
* this is where it jumps from secondary_start(), which is called from
* board_smp_start() to align each core to start here
*/
/*
 * Per-core init path for secondaries: enable virtual memory, set up
 * the per-cpu platform bits and scheduler, signal cpu 0 with an IPI,
 * then wait for work. Does not return.
 */
void smp_secondary_init(void)
{
	/* Print early core start message */
	// print_early("Secondary core started.\n");

	/* Start virtual memory */
	start_virtual_memory();

	/* Discard stale TLB and branch-predictor state from boot */
	arm_smp_inval_tlb_entirely();
	arm_smp_inval_bpa_entirely();
	dsb();
	isb();

	printk("%s: CPU%d: Virtual memory enabled.\n",
	       __KERNELNAME__, smp_get_cpuid());

	/* Mostly initialize GIC CPU interface */
	secondary_init_platform();

	printk("%s: CPU%d: Initialized.\n",
	       __KERNELNAME__, smp_get_cpuid());

	sched_init();

	dsb();
	/* Signal cpu 0 (waiting in wfi in smp_start_cores) that we are up */
	gic_send_ipi(CPUID_TO_MASK(0), 0);

	/*
	 * Wait for the first runnable task to become available
	 */
	sched_secondary_start();
}

99
src/glue/arm/smp_test.c Normal file
View File

@@ -0,0 +1,99 @@
#include <l4/lib/spinlock.h>
#include <l4/lib/printk.h>
#include INC_GLUE(smp.h)
#include INC_SUBARCH(cpu.h)
DECLARE_SPINLOCK(smp_lock);
static unsigned long smp_var = 0;
static unsigned long signal_finished;
static unsigned long basic_var = 0;
/*
 * Basic coherency check: a non-zero cpu writes basic_var, cpu 0 reads it.
 *
 * NOTE(review): nothing in this function orders cpu 0's read after the
 * other cpu's write — presumably call ordering elsewhere guarantees the
 * write happened first; confirm against the caller.
 */
void test_basic_coherent(void)
{
	dmb();
	if (smp_get_cpuid() == 0) {
		if (basic_var != 5555) {
			printk("FATAL: variable update not seen. var = %lu\n", basic_var);
			BUG();
		}
	} else {
		basic_var = 5555;
		dmb();
	}
}
/*
 * Spinlock coherency test: each cpu increments a shared counter 1000
 * times under the lock, then cpu 0 checks the grand total.
 *
 * NOTE(review): assumes exactly two participating cpus — both the
 * "other" computation and the expected total of 2000 hard-code that.
 */
void test_smp_coherent(void)
{
	int other;

	if (smp_get_cpuid() == 1)
		other = 0;
	else
		other = 1;

	/* Increment var under the lock */
	for (int i = 0; i < 1000; i++) {
		spin_lock(&smp_lock);
		smp_var++;
		spin_unlock(&smp_lock);
	}

	/* Signal finished */
	spin_lock(&smp_lock);
	signal_finished |= (1 << smp_get_cpuid());
	spin_unlock(&smp_lock);

	/* Wait for other to finish */
	while (!(signal_finished & (1 << other))) {
		dmb();
	}

	if (smp_get_cpuid() == 0) {
		printk("Total result: %lu\n", smp_var);
		/* 2 cpus x 1000 increments each */
		if (smp_var != 2000) {
			printk("FATAL: Total result not as expected\n");
			BUG();
		}
		printk("%s: Success.\n", __FUNCTION__);
	}
}
/*
 * Build a cpu bitmask with bits 0 .. ncpus-1 set.
 *
 * Returns 0 for ncpus <= 0. (BUGFIX: the previous while(--ncpus)
 * form decremented past zero for non-positive input, spinning until
 * signed wraparound — undefined behavior; a bounded loop cannot.)
 * Result is identical to the original for any ncpus >= 1.
 */
static u32 make_mask(int ncpus)
{
	u32 mask = 0;

	for (int cpu = 0; cpu < ncpus; cpu++)
		mask |= CPUID_TO_MASK(cpu);

	return mask;
}
#ifndef MAX_IPIS
#define MAX_IPIS 15
#endif
/*
 * Exercise IPI delivery: every ipi number to every other cpu one at a
 * time, then one ipi broadcast to all cores, then one to self.
 */
void test_ipi(void)
{
	int ipi, cpu;

	for (ipi = 0; ipi <= MAX_IPIS; ipi++) {
		for (cpu = 0; cpu < CONFIG_NCPU; cpu++) {
			if (cpu == smp_get_cpuid())
				continue;
			printk("IPI %d from %d to %d\n", ipi, smp_get_cpuid(), cpu);
			arch_send_ipi(CPUID_TO_MASK(cpu), ipi);
		}
	}

	/* Send IPI to all cores at once */
	cpu = make_mask(CONFIG_NCPU);
	printk("IPI from %d to all\n", smp_get_cpuid());
	arch_send_ipi(cpu, 1);

	printk("IPI from %d to self\n", smp_get_cpuid());
	arch_send_ipi(0, 1); /* Send IPI to self */
}

View File

@@ -7,11 +7,15 @@
#include <l4/lib/printk.h>
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/debug.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include INC_GLUE(memlayout.h)
#include INC_GLUE(syscall.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(debug.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(perfmon.h)
#include INC_API(syscall.h)
#include INC_API(kip.h)
@@ -31,6 +35,7 @@ void kip_init_syscalls(void)
kip.container_control = ARM_SYSCALL_PAGE + sys_container_control_offset;
kip.time = ARM_SYSCALL_PAGE + sys_time_offset;
kip.mutex_control = ARM_SYSCALL_PAGE + sys_mutex_control_offset;
kip.cache_control = ARM_SYSCALL_PAGE + sys_cache_control_offset;
}
/* Jump table for all system calls. */
@@ -119,6 +124,12 @@ int arch_sys_mutex_control(syscall_context_t *regs)
return sys_mutex_control((unsigned long)regs->r0, (int)regs->r1);
}
int arch_sys_cache_control(syscall_context_t *regs)
{
return sys_cache_control((unsigned long)regs->r0,
(unsigned long)regs->r1,
(unsigned int)regs->r2);
}
/*
* Initialises the system call jump table, for kernel to use.
@@ -140,9 +151,10 @@ void syscall_init()
syscall_table[sys_container_control_offset >> 2] = (syscall_fn_t)arch_sys_container_control;
syscall_table[sys_time_offset >> 2] = (syscall_fn_t)arch_sys_time;
syscall_table[sys_mutex_control_offset >> 2] = (syscall_fn_t)arch_sys_mutex_control;
syscall_table[sys_cache_control_offset >> 2] = (syscall_fn_t)arch_sys_cache_control;
add_boot_mapping(virt_to_phys(&__syscall_page_start),
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RX);
}
/* Checks a syscall is legitimate and dispatches to appropriate handler. */
@@ -155,8 +167,20 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr)
/* Check within syscall offset boundary */
if (((swi_addr & syscall_offset_mask) >= 0) &&
((swi_addr & syscall_offset_mask) <= syscalls_end_offset)) {
/* Do system call accounting, if enabled */
system_account_syscall();
system_account_syscall_type(swi_addr);
/* Start measure syscall timing, if enabled */
system_measure_syscall_start();
/* Quick jump, rather than compare each */
ret = (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
/* End measure syscall timing, if enabled */
system_measure_syscall_end(swi_addr);
} else {
printk("System call received from call @ 0x%lx."
"Instruction: 0x%lx.\n", swi_addr,
@@ -172,9 +196,6 @@ int syscall(syscall_context_t *regs, unsigned long swi_addr)
if (current->flags & TASK_SUSPENDING) {
BUG_ON(current->nlocks);
sched_suspend_sync();
} else if (current->flags & TASK_EXITING) {
BUG_ON(current->nlocks);
sched_exit_sync();
}
return ret;

View File

@@ -29,6 +29,8 @@
********************************************************************/
#include <stdarg.h> /* for va_list, ... comes with gcc */
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
/* FIXME: LICENSE LICENCE */
typedef unsigned int word_t;
@@ -422,6 +424,8 @@ int do_printk(char* format_p, va_list args)
return n;
}
DECLARE_SPINLOCK(printk_lock);
/**
* Flexible print function
*
@@ -435,9 +439,14 @@ int printk(char *format, ...)
{
va_list args;
int i;
unsigned long irqstate;
va_start(args, format);
spin_lock_irq(&printk_lock, &irqstate);
i = do_printk(format, args);
spin_unlock_irq(&printk_lock, irqstate);
va_end(args);
return i;
};

View File

@@ -3,12 +3,11 @@
*
* Copyright (C) 2007 Bahadir Balban
*/
#include INC_PLAT(uart.h)
void putc(char c)
{
if (c == '\n')
uart_putc('\r');
uart_putc(c);
uart_tx_char(PLATFORM_CONSOLE_VBASE, '\r');
uart_tx_char(PLATFORM_CONSOLE_VBASE, c);
}

View File

@@ -1,30 +1,18 @@
/*
* Copyright 2008-2010 B Labs Ltd.
*/
void *_memset(void *p, int c, int size);
void *_memcpy(void *d, void *s, int size);
void *memset(void *p, int c, int size)
{
char ch;
char *pp;
pp = (char *)p;
ch = (char)c;
for (int i = 0; i < size; i++) {
*pp++ = ch;
}
return p;
return _memset(p, c, size);
}
void *memcpy(void *d, void *s, int size)
{
char *dst = (char *)d;
char *src = (char *)s;
for (int i = 0; i < size; i++) {
*dst = *src;
dst++;
src++;
}
return d;
return _memcpy(d, s, size);
}

View File

@@ -154,6 +154,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
BUG_ON(wqh->sleepers < 0);
spin_lock_irq(&wqh->slock, &irqflags);
if (wqh->sleepers > 0) {
struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
@@ -165,7 +166,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
task_unset_wqh(sleeper);
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
//printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
// printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
spin_unlock_irq(&wqh->slock, irqflags);
if (flags & WAKEUP_SYNC)

View File

@@ -0,0 +1,11 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['print-early.c', 'platform.c', 'perfmon.c', 'irq.c', 'cm.c']
obj = env.Object(src_local)
Return('obj')

45
src/platform/beagle/cm.c Normal file
View File

@@ -0,0 +1,45 @@
/*
* Clock mangaer module of the beagleboard.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include INC_PLAT(cm.h)
#include INC_ARCH(io.h)
/*
* Enable Interface clock of device (represented by bit)
* in CM module's(represented by CM_BASE) CM_FCLEN register
*/
void omap_cm_enable_iclk(unsigned long cm_base, int bit)
{
unsigned int val = 0;
val = read((cm_base + CM_FCLKEN_OFFSET));
val |= (1 << bit);
write(val, (cm_base + CM_FCLKEN_OFFSET));
}
/*
 * Enable the Functional clock of a device (represented by bit)
 * in a CM module's (represented by cm_base) CM_FCLKEN register.
 *
 * BUGFIX: previously read CM_ICLKEN but wrote CM_FCLKEN — a
 * read-modify-write across two different registers, clobbering the
 * functional-clock enables with the interface-clock register value.
 */
void omap_cm_enable_fclk(unsigned long cm_base, int bit)
{
	unsigned int val = 0;

	val = read((cm_base + CM_FCLKEN_OFFSET));
	val |= (1 << bit);
	write(val, (cm_base + CM_FCLKEN_OFFSET));
}
/* Set clock source for device: OR src into CM_CLKSEL at bit position */
void omap_cm_clk_select(unsigned long cm_base, int bit, int src)
{
	unsigned int val = 0;

	/*
	 * NOTE(review): this ORs the new source bits in without clearing
	 * the old selection field first — correct only if the field was
	 * previously zero; stale bits otherwise survive. Confirm against
	 * the OMAP TRM field widths.
	 */
	val = read((cm_base + CM_CLKSEL_OFFSET));
	val |= (src << bit);
	write(val, (cm_base + CM_CLKSEL_OFFSET));
}

66
src/platform/beagle/irq.c Normal file
View File

@@ -0,0 +1,66 @@
/*
* Support for generic irq handling using platform irq controller (PL190)
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/generic/time.h>
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(timer.h)
#include INC_ARCH(exception.h)
#include <l4/drivers/irq/omap3/omap3_intc.h>
/* Single flat irq chip: the OMAP3 INTC owns every irq line on this board */
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
	[0] = {
		.name = "OMAP 3 irq controller",
		.level = 0,
		.cascade = IRQ_NIL,	/* no chained secondary controller */
		.start = 0,
		.end = IRQS_MAX,
		.ops = {
			.init = omap3_intc_init,
			.read_irq = omap3_intc_read_irq,
			.ack_and_mask = omap3_intc_ack_and_mask,
			.unmask = omap3_intc_unmask_irq,
		},
	},
};
/* Kernel tick: ack the timer0 device irq, then run the generic tick path */
static int platform_timer_handler(struct irq_desc *desc)
{
	timer_irq_clear(PLATFORM_TIMER0_VBASE);

	return do_timer_irq();
}
/*
* Timer handler for userspace
*/
static int platform_timer_user_handler(struct irq_desc *desc)
{
	/* Ack the device irq */
	timer_irq_clear(PLATFORM_TIMER1_VBASE);

	/* Notify the userspace thread registered on this irq descriptor */
	irq_thread_notify(desc);

	return 0;
}
/* Built-in irq handlers initialised at compile time.
 * Else register with register_irq() */
struct irq_desc irq_desc_array[IRQS_MAX] = {
	/* Kernel tick source */
	[IRQ_TIMER0] = {
		.name = "Timer0",
		.chip = &irq_chip_array[0],
		.handler = platform_timer_handler,
	},
	/* Timer exported to userspace */
	[IRQ_TIMER1] = {
		.name = "Timer1",
		.chip = &irq_chip_array[0],
		.handler = platform_timer_user_handler,
	},
};

View File

@@ -0,0 +1,89 @@
/*
* Platform specific perfmon initialization
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include INC_PLAT(timer.h)
#include <l4/lib/printk.h>
#include INC_PLAT(offsets.h)
#include INC_SUBARCH(perfmon.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memlayout.h)
/*
* Current findings by these tests:
*
* Cpu cycle count and timer ticks are consistently showing 400Mhz
* with a reference timer tick of 1Mhz. So cycle counts are fixed
* with regard to the timer.
*
* Instruction execute count on the busy_loop however, is varying
* between x1, x2 combinations when compared to timer and cycle
* count values. This is happening by trivial changes in code such
* as adding a function call. (Other variables are ruled out, e.g.
* no concurrent memory accesses, caches are off)
*
* There may be two causes to this:
* - Due to missing dmb/dsb/isb instructions.
* - Due to BTC (busy_loop has one branch) which may describe
* the doubling in IPC, since out of the 2 instructions in the
* busy loop one is a branch.
*
* Disabling the BTC increased cycle counts per instruction
* significantly, advising us not to expect any accuracy in counting
* instructions in cycles. Hence instruction-based tests are
* commented out. It is wise to only rely upon timer and cycle counts.
*/
/*
 * Measure CPU speed by counting cycles across a fixed number of timer
 * ticks (100000 ticks of the reference clock, assumed 13MHz below).
 */
void platform_test_tick_cycles()
{
	/* Initialize the timer */
	const unsigned int load_value = 0xffffffff - 100000;
	int mhz_top, mhz_bot, temp;
	unsigned long timer_base = PLATFORM_TIMER1_VBASE;
	int cyccnt;

	/* Make sure timer is disabled */
	timer_stop(timer_base);

	/* One shot, 32 bits, no irqs */
	timer_init_oneshot(timer_base);

	/* Load the timer with ticks value */
	timer_load(timer_base, load_value);

	/* Start the timer */
	timer_start(timer_base);

	/* Start counter */
	perfmon_reset_start_cyccnt();

	/* Wait until 0 */
	while (timer_read(timer_base) != 0)
		;

	cyccnt = perfmon_read_cyccnt();

	/*
	 * Fixed-point accuracy on bottom digit.
	 * NOTE(review): cyccnt * 64 * 10 * 13 can overflow int for large
	 * cycle counts — confirm the counter range stays small enough.
	 */
	temp = cyccnt * 64 * 10 * 13 / 100000;
	mhz_top = temp / 10;
	mhz_bot = temp - mhz_top * 10;

	printk("Perfmon: 0x%x cycle count \n", cyccnt);
	printk("%s: %d.%d MHz CPU speed measured by timer REFCLK at 13MHz\n",
	       __KERNELNAME__, mhz_top, mhz_bot);
}
/* Run only the measurement found to give repeatable results (see file header) */
void platform_test_cpucycles(void)
{
	/*
	 * Variable results:
	 *
	 * platform_test_loop_cycles();
	 * platform_test_loop_ticks();
	 */

	/* Fixed result */
	platform_test_tick_cycles();
}

View File

@@ -0,0 +1,142 @@
/*
* Beagle Board platform-specific initialisation and setup
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include <l4/generic/bootmem.h>
#include INC_ARCH(linker.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(uart.h)
#include INC_PLAT(timer.h)
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
#include INC_PLAT(cm.h)
/*
* The devices that are used by the kernel are mapped
* independent of these capabilities, but these provide a
* concise description of what is used by the kernel.
*/
int platform_setup_device_caps(struct kernel_resources *kres)
{
	struct capability *timer[1];

	/* Setup timer1 capability as free: one page, device type TIMER, devnum 1 */
	timer[0] = alloc_bootmem(sizeof(*timer[0]), 0);
	timer[0]->start = __pfn(PLATFORM_TIMER1_BASE);
	timer[0]->end = timer[0]->start + 1;
	timer[0]->size = timer[0]->end - timer[0]->start;
	cap_set_devtype(timer[0], CAP_DEVTYPE_TIMER);
	cap_set_devnum(timer[0], 1);
	link_init(&timer[0]->list);

	/* Park it on the free device-memory list for pagers to claim */
	cap_list_insert(timer[0], &kres->devmem_free);

	/* Always succeeds */
	return 0;
}
/*
* Use UART2 for kernel as well as user tasks,
* so map it to kernel and user space, this only
* is provided by beagle board
*/
void init_platform_console(void)
{
	/* Map the UART and the peripheral clock-manager registers for the kernel */
	add_boot_mapping(PLATFORM_UART2_BASE, PLATFORM_CONSOLE_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);
	add_boot_mapping(PLATFORM_PERCM_BASE, PLATFORM_PERCM_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

	/*
	 * Map same UART IO area to userspace so that primitive uart-based
	 * userspace printf can work. Note, this raw mapping is to be
	 * removed in the future, when file-based io is implemented.
	 */
	add_boot_mapping(PLATFORM_UART2_BASE, USERSPACE_CONSOLE_VBASE,
			 PAGE_SIZE, MAP_USR_IO);

	/*
	 * Select the UART clock source.
	 * NOTE(review): original comment said "32KHz clock signal" but the
	 * source actually selected is SYS_CLK — confirm which is intended.
	 */
	omap_cm_clk_select(PLATFORM_PERCM_VBASE, 11,
			   OMAP_TIMER_CLKSRC_SYS_CLK);

	/* Enable Interface and Functional clock */
	omap_cm_enable_iclk(PLATFORM_PERCM_VBASE, 11);
	omap_cm_enable_fclk(PLATFORM_PERCM_VBASE, 11);

	uart_init(PLATFORM_CONSOLE_VBASE);
}
/* Start the kernel tick: unmask the timer0 irq line, then run the timer */
void platform_timer_start(void)
{
	/* Enable irq line for TIMER0 */
	irq_enable(IRQ_TIMER0);

	/* Enable timer */
	timer_start(PLATFORM_TIMER0_VBASE);
}
/*
* We are using GPTIMER1 only, so we map GPTIMER1 base,
* incase any other timer is needed we need to map it
* to userspace or kernel space as needed
*/
void init_platform_timer(void)
{
	/* Map GPTIMER1 and the wakeup-domain clock manager registers */
	add_boot_mapping(PLATFORM_TIMER0_BASE, PLATFORM_TIMER0_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);
	add_boot_mapping(PLATFORM_WKUP_CM_BASE, PLATFORM_WKUP_CM_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

#if 0
	/* use 32KHz clock signal */
	omap_cm_clk_select(PLATFORM_WKUP_CM_VBASE, 0,
			   OMAP_TIMER_CLKSRC_32KHZ_CLK);
#else
	/*
	 * Assumption: Beagle board RevC manual says,
	 * it has 26MHz oscillator present, so we are
	 * assuming this oscillator is our system clock
	 */
	omap_cm_clk_select(PLATFORM_WKUP_CM_VBASE, 0,
			   OMAP_TIMER_CLKSRC_SYS_CLK);
#endif

	/* Enable Interface and Functional clock */
	omap_cm_enable_iclk(PLATFORM_WKUP_CM_VBASE, 0);
	omap_cm_enable_fclk(PLATFORM_WKUP_CM_VBASE, 0);

	timer_init(PLATFORM_TIMER0_VBASE);
}
void init_platform_irq_controller()
{
	/* Map the interrupt controller registers, then run generic init */
	add_boot_mapping(PLATFORM_INTC_BASE, PLATFORM_INTC_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);
	irq_controllers_init();
}
/*
 * Map devices that are intended to be driven from userspace.
 * Currently only the secondary timer is mapped.
 */
void init_platform_devices()
{
	/* Add userspace devices here as you develop their irq handlers */
	add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);
}
/*
 * Platform entry point: bring up the console, the kernel timer,
 * the interrupt controller and the userspace-visible devices,
 * in that order.
 */
void platform_init(void)
{
	init_platform_console();
	init_platform_timer();
	init_platform_irq_controller();
	init_platform_devices();
}

View File

@@ -0,0 +1,75 @@
#include INC_PLAT(uart.h)
#include INC_PLAT(offsets.h)
#include INC_ARCH(io.h)
/*
 * Early polling UART print, usable both before and after the MMU is
 * enabled. Probes the CP15 control register to decide whether to talk
 * to the UART through its physical or its virtual address, and
 * rewrites the string pointer to its physical address while the MMU
 * is still off (strings are linked at virtual addresses).
 */
void print_early(char *str)
{
	unsigned int reg = 0;
	unsigned long uart_base;

	/* Check if mmu is on: read CP15 c1 control register; bit 0 = MMU enable */
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c1, c0"
		: "=r" (reg)
		: "r" (reg)
	);

	/*
	 * Get uart phys/virt base based on mmu on/off
	 * Also strings are linked at virtual address, so if
	 * we are running with mmu off we should translate
	 * string address to physical
	 */
	if (reg & 1) {
		uart_base = PLATFORM_CONSOLE_VBASE;
	}
	else {
		uart_base = PLATFORM_UART2_BASE;
		/*
		 * NOTE(review): virt->phys here assumes the kernel virtual
		 * window is a simple offset of physical memory -- confirm
		 * against KERNEL_AREA_START / PLATFORM_PHYS_MEM_START layout.
		 */
		str = (char *)(((unsigned long)str & ~KERNEL_AREA_START) |
			       PLATFORM_PHYS_MEM_START);
	}

	/* call uart tx function; emit CRLF for each newline */
	while (*str != '\0') {
		uart_tx_char(uart_base, *str);
		if (*str == '\n')
			uart_tx_char(uart_base, '\r');
		++str;
	}
}
/*
 * Print an unsigned integer in "0x..." hexadecimal through the early
 * console. Digits are produced least-significant first into the tail
 * of a stack buffer, then the string is printed from its first digit.
 *
 * Note: despite the name, leading zeroes are not emitted -- the width
 * varies with the value (zero prints as "0x0"). Worst case uses
 * 8 digits + "0x" + NUL = 11 of the 16 buffer bytes.
 */
void printhex8(unsigned int val)
{
	char hexbuf[16];
	char *temp = hexbuf + 15;
	int v;

	/* put end of string */
	*(temp--) = '\0';

	if (!val) {
		*temp = '0';
	}
	else {
		while (val) {
			v = val & 0xf;
			val = val >> 4;
			--temp;
			/* convert nibble value to ascii */
			if (v >= 10)
				v += ('a' - 10);
			else
				v = v + '0';
			/*
			 * Store the digit with a truncating cast. The
			 * original read the first byte of 'v' through a
			 * char pointer, which is only correct on
			 * little-endian targets; the cast is portable.
			 */
			*temp = (char)v;
		}
	}

	/* prepend the "0x" prefix and print from the first character */
	*(--temp) = 'x';
	*(--temp) = '0';
	print_early(temp);
}

View File

@@ -1,10 +1,24 @@
# Inherit global environment
Import('env')
import os, sys
PROJRELROOT = '../../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'platform', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['printascii.S','platform.c', 'uart.c', 'timer.c', 'irq.c']
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)
# This is a realview-family platform; include the corresponding shared files.
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'), exports = {'env' : env, 'symbols' : symbols},
duplicate=0, build_dir='realview')
Return('obj')

View File

@@ -3,58 +3,61 @@
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/generic/time.h>
#include <l4/drivers/irq/gic/gic.h>
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/drivers/timer/sp804/sp804_timer.h>
#include <l4/generic/irq.h>
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX];
#if 0
extern struct gic_data gic_data[IRQ_CHIPS_MAX];
#if defined (CONFIG_CPU_ARM11MPCORE) || defined (CONFIG_CPU_CORTEXA9)
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
.name = "Vectored irq controller",
[0] = {
.name = "CoreTile GIC",
.level = 0,
.cascade = IRQ_SIC,
.offset = 0,
.cascade = IRQ_NIL,
.start = 0,
.end = IRQS_MAX,
.data = &gic_data[0],
.ops = {
.init = pl190_vic_init,
.read_irq = pl190_read_irq,
.ack_and_mask = pl190_mask_irq,
.unmask = pl190_unmask_irq,
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq
},
},
},
#if 0
[1] = {
.name = "Secondary irq controller",
.name = "EB GIC",
.level = 1,
.cascade = IRQ_NIL,
.offset = SIRQ_CHIP_OFFSET,
.start = EB_GIC_IRQ_OFFSET,
.end = EB_GIC_IRQ_OFFSET + IRQS_MAX,
.data = &gic_data[1],
.ops = {
.init = pl190_sic_init,
.read_irq = pl190_sic_read_irq,
.ack_and_mask = pl190_sic_mask_irq,
.unmask = pl190_sic_unmask_irq,
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq,
},
},
#endif
};
#else
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
.name = "EB GIC",
.level = 0,
.cascade = IRQ_NIL,
.start = 0,
.end = EB_GIC_IRQ_OFFSET + IRQS_MAX,
.data = &gic_data[1],
.ops = {
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq,
},
},
};
#endif
static int platform_timer_handler(void)
{
sp804_irq_handler(PLATFORM_TIMER0_BASE);
return do_timer_irq();
}
/* Built-in irq handlers initialised at compile time.
* Else register with register_irq() */
struct irq_desc irq_desc_array[IRQS_MAX] = {
[IRQ_TIMER01] = {
.name = "Timer01",
.chip = &irq_chip_array[0],
.handler = platform_timer_handler,
},
};

View File

@@ -1,64 +1,102 @@
/*
* EB platform-specific initialisation and setup
*
* Copyright (C) 2007 Bahadir Balban
* Copyright (C) 2009-2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*
*/
#include <l4/generic/platform.h>
#include <l4/generic/bootmem.h>
#include INC_PLAT(offsets.h)
#include INC_ARCH(io.h)
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_PLAT(offsets.h)
#include <l4/generic/cap-types.h>
#include INC_PLAT(platform.h)
#include INC_PLAT(uart.h)
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(smp.h)
void init_platform_console(void)
/*
* The devices that are used by the kernel are mapped
* independent of these capabilities, but these provide a
* concise description of what is used by the kernel.
*/
int platform_setup_device_caps(struct kernel_resources *kres)
{
add_boot_mapping(EB_UART0_BASE, PLATFORM_CONSOLE0_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
struct capability *uart[4], *timer[4];
/*
* Map same UART IO area to userspace so that primitive uart-based
* userspace printf can work. Note, this raw mapping is to be
* removed in the future, when file-based io is implemented.
*/
add_boot_mapping(EB_UART0_BASE, USERSPACE_UART_BASE, PAGE_SIZE,
MAP_USR_IO_FLAGS);
/* Setup capabilities for userspace uarts and timers */
uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
uart[1]->start = __pfn(PLATFORM_UART1_BASE);
uart[1]->end = uart[1]->start + 1;
uart[1]->size = uart[1]->end - uart[1]->start;
cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
cap_set_devnum(uart[1], 1);
link_init(&uart[1]->list);
cap_list_insert(uart[1], &kres->devmem_free);
uart_init();
}
uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
uart[2]->start = __pfn(PLATFORM_UART2_BASE);
uart[2]->end = uart[2]->start + 1;
uart[2]->size = uart[2]->end - uart[2]->start;
cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
cap_set_devnum(uart[2], 2);
link_init(&uart[2]->list);
cap_list_insert(uart[2], &kres->devmem_free);
void init_platform_timer(void)
{
add_boot_mapping(EB_TIMER01_BASE, PLATFORM_TIMER0_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(EB_SYSCTRL_BASE, EB_SYSCTRL_VBASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
timer_init();
uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
uart[3]->start = __pfn(PLATFORM_UART3_BASE);
uart[3]->end = uart[3]->start + 1;
uart[3]->size = uart[3]->end - uart[3]->start;
cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
cap_set_devnum(uart[3], 3);
link_init(&uart[3]->list);
cap_list_insert(uart[3], &kres->devmem_free);
/* Setup timer1 capability as free */
timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
timer[1]->end = timer[1]->start + 1;
timer[1]->size = timer[1]->end - timer[1]->start;
cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
cap_set_devnum(timer[1], 1);
link_init(&timer[1]->list);
cap_list_insert(timer[1], &kres->devmem_free);
return 0;
}
void init_platform_irq_controller()
{
#if 0
add_boot_mapping(PB926_VIC_BASE, PLATFORM_IRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PB926_SIC_BASE, PLATFORM_SIRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
irq_controllers_init();
unsigned int sysctrl = PLATFORM_SYSCTRL_VBASE;
write(SYSCTRL_UNLOCK, sysctrl + SYS_LOCK);
write(PLD_CTRL1_INTMOD_WITHOUT_DCC, sysctrl + SYS_PLDCTL1);
write(SYSCTRL_LOCK, sysctrl + SYS_LOCK); /* Lock again */
#if defined (CONFIG_CPU_ARM11MPCORE) || defined (CONFIG_CPU_CORTEXA9)
/* TODO: we need to map 64KB ?*/
add_boot_mapping(MPCORE_PRIVATE_BASE, MPCORE_PRIVATE_VBASE,
PAGE_SIZE * 2, MAP_IO_DEFAULT);
gic_dist_init(0, GIC0_DIST_VBASE);
gic_cpu_init(0, GIC0_CPU_VBASE);
#else
add_boot_mapping(PLATFORM_GIC1_BASE, PLATFORM_GIC1_VBASE, PAGE_SIZE*2,
MAP_IO_DEFAULT);
gic_dist_init(1, GIC1_DIST_VBASE);
#endif
#if !defined (CONFIG_CPU_ARM11MPCORE) && !defined (CONFIG_CPU_CORTEXA9)
gic_cpu_init(1, PLATFORM_GIC1_VBASE);
#endif
irq_controllers_init();
}
void platform_init(void)
void init_platform_devices()
{
init_platform_console();
init_platform_timer();
init_platform_irq_controller();
}
}

View File

@@ -4,7 +4,10 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['printascii.S','platform.c', 'uart.c', 'timer.c', 'irq.c']
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)
# This is a realview-family platform; include the corresponding shared files.
obj += SConscript('../realview/SConscript', exports = {'env' : env})
Return('obj')

View File

@@ -9,8 +9,8 @@
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include INC_PLAT(timer.h)
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/drivers/timer/sp804/sp804_timer.h>
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX];
#if 0
@@ -42,19 +42,3 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
};
#endif
static int platform_timer_handler(void)
{
sp804_irq_handler(PLATFORM_TIMER0_BASE);
return do_timer_irq();
}
/* Built-in irq handlers initialised at compile time.
* Else register with register_irq() */
struct irq_desc irq_desc_array[IRQS_MAX] = {
[IRQ_TIMER01] = {
.name = "Timer01",
.chip = &irq_chip_array[0],
.handler = platform_timer_handler,
},
};

View File

@@ -3,12 +3,10 @@
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
@@ -19,49 +17,101 @@
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
void init_platform_console(void)
/*
* The devices that are used by the kernel are mapped
* independent of these capabilities, but these provide a
* concise description of what is used by the kernel.
*/
int platform_setup_device_caps(struct kernel_resources *kres)
{
add_boot_mapping(PB11MPCORE_UART0_BASE, PLATFORM_CONSOLE0_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
struct capability *uart[4], *timer[4];
/*
* Map same UART IO area to userspace so that primitive uart-based
* userspace printf can work. Note, this raw mapping is to be
* removed in the future, when file-based io is implemented.
*/
add_boot_mapping(PB11MPCORE_UART0_BASE, USERSPACE_UART_BASE, PAGE_SIZE,
MAP_USR_IO_FLAGS);
/* Setup capabilities for userspace uarts and timers */
uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
uart[1]->start = __pfn(PLATFORM_UART1_BASE);
uart[1]->end = uart[1]->start + 1;
uart[1]->size = uart[1]->end - uart[1]->start;
cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
cap_set_devnum(uart[1], 1);
link_init(&uart[1]->list);
cap_list_insert(uart[1], &kres->devmem_free);
uart_init();
}
uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
uart[2]->start = __pfn(PLATFORM_UART2_BASE);
uart[2]->end = uart[2]->start + 1;
uart[2]->size = uart[2]->end - uart[2]->start;
cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
cap_set_devnum(uart[2], 2);
link_init(&uart[2]->list);
cap_list_insert(uart[2], &kres->devmem_free);
void init_platform_timer(void)
{
add_boot_mapping(PB11MPCORE_TIMER01_BASE, PLATFORM_TIMER0_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PB11MPCORE_SYSCTRL0_BASE, PB11MPCORE_SYSCTRL0_VBASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
/* TODO: SYSCTRL1 mapping may be needed */
add_boot_mapping(PB11MPCORE_SYSCTRL1_BASE, PB11MPCORE_SYSCTRL1_VBASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
timer_init();
uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
uart[3]->start = __pfn(PLATFORM_UART3_BASE);
uart[3]->end = uart[3]->start + 1;
uart[3]->size = uart[3]->end - uart[3]->start;
cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
cap_set_devnum(uart[3], 3);
link_init(&uart[3]->list);
cap_list_insert(uart[3], &kres->devmem_free);
/* Setup timer1 capability as free */
timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
timer[1]->end = timer[1]->start + 1;
timer[1]->size = timer[1]->end - timer[1]->start;
cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
cap_set_devnum(timer[1], 1);
link_init(&timer[1]->list);
cap_list_insert(timer[1], &kres->devmem_free);
/* Setup timer2 capability as free */
timer[2] = alloc_bootmem(sizeof(*timer[2]), 0);
timer[2]->start = __pfn(PLATFORM_TIMER2_BASE);
timer[2]->end = timer[2]->start + 1;
timer[2]->size = timer[2]->end - timer[2]->start;
cap_set_devtype(timer[2], CAP_DEVTYPE_TIMER);
cap_set_devnum(timer[2], 1);
link_init(&timer[2]->list);
cap_list_insert(timer[2], &kres->devmem_free);
/* Setup timer3 capability as free */
timer[3] = alloc_bootmem(sizeof(*timer[3]), 0);
timer[3]->start = __pfn(PLATFORM_TIMER3_BASE);
timer[3]->end = timer[3]->start + 1;
timer[3]->size = timer[3]->end - timer[3]->start;
cap_set_devtype(timer[3], CAP_DEVTYPE_TIMER);
cap_set_devnum(timer[3], 1);
link_init(&timer[3]->list);
cap_list_insert(timer[3], &kres->devmem_free);
return 0;
}
void init_platform_irq_controller()
{
#if 0
add_boot_mapping(PB926_VIC_BASE, PLATFORM_IRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
MAP_IO_DEFAULT);
add_boot_mapping(PB926_SIC_BASE, PLATFORM_SIRQCTRL_BASE, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
MAP_IO_DEFAULT);
irq_controllers_init();
#endif
}
void platform_init(void)
void init_platform_devices()
{
init_platform_console();
init_platform_timer();
init_platform_irq_controller();
/* We need SYSCTRL1 for initializing TIMER2 and 3 */
add_boot_mapping(PLATFORM_SYSCTRL1_BASE, PLATFORM_SYSCTRL1_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
/* Add userspace devices here as you develop their irq handlers */
add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
add_boot_mapping(PLATFORM_TIMER2_BASE, PLATFORM_TIMER2_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
add_boot_mapping(PLATFORM_TIMER3_BASE, PLATFORM_TIMER3_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
}

View File

@@ -4,7 +4,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['printascii.S','platform.c', 'uart.c', 'timer.c', 'irq.c']
src_local = ['print-early.c','platform.c', 'irq.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -8,9 +8,9 @@
#include <l4/generic/time.h>
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(timer.h)
#include INC_ARCH(exception.h)
#include <l4/drivers/irq/pl190/pl190_vic.h>
#include <l4/drivers/timer/sp804/sp804_timer.h>
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
@@ -47,7 +47,7 @@ static int platform_timer_handler(struct irq_desc *desc)
* Microkernel is using just TIMER0,
* so we call handler with TIMER01 index
*/
sp804_irq_handler(PLATFORM_TIMER0_VIRTUAL);
timer_irq_clear(PLATFORM_TIMER0_VBASE);
return do_timer_irq();
}
@@ -57,7 +57,7 @@ static int platform_timer_handler(struct irq_desc *desc)
static int platform_timer_user_handler(struct irq_desc *desc)
{
/* Ack the device irq */
sp804_irq_handler(PLATFORM_TIMER1_VIRTUAL);
timer_irq_clear(PLATFORM_TIMER1_VBASE);
/* Notify the userspace */
irq_thread_notify(desc);

View File

@@ -8,16 +8,20 @@
#include <l4/generic/irq.h>
#include <l4/generic/bootmem.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_PLAT(offsets.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(uart.h)
#include INC_PLAT(timer.h)
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
#include INC_ARCH(io.h)
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
/*
* The devices that are used by the kernel are mapped
@@ -26,11 +30,11 @@
*/
int platform_setup_device_caps(struct kernel_resources *kres)
{
struct capability *uart[4], *timer[4], *clcd[1];
struct capability *uart[4], *timer[2];
/* Setup capabilities for userspace uarts and timers */
uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
uart[1]->start = __pfn(PB926_UART1_BASE);
uart[1]->start = __pfn(PLATFORM_UART1_BASE);
uart[1]->end = uart[1]->start + 1;
uart[1]->size = uart[1]->end - uart[1]->start;
cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
@@ -39,7 +43,7 @@ int platform_setup_device_caps(struct kernel_resources *kres)
cap_list_insert(uart[1], &kres->devmem_free);
uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
uart[2]->start = __pfn(PB926_UART2_BASE);
uart[2]->start = __pfn(PLATFORM_UART2_BASE);
uart[2]->end = uart[2]->start + 1;
uart[2]->size = uart[2]->end - uart[2]->start;
cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
@@ -48,7 +52,7 @@ int platform_setup_device_caps(struct kernel_resources *kres)
cap_list_insert(uart[2], &kres->devmem_free);
uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
uart[3]->start = __pfn(PB926_UART3_BASE);
uart[3]->start = __pfn(PLATFORM_UART3_BASE);
uart[3]->end = uart[3]->start + 1;
uart[3]->size = uart[3]->end - uart[3]->start;
cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
@@ -58,7 +62,7 @@ int platform_setup_device_caps(struct kernel_resources *kres)
/* Setup timer1 capability as free */
timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
timer[1]->start = __pfn(PB926_TIMER23_BASE);
timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
timer[1]->end = timer[1]->start + 1;
timer[1]->size = timer[1]->end - timer[1]->start;
cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
@@ -66,34 +70,27 @@ int platform_setup_device_caps(struct kernel_resources *kres)
link_init(&timer[1]->list);
cap_list_insert(timer[1], &kres->devmem_free);
/* Setup clcd capability as free */
clcd[0] = alloc_bootmem(sizeof(*clcd[0]), 0);
clcd[0]->start = __pfn(PB926_CLCD_BASE);
clcd[0]->end = clcd[0]->start + 1;
clcd[0]->size = clcd[0]->end - clcd[0]->start;
cap_set_devtype(clcd[0], CAP_DEVTYPE_CLCD);
cap_set_devnum(clcd[0], 0);
link_init(&clcd[0]->list);
cap_list_insert(clcd[0], &kres->devmem_free);
return 0;
}
/* We will use UART0 for kernel as well as user tasks, so map it to kernel and user space */
/*
* We will use UART0 for kernel as well as user tasks,
* so map it to kernel and user space
*/
void init_platform_console(void)
{
add_boot_mapping(PB926_UART0_BASE, PLATFORM_CONSOLE_VIRTUAL, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PLATFORM_UART0_BASE, PLATFORM_CONSOLE_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
/*
* Map same UART IO area to userspace so that primitive uart-based
* userspace printf can work. Note, this raw mapping is to be
* removed in the future, when file-based io is implemented.
*/
add_boot_mapping(PB926_UART0_BASE, USERSPACE_CONSOLE_VIRTUAL, PAGE_SIZE,
MAP_USR_IO_FLAGS);
add_boot_mapping(PLATFORM_UART0_BASE, USERSPACE_CONSOLE_VBASE,
PAGE_SIZE, MAP_USR_IO);
uart_init();
uart_init(PLATFORM_CONSOLE_VBASE);
}
/*
@@ -103,33 +100,62 @@ void init_platform_console(void)
*/
void init_platform_timer(void)
{
add_boot_mapping(PB926_TIMER01_BASE, PLATFORM_TIMER0_VIRTUAL, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PLATFORM_TIMER0_BASE, PLATFORM_TIMER0_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
add_boot_mapping(PB926_SYSCTRL_BASE, PLATFORM_SYSCTRL_VIRTUAL, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
timer_init();
/* 1 Mhz means can tick up to 1,000,000 times a second */
timer_init(PLATFORM_TIMER0_VBASE, 1000000 / CONFIG_SCHED_TICKS);
}
void init_platform_irq_controller()
{
add_boot_mapping(PB926_VIC_BASE, PLATFORM_IRQCTRL0_VIRTUAL, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PB926_SIC_BASE, PLATFORM_IRQCTRL1_VIRTUAL, PAGE_SIZE,
MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PLATFORM_VIC_BASE, PLATFORM_IRQCTRL0_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
add_boot_mapping(PLATFORM_SIC_BASE, PLATFORM_IRQCTRL1_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
irq_controllers_init();
}
void init_platform_devices()
{
/* Add userspace devices here as you develop their irq handlers */
add_boot_mapping(PB926_TIMER23_BASE, PLATFORM_TIMER1_VIRTUAL,
PAGE_SIZE, MAP_IO_DEFAULT_FLAGS);
add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
PAGE_SIZE, MAP_IO_DEFAULT);
}
/* If these bits are off, the 32Khz OSC source is used instead of 1MHz */
#define TIMER3_SCTRL_1MHZ	(1 << 21)
#define TIMER2_SCTRL_1MHZ	(1 << 19)
#define TIMER1_SCTRL_1MHZ	(1 << 17)
#define TIMER0_SCTRL_1MHZ	(1 << 15)

/*
 * Set all four timers to use the 1Mhz OSC clock by setting their
 * clock-select bits in the SP810 system controller control register.
 */
void init_timer_osc(void)
{
	volatile u32 reg;

	add_boot_mapping(PLATFORM_SYSCTRL_BASE, PLATFORM_SYSCTRL_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

	/* Read-modify-write: preserve the other SCCTRL bits */
	reg = read(SP810_SCCTRL);
	write(reg | TIMER0_SCTRL_1MHZ | TIMER1_SCTRL_1MHZ
	      | TIMER2_SCTRL_1MHZ | TIMER3_SCTRL_1MHZ,
	      SP810_SCCTRL);
}
/*
 * Unmask the timer irq line and start the kernel tick timer.
 * Called once the timer has been mapped and initialized.
 */
void platform_timer_start(void)
{
	/* Enable irq line for TIMER0 */
	irq_enable(IRQ_TIMER0);

	/* Enable timer */
	timer_start(PLATFORM_TIMER0_VBASE);
}
void platform_init(void)
{
init_timer_osc();
init_platform_console();
init_platform_timer();
init_platform_irq_controller();

View File

@@ -0,0 +1,75 @@
#include INC_PLAT(uart.h)
#include INC_PLAT(offsets.h)
#include INC_ARCH(io.h)
/*
 * Early polling UART print, usable both before and after the MMU is
 * enabled. Probes the CP15 control register to decide whether to talk
 * to the UART through its physical or its virtual address, and
 * rewrites the string pointer to its physical address while the MMU
 * is still off (strings are linked at virtual addresses).
 */
void print_early(char *str)
{
	unsigned int reg = 0;
	unsigned long uart_base;

	/* Check if mmu is on: read CP15 c1 control register; bit 0 = MMU enable */
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c1, c0"
		: "=r" (reg)
		: "r" (reg)
	);

	/*
	 * Get uart phys/virt base based on mmu on/off
	 * Also strings are linked at virtual address, so if
	 * we are running with mmu off we should translate
	 * string address to physical
	 */
	if (reg & 1) {
		uart_base = PLATFORM_CONSOLE_VBASE;
	}
	else {
		uart_base = PLATFORM_UART0_BASE;
		/*
		 * NOTE(review): virt->phys here assumes the kernel virtual
		 * window is a simple offset of physical memory -- confirm
		 * against KERNEL_AREA_START / PLATFORM_PHYS_MEM_START layout.
		 */
		str = (char *)(((unsigned long)str & ~KERNEL_AREA_START) |
			       PLATFORM_PHYS_MEM_START);
	}

	/* call uart tx function; emit CRLF for each newline */
	while (*str != '\0') {
		uart_tx_char(uart_base, *str);
		if (*str == '\n')
			uart_tx_char(uart_base, '\r');
		++str;
	}
}
/*
 * Print an unsigned integer in "0x..." hexadecimal through the early
 * console. Digits are produced least-significant first into the tail
 * of a stack buffer, then the string is printed from its first digit.
 *
 * Note: despite the name, leading zeroes are not emitted -- the width
 * varies with the value (zero prints as "0x0"). Worst case uses
 * 8 digits + "0x" + NUL = 11 of the 16 buffer bytes.
 */
void printhex8(unsigned int val)
{
	char hexbuf[16];
	char *temp = hexbuf + 15;
	int v;

	/* put end of string */
	*(temp--) = '\0';

	if (!val) {
		*temp = '0';
	}
	else {
		while (val) {
			v = val & 0xf;
			val = val >> 4;
			--temp;
			/* convert nibble value to ascii */
			if (v >= 10)
				v += ('a' - 10);
			else
				v = v + '0';
			/*
			 * Store the digit with a truncating cast. The
			 * original read the first byte of 'v' through a
			 * char pointer, which is only correct on
			 * little-endian targets; the cast is portable.
			 */
			*temp = (char)v;
		}
	}

	/* prepend the "0x" prefix and print from the first character */
	*(--temp) = 'x';
	*(--temp) = '0';
	print_early(temp);
}

View File

@@ -0,0 +1,24 @@
# Inherit global environment
import os, sys
PROJRELROOT = '../../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'platform', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)
# This is a realview-family platform; include the corresponding shared files.
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'), exports = {'env' : env, 'symbols' : symbols},
duplicate=0, build_dir='realview')
Return('obj')

65
src/platform/pba8/irq.c Normal file
View File

@@ -0,0 +1,65 @@
/*
* Support for generic irq handling using platform irq controller (PL190)
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/drivers/irq/gic/gic.h>
#include INC_PLAT(irq.h)
#include <l4/generic/irq.h>
extern struct gic_data gic_data[IRQ_CHIPS_MAX];
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX];
#if 0
#ifdef CONFIG_CPU_ARM11MPCORE
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
.name = "CoreTile GIC",
.level = 0,
.cascade = MPCORE_GIC_IRQ_EB_GIC1,
.start = 0,
.end = IRQS_MAX,
.data = &gic_data[0],
.ops = {
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq
},
},
[1] = {
.name = "EB GIC",
.level = 1,
.cascade = IRQ_NIL,
.start = EB_GIC_IRQ_OFFSET,
.end = EB_GIC_IRQ_OFFSET + IRQS_MAX,
.data = &gic_data[1],
.ops = {
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq,
},
},
};
#else
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
[0] = {
.name = "EB GIC",
.level = 0,
.cascade = IRQ_NIL,
.start = 0,
.end = EB_GIC_IRQ_OFFSET + IRQS_MAX,
.data = &gic_data[1],
.ops = {
.init = gic_dummy_init,
.read_irq = gic_read_irq,
.ack_and_mask = gic_ack_and_mask,
.unmask = gic_unmask_irq,
},
},
};
#endif
#endif

View File

@@ -0,0 +1,97 @@
/*
* EB platform-specific initialisation and setup
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <l4/generic/platform.h>
#include <l4/generic/bootmem.h>
#include INC_PLAT(offsets.h)
#include INC_ARCH(io.h)
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include <l4/generic/cap-types.h>
#include INC_PLAT(platform.h)
#include INC_PLAT(irq.h)
#include INC_GLUE(mapping.h)
/*
* The devices that are used by the kernel are mapped
* independent of these capabilities, but these provide a
* concise description of what is used by the kernel.
*/
/*
 * Describe the platform devices available to userspace as free device
 * capabilities on kres->devmem_free.
 *
 * Currently a no-op on this platform: the capability setup below is
 * compiled out with #if 0, so the function only reports success.
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
#if 0
	struct capability *uart[4], *timer[4];

	/* Setup capabilities for userspace uarts and timers */
	uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
	uart[1]->start = __pfn(PLATFORM_UART1_BASE);
	uart[1]->end = uart[1]->start + 1;	/* one page of device memory */
	uart[1]->size = uart[1]->end - uart[1]->start;
	cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
	cap_set_devnum(uart[1], 1);
	link_init(&uart[1]->list);
	cap_list_insert(uart[1], &kres->devmem_free);

	uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
	uart[2]->start = __pfn(PLATFORM_UART2_BASE);
	uart[2]->end = uart[2]->start + 1;
	uart[2]->size = uart[2]->end - uart[2]->start;
	cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
	cap_set_devnum(uart[2], 2);
	link_init(&uart[2]->list);
	cap_list_insert(uart[2], &kres->devmem_free);

	uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
	uart[3]->start = __pfn(PLATFORM_UART3_BASE);
	uart[3]->end = uart[3]->start + 1;
	uart[3]->size = uart[3]->end - uart[3]->start;
	cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
	cap_set_devnum(uart[3], 3);
	link_init(&uart[3]->list);
	cap_list_insert(uart[3], &kres->devmem_free);

	/* Setup timer1 capability as free */
	timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
	timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
	timer[1]->end = timer[1]->start + 1;
	timer[1]->size = timer[1]->end - timer[1]->start;
	cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
	cap_set_devnum(timer[1], 1);
	link_init(&timer[1]->list);
	cap_list_insert(timer[1], &kres->devmem_free);
#endif
	return 0;
}
/*
 * Map and initialize the platform GIC(s).
 *
 * Currently a no-op on this platform: the whole body is compiled out
 * with #if 0 pending bring-up of this board's interrupt path.
 */
void init_platform_irq_controller()
{
#if 0
	unsigned int sysctrl = PLATFORM_SYSCTRL_VBASE;

	/* Unlock the system controller, set interrupt mode, relock */
	write(SYSCTRL_UNLOCK, sysctrl + SYS_LOCK);
	write(PLD_CTRL1_INTMOD_WITHOUT_DCC, sysctrl+SYS_PLDCTL1);
	write(SYSCTRL_LOCK, sysctrl + SYS_LOCK); /* Lock again */

#ifdef CONFIG_CPU_ARM11MPCORE
	/* TODO: we need to map 64KB ?*/
	add_boot_mapping(ARM11MP_PRIVATE_MEM_BASE, EB_MPCORE_PRIV_MEM_VBASE,
			 PAGE_SIZE*2, MAP_IO_DEFAULT);
	gic_dist_init(0, GIC0_DIST_VBASE);
	gic_cpu_init(0, GIC0_CPU_VBASE);
#endif

	add_boot_mapping(PLATFORM_GIC1_BASE, PLATFORM_GIC1_VBASE, PAGE_SIZE*2,
			 MAP_IO_DEFAULT);
	gic_dist_init(1, GIC1_DIST_VBASE);
	gic_cpu_init(1, PLATFORM_GIC1_VBASE);

	irq_controllers_init();
#endif
}
/*
 * Map devices intended to be driven from userspace.
 * Nothing is exported on this platform yet.
 */
void init_platform_devices()
{
}

View File

@@ -0,0 +1,24 @@
# Inherit global environment
import os, sys
PROJRELROOT = '../../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'platform', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)
# This is a realview-family platform; include the corresponding shared files.
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'), exports = {'env' : env, 'symbols' : symbols},
duplicate=0, build_dir='realview')
Return('obj')

32
src/platform/pba9/irq.c Normal file
View File

@@ -0,0 +1,32 @@
/*
* Platform generic irq handling
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/generic/time.h>
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include <l4/drivers/irq/gic/gic.h>
extern struct gic_data gic_data[IRQ_CHIPS_MAX];
/*
 * Compile-time description of this platform's single interrupt
 * controller: the Versatile Express A9 GIC, handled through the
 * generic GIC driver callbacks.
 */
struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
	[0] = {
		.name = "VX-A9 GIC",
		.level = 0,			/* top-level chip, no parent */
		.cascade = IRQ_NIL,		/* nothing cascades into it */
		.start = 0,
		.end = IRQ_OFFSET + IRQS_MAX,	/* irq number range served */
		.data = &gic_data[0],		/* per-chip GIC driver state */
		.ops = {
			.init = gic_dummy_init,
			.read_irq = gic_read_irq,
			.ack_and_mask = gic_ack_and_mask,
			.unmask = gic_unmask_irq,
		},
	},
};

View File

@@ -0,0 +1,58 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/generic/platform.h>
#include <l4/generic/space.h>
#include <l4/generic/irq.h>
#include <l4/generic/bootmem.h>
#include INC_ARCH(linker.h)
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_PLAT(offsets.h)
#include INC_PLAT(platform.h)
#include INC_PLAT(irq.h)
#include INC_ARCH(asm.h)
#include INC_GLUE(mapping.h)
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/drivers/irq/gic/gic.h>
/*
* The devices that are used by the kernel are mapped
* independent of these capabilities, but these provide a
* concise description of what is used by the kernel.
*/
/*
 * Describe the platform devices available to userspace as free device
 * capabilities on kres->devmem_free. On this platform only the
 * secondary timer is exported. Always returns 0 (success).
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
	struct capability *timer[2];

	/* Setup timer1 capability as free */
	timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
	timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
	timer[1]->end = timer[1]->start + 1;	/* one page of device memory */
	timer[1]->size = timer[1]->end - timer[1]->start;
	cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
	cap_set_devnum(timer[1], 1);
	link_init(&timer[1]->list);
	cap_list_insert(timer[1], &kres->devmem_free);

	return 0;
}
/*
 * Map the MPCore private memory region (GIC distributor + cpu
 * interface live there), initialize the GIC, then bring up the
 * registered irq chips.
 */
void init_platform_irq_controller()
{
	/* TODO: we need to map 64KB ?*/
	add_boot_mapping(MPCORE_PRIVATE_BASE, MPCORE_PRIVATE_VBASE,
			 PAGE_SIZE * 2, MAP_IO_DEFAULT);
	gic_dist_init(0, GIC0_DIST_VBASE);
	gic_cpu_init(0, GIC0_CPU_VBASE);
	irq_controllers_init();
}
/*
 * Map devices intended to be driven from userspace.
 * Nothing is exported on this platform yet.
 */
void init_platform_devices()
{
}

View File

@@ -0,0 +1,22 @@
# Inherit global environment
import os, sys, glob
PROJRELROOT = '../../'
sys.path.append(PROJRELROOT)
from config.projpaths import *
from configure import *
Import('env', 'symbols')
# The set of source files associated with this SConscript file.
src_local = ['irq.c', 'platform.c', 'print-early.c', 'perfmon.c', 'cpuperf.S']
for name, val in symbols:
if 'CONFIG_SMP' == name:
src_local += ['smp.c']
obj = env.Object(src_local)
Return('obj')

Some files were not shown because too many files have changed in this diff Show More