mirror of
https://github.com/drasko/codezero.git
synced 2026-01-18 22:03:16 +01:00
Initial commit
This commit is contained in:
10
src/glue/arm/SConscript
Normal file
10
src/glue/arm/SConscript
Normal file
@@ -0,0 +1,10 @@
|
||||
|
||||
|
||||
# Inherit global environment
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['init.c', 'memory.c', 'systable.c']

# Compile the sources into object files and hand them back to the
# calling SConstruct for linking into the kernel image.
obj = env.Object(src_local)
Return('obj')
396
src/glue/arm/init.c
Normal file
396
src/glue/arm/init.c
Normal file
@@ -0,0 +1,396 @@
|
||||
/*
|
||||
* Main initialisation code for the ARM kernel
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/lib/mutex.h>
|
||||
#include <l4/lib/printk.h>
|
||||
#include <l4/lib/string.h>
|
||||
#include <l4/lib/idpool.h>
|
||||
#include <l4/generic/kmalloc.h>
|
||||
#include <l4/generic/platform.h>
|
||||
#include <l4/generic/physmem.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include INC_ARCH(linker.h)
|
||||
#include INC_ARCH(asm.h)
|
||||
#include INC_ARCH(bootdesc.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include INC_SUBARCH(mmu_ops.h)
|
||||
#include INC_GLUE(memlayout.h)
|
||||
#include INC_GLUE(memory.h)
|
||||
#include INC_GLUE(utcb.h)
|
||||
#include INC_GLUE(syscall.h)
|
||||
#include INC_PLAT(platform.h)
|
||||
#include INC_PLAT(printascii.h)
|
||||
#include INC_API(syscall.h)
|
||||
#include INC_API(kip.h)
|
||||
|
||||
unsigned int kernel_mapping_end;
|
||||
|
||||
/* Placeholder for global lock initialisation; nothing to set up yet. */
void init_locks(void)
{
}
|
||||
|
||||
/*
 * Maps the early memory regions needed to bootstrap the system.
 *
 * Creates three 1MB section mappings in the initial kspace pgd:
 * the kernel at its virtual link address, the kernel identity-mapped
 * at its physical address (needed while the MMU is being turned on),
 * and the page-table area at its virtual address. Also sets up a
 * mockup `current' ktcb so generic mapping code has a pgd source.
 */
void init_kernel_mappings(void)
{
	/* Start from a zeroed initial page table */
	init_clear_ptab();

	/* Map kernel area to its virtual region */
	add_section_mapping_init(virt_to_phys(_start_text),
				 (unsigned int)_start_text, 1,
				 cacheable | bufferable);

	/* Map kernel one-to-one to its physical region */
	add_section_mapping_init(virt_to_phys(_start_text),
				 virt_to_phys(_start_text),
				 1, 0);

	/* Map page table to its virtual region */
	add_section_mapping_init(virt_to_phys(_start_kspace),
				 (unsigned int)_start_kspace,
				 1, 0);

	/* Clean current before first time access. */
	memset(current, 0, sizeof(struct ktcb));

	/*
	 * Setup a dummy current ktcb over the bootstack, so that generic
	 * mapping functions can use this as the pgd source.
	 */
	current->pgd = &kspace;
}
|
||||
|
||||
/* Dumps the linker-defined section boundaries of the kernel image. */
void print_sections(void)
{
	dprintk("_start_kernel: ", (unsigned int)_start_kernel);
	dprintk("_start_text: ", (unsigned int)_start_text);
	dprintk("_end_text: ", (unsigned int)_end_text);
	dprintk("_start_data: ", (unsigned int)_start_data);
	dprintk("_end_data: ", (unsigned int)_end_data);
	dprintk("_start_vectors: ", (unsigned int)_start_vectors);
	dprintk("arm_high_vector: ", (unsigned int)arm_high_vector);
	dprintk("_end_vectors: ", (unsigned int)_end_vectors);
	dprintk("_start_kip: ", (unsigned int)_start_kip);
	dprintk("_end_kip: ", (unsigned int)_end_kip);
	dprintk("_bootstack: ", (unsigned int)_bootstack);
	dprintk("_end_kernel: ", (unsigned int)_end_kernel);
	dprintk("_start_kspace: ", (unsigned int)_start_kspace);
	dprintk("_start_pmd: ", (unsigned int)_start_pmd);
	dprintk("_end_pmd: ", (unsigned int)_end_pmd);
	dprintk("_end_kspace: ", (unsigned int)_end_kspace);
	dprintk("_end: ", (unsigned int)_end);
}
|
||||
|
||||
/*
 * Enable virtual memory using kernel's first level table
 * and continue execution on virtual addresses.
 *
 * Runs on physical addresses with the MMU off; on return, the
 * caller executes at its virtual link address. The caller must
 * never return (its saved LR is still physical) — it must go on
 * to start scheduling.
 */
void start_vm()
{
	/*
	 * TTB must be 16K aligned. This is because first level tables are
	 * sized 16K.
	 */
	if ((unsigned int)&kspace & 0x3FFF)
		dprintk("kspace not properly aligned for ttb:",
			(u32)&kspace);
	/*
	 * NOTE(review): this memset targets the *virtual* address of
	 * kspace while the MMU is still off, unlike init_clear_ptab()
	 * which uses virt_to_phys(&kspace) — confirm this is intended.
	 */
	memset((void *)&kspace, 0, sizeof(pgd_table_t));
	arm_set_ttb(virt_to_phys(&kspace));

	/*
	 * This sets all 16 domains to zero and domain 0 to 1. The outcome
	 * is that page table access permissions are in effect for domain 0.
	 * All other domains have no access whatsoever.
	 */
	arm_set_domain(1);

	/* Enable everything before mmu permissions are in place */
	arm_enable_caches();
	arm_enable_wbuffer();

	/*
	 * Leave the past behind. Tlbs are invalidated, write buffer is drained.
	 * The whole of I + D caches are invalidated unconditionally. This is
	 * important to ensure that the cache is free of previously loaded
	 * values. Otherwise unpredictable data aborts may occur at arbitrary
	 * times, each time a load/store operation hits one of the invalid
	 * entries and those entries are cleaned to main memory.
	 */
	arm_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_enable_mmu();

	/* Jump to virtual memory addresses */
	__asm__ __volatile__ (
		"add	sp, sp, %0	\n"	/* Update stack pointer */
#ifndef __OPTIMIZED_FP__		/* If fp not optimised away */
		"add	fp, fp, %0	\n"	/* Update frame pointer */
#endif
		/* On the next instruction below, r0 gets
		 * current PC + KOFFSET + 2 instructions after itself. */
		"add	r0, pc, %0	\n"
		/* Special symbol that is extracted and included in the loader.
		 * Debuggers can break on it to load the virtual symbol table */
		".global break_virtual;\n"
		"bkpt_phys_to_virt:\n"
		"mov	pc, r0	\n"	/* (r0 has next instruction) */
		:
		: "r" (KERNEL_OFFSET)
		: "r0"
	);
	/* At this point, execution is on virtual addresses. */
	remove_section_mapping(virt_to_phys(_start_kernel));

	/*
	 * Restore link register (LR) for this function.
	 *
	 * NOTE: LR values are pushed onto the stack at each function call,
	 * which means the restored return values will be physical for all
	 * functions in the call stack except this function. So the caller
	 * of this function must never return but initiate scheduling etc.
	 */
	__asm__ __volatile__ (
		"add	%0, %0, %1	\n"
		"mov	pc, %0	\n"
		:: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
	);
	/* Not reached: the asm above already jumped to the virtual LR. */
	while(1);
}
|
||||
|
||||
/* This calculates what address the kip field would have in userspace. */
|
||||
#define KIP_USR_OFFSETOF(kip, field) ((void *)(((unsigned long)&kip.field - \
|
||||
(unsigned long)&kip) + USER_KIP_PAGE))
|
||||
|
||||
/*
 * The kip is non-standard, using 0xBB to indicate mine for now ;-)
 *
 * Fills in the kernel interface page (identification, generation
 * date, syscall entry addresses, utcb area pointer) and maps it
 * read-only into userspace at USER_KIP_PAGE.
 */
void kip_init()
{
	struct utcb **utcb_ref;

	memset(&kip, 0, PAGE_SIZE);
	/*
	 * Magic name field. NOTE(review): the original comment said
	 * "l4uK" but the bytes written are "L4\230K" — confirm which
	 * magic is intended.
	 */
	memcpy(&kip, "L4\230K", 4);
	kip.api_version = 0xBB;
	kip.api_subversion = 1;
	kip.api_flags = 0;	/* LE, 32-bit architecture */
	kip.kdesc.subid = 0x1;
	kip.kdesc.id = 0xBB;
	/* Build date packed as year/month/day bitfields */
	kip.kdesc.gendate = (__YEAR__ << 9)|(__MONTH__ << 5)|(__DAY__);
	kip.kdesc.subsubver = 0x00000001;	/* Consider as .00000001 */
	kip.kdesc.ver = 0;
	memcpy(&kip.kdesc.supplier, "BBB", 3);

	/* Publish syscall entry addresses in the kip */
	kip_init_syscalls();

	/* KIP + 0xFF0 is pointer to UTCB area for this thread group. */
	utcb_ref = (struct utcb **)((unsigned long)&kip + UTCB_KIP_OFFSET);

	/* All thread groups have their utcb mapped at UTCB_AREA_START */
	*utcb_ref = (struct utcb *)UTCB_AREA_START;

	/* Userspace reads the kip but must never write it */
	add_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE,
		    MAP_USR_RO_FLAGS);
}
|
||||
|
||||
|
||||
void vectors_init()
|
||||
{
|
||||
unsigned int size = ((u32)_end_vectors - (u32)arm_high_vector);
|
||||
|
||||
/* Map the vectors in high vector page */
|
||||
add_mapping(virt_to_phys(arm_high_vector),
|
||||
ARM_HIGH_VECTOR, size, 0);
|
||||
arm_enable_high_vectors();
|
||||
|
||||
/* Kernel memory trapping is enabled at this point. */
|
||||
}
|
||||
|
||||
/*
 * Halts the system by deliberately generating a fault: a data abort
 * via a load from address 0 (a prefetch-abort variant is kept under
 * #if 0 for reference).
 */
void abort()
{
	printk("Aborting on purpose to halt system.\n");
#if 0
	/* Prefetch abort */
	__asm__ __volatile__ (
		"mov	pc, #0x0\n"
		::
	);
#endif
	/* Data abort */
	__asm__ __volatile__ (
		"mov	r0, #0	\n"
		"ldr	r0, [r0]	\n"
		::
	);
}
|
||||
|
||||
/*
 * Restores a full userspace register context from the task's
 * context area and jumps to userspace, switching CPSR to user mode
 * via the movs. Does not return.
 *
 * NOTE(review): assumes the context area sits at offset 0 of
 * struct ktcb, laid out as spsr, r0-r14, then pc (pc at +64) —
 * confirm against the ktcb/context definition.
 */
void jump(struct ktcb *task)
{
	__asm__ __volatile__ (
		"mov	lr,	%0\n"	/* Load pointer to context area */
		"ldr	r0,	[lr]\n"	/* Load spsr value to r0 */
		"msr	spsr, r0\n"	/* Set SPSR as ARM_MODE_USR */
		"ldmib	lr, {r0-r14}^\n" /* Load all USR registers */

		"nop		\n"	/* Spec says dont touch banked registers
					 * right after LDM {no-pc}^ for one instruction */
		"add	lr, lr, #64\n"	/* Manually move to PC location. */
		"ldr	lr,	[lr]\n"	/* Load the PC_USR to LR */
		"movs	pc,	lr\n"	/* Jump to userspace, also switching SPSR/CPSR */
		:
		: "r" (task)
	);
}
|
||||
|
||||
/*
 * Switches the address space to the given task's pgd and enters
 * userspace through jump(). The cache is cleaned and invalidated
 * and the TLB flushed around the TTB switch so no stale lines or
 * translations from the previous space survive.
 */
void switch_to_user(struct ktcb *task)
{
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(task->pgd));
	arm_invalidate_tlb();
	jump(task);
}
|
||||
|
||||
void init_inittask(char *name, struct task_ids *ids)
|
||||
{
|
||||
struct svc_image *taskimg;
|
||||
struct ktcb *task;
|
||||
int task_pages;
|
||||
|
||||
/*
|
||||
* NOTE: Inittask uses the kernel bootstack as its PAGE_SIZE'd kernel
|
||||
* stack. There is no problem with this as the inittask always exists.
|
||||
* This also solves the problem of freeing the bootstack and making use
|
||||
* of the initial kspace pgd.
|
||||
*/
|
||||
if (!strcmp(name, "mm0"))
|
||||
task = current; /* mm0 is the mockup current during init */
|
||||
else
|
||||
task = (struct ktcb *)zalloc_page();
|
||||
|
||||
/*
|
||||
* Search the compile-time generated boot descriptor for information on
|
||||
* available task images.
|
||||
*/
|
||||
for (int i = 0; i < bootdesc->total_images; i++) {
|
||||
if (!strcmp(name, bootdesc->images[i].name)) {
|
||||
BUG_ON(!(taskimg = &bootdesc->images[i]));
|
||||
break;
|
||||
}
|
||||
}
|
||||
printk("\nInitialising %s.\n", name);
|
||||
if (taskimg->phys_start & PAGE_MASK)
|
||||
printk("Warning, image start address not page aligned.\n");
|
||||
|
||||
/* Calculate the number of pages the task sections occupy. */
|
||||
task_pages = __pfn((page_align_up(taskimg->phys_end) -
|
||||
page_align(taskimg->phys_start)));
|
||||
task->context.pc = INITTASK_AREA_START;
|
||||
|
||||
/* Stack starts one page above the end of image. */
|
||||
task->context.sp = INITTASK_AREA_END - 8;
|
||||
task->context.spsr = ARM_MODE_USR;
|
||||
|
||||
set_task_ids(task, ids);
|
||||
|
||||
if (!task->pgd) {
|
||||
BUG(); /* Inittask won't come here */
|
||||
task->pgd = alloc_pgd();
|
||||
/* Tasks with no pgd copy from the inittask's pgd. */
|
||||
memcpy(task->pgd, current->pgd, sizeof(pgd_table_t));
|
||||
}
|
||||
|
||||
/*
|
||||
* This task's userspace mapping. This should allocate a new pmd, if not
|
||||
* existing, and a new page entry on its private pgd.
|
||||
*/
|
||||
add_mapping_pgd(taskimg->phys_start, INITTASK_AREA_START,
|
||||
task_pages * PAGE_SIZE, MAP_USR_DEFAULT_FLAGS,
|
||||
task->pgd);
|
||||
printk("Mapping %d pages from 0x%x to 0x%x for %s\n", task_pages,
|
||||
taskimg->phys_start, INITTASK_AREA_START, name);
|
||||
|
||||
/* Add the physical pages used by the task to the page map */
|
||||
set_page_map(taskimg->phys_start, task_pages, 1);
|
||||
|
||||
/* Task's rendezvous point */
|
||||
waitqueue_head_init(&task->wqh_send);
|
||||
waitqueue_head_init(&task->wqh_recv);
|
||||
|
||||
/* Tasks' rendezvous blocked list */
|
||||
spin_lock_init(&task->ipc_block_lock);
|
||||
INIT_LIST_HEAD(&task->ipc_block_list);
|
||||
|
||||
/* Global hashlist that keeps all existing tasks */
|
||||
add_task_global(task);
|
||||
|
||||
/* Scheduler initialises the very first task itself */
|
||||
}
|
||||
|
||||
void init_tasks()
|
||||
{
|
||||
struct task_ids ids;
|
||||
|
||||
/* Initialise thread and space id pools */
|
||||
thread_id_pool = id_pool_new_init(THREAD_IDS_MAX);
|
||||
space_id_pool = id_pool_new_init(SPACE_IDS_MAX);
|
||||
ids.tid = id_new(thread_id_pool);
|
||||
ids.spid = id_new(space_id_pool);
|
||||
|
||||
/* Initialise the global task list head */
|
||||
INIT_LIST_HEAD(&global_task_list);
|
||||
|
||||
/*
|
||||
* This must come last so that other tasks can copy its pgd before it
|
||||
* modifies it for its own specifics.
|
||||
*/
|
||||
init_inittask("mm0", &ids);
|
||||
}
|
||||
|
||||
/*
 * Main entry point for kernel initialisation. The call order below
 * is strict: the MMU must be on before page-granular mappings, the
 * vector page must be live before anything that can fault, and
 * allocators must exist before kip/syscall/task setup. Ends by
 * handing control to the scheduler and never returns.
 */
void start_kernel(void)
{
	printascii("\nstart_kernel...\n");
	/* Print section boundaries for kernel image */
	//print_sections();

	/* Initialise section mappings for the kernel area */
	init_kernel_mappings();

	/* Enable virtual memory and jump to virtual addresses */
	start_vm();

	/* PMD tables initialised */
	init_pmd_tables();

	/* Initialise platform-specific page mappings, and peripherals */
	platform_init();

	/* Map and enable high vector page. Faults can be handled after here. */
	vectors_init();

	/* Remap 1MB kernel sections as 4Kb pages. */
	remap_as_pages(_start_kernel, _end_kernel);

	/* Move the initial pgd into a more convenient place, mapped as pages. */
	relocate_page_tables();

	/* Initialise memory allocators */
	paging_init();

	/* Initialise kip and map for userspace access */
	kip_init();

	/* Initialise system call page */
	syscall_init();

	/* Initialise everything else, e.g. locks, lists... */
	init_locks();

	/* Setup inittask's ktcb and push it to scheduler runqueue */
	init_tasks();

	/* Start the scheduler with available tasks in the runqueue */
	scheduler_start();

	/* Never reached: scheduler_start() does not return. */
	BUG();
}
|
||||
|
||||
4
src/glue/arm/irq.c
Normal file
4
src/glue/arm/irq.c
Normal file
@@ -0,0 +1,4 @@
|
||||
/*
|
||||
* ARM Generic irq handler
|
||||
*/
|
||||
|
||||
135
src/glue/arm/memory.c
Normal file
135
src/glue/arm/memory.c
Normal file
@@ -0,0 +1,135 @@
|
||||
/*
|
||||
* ARM virtual memory implementation
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/lib/list.h>
|
||||
#include <l4/lib/string.h>
|
||||
#include <l4/lib/printk.h>
|
||||
#include <l4/generic/physmem.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include INC_GLUE(memlayout.h)
|
||||
#include INC_GLUE(memory.h)
|
||||
#include INC_PLAT(printascii.h)
|
||||
#include INC_PLAT(offsets.h)
|
||||
#include INC_ARCH(linker.h)
|
||||
|
||||
/*
|
||||
* Conversion from generic protection flags to arch-specific
|
||||
* pte flags.
|
||||
*/
|
||||
unsigned int space_flags_to_ptflags(unsigned int flags)
|
||||
{
|
||||
switch (flags) {
|
||||
case MAP_USR_RW_FLAGS:
|
||||
return __MAP_USR_RW_FLAGS;
|
||||
case MAP_USR_RO_FLAGS:
|
||||
return __MAP_USR_RO_FLAGS;
|
||||
case MAP_SVC_RW_FLAGS:
|
||||
return __MAP_SVC_RW_FLAGS;
|
||||
case MAP_USR_IO_FLAGS:
|
||||
return __MAP_USR_IO_FLAGS;
|
||||
case MAP_SVC_IO_FLAGS:
|
||||
return __MAP_SVC_IO_FLAGS;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
BUG(); return 0;
|
||||
}
|
||||
|
||||
#define NUM_PMD_TABLES 6
|
||||
#define NUM_PGD_TABLES 8
|
||||
|
||||
/* Initial first level page table to provide startup mappings */
|
||||
SECTION(".kspace.pgd") pgd_table_t kspace;
|
||||
SECTION(".kspace.pmd") pmd_table_t pmd_tables[NUM_PMD_TABLES];
|
||||
|
||||
/* A mini bitmap for boot pmd allocations */
|
||||
static int pmd_cnt;
|
||||
pmd_table_t *pmd_array;
|
||||
|
||||
pmd_table_t *alloc_boot_pmd(void)
|
||||
{
|
||||
pmd_table_t *pt;
|
||||
|
||||
if (pmd_cnt == NUM_PMD_TABLES)
|
||||
return 0;
|
||||
|
||||
pt = &pmd_array[pmd_cnt++];
|
||||
BUG_ON((unsigned long)pt & (sizeof(pmd_table_t) - 1));
|
||||
|
||||
return pt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialises pmd allocation cache, this is called before page allocator
|
||||
* initialises. After this call one can add page mappings via add_mapping().
|
||||
* This also sets the alloc_pmd() global function to this boot-time version.
|
||||
*/
|
||||
void init_pmd_tables(void)
|
||||
{
|
||||
pmd_cnt = 0;
|
||||
pmd_array = pmd_tables;
|
||||
memset(pmd_array, 0, NUM_PMD_TABLES * sizeof(pmd_table_t));
|
||||
}
|
||||
|
||||
/*
 * Clears out all entries in the initial page table.
 * Runs before the MMU is enabled, so the table is addressed via
 * its physical address rather than &kspace directly.
 */
void init_clear_ptab(void)
{
	memset((void *)virt_to_phys(&kspace), 0, sizeof(pgd_table_t));
}
|
||||
|
||||
/*
 * Sets up struct page array and the physical memory descriptor.
 * Order matters: the boot descriptor is read first to learn memory
 * layout, then physical memory accounting and the allocators come
 * up, and finally the bootdesc is copied into allocated memory.
 */
void paging_init(void)
{
	read_bootdesc();
	physmem_init();
	memory_init();
	copy_bootdesc();
}
|
||||
|
||||
/*
 * Copies global kernel entries into another pgd. Even for sub-pmd ranges
 * the associated pmd entries are copied, assuming any pmds copied are
 * applicable to all tasks in the system.
 *
 * @to/@from: destination and source first-level tables.
 * @start/@end: virtual range; widened outward to pmd boundaries.
 */
void copy_pgd_kern_by_vrange(pgd_table_t *to, pgd_table_t *from,
			     unsigned long start, unsigned long end)
{
	/* Extend sub-pmd ranges to their respective pmd boundaries */
	start = align(start, PMD_MAP_SIZE);

	/* end below start means the range wrapped; treat end as 0 */
	if (end < start)
		end = 0;

	/* Aligning would overflow if mapping the last virtual pmd */
	if (end < align(~0, PMD_MAP_SIZE) ||
	    start > end)	/* end may have already overflown as input */
		end = align_up(end, PMD_MAP_SIZE);
	else
		end = 0;	/* 0 stands for the top of the address space */

	copy_pgds_by_vrange(to, from, start, end);
}
|
||||
|
||||
/* Copies all standard bits that a user process should have in its pgd */
|
||||
void copy_pgd_kern_all(pgd_table_t *to)
|
||||
{
|
||||
pgd_table_t *from = current->pgd;
|
||||
|
||||
copy_pgd_kern_by_vrange(to, from, KERNEL_AREA_START, KERNEL_AREA_END);
|
||||
copy_pgd_kern_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
|
||||
copy_pgd_kern_by_vrange(to, from, USER_KIP_PAGE,
|
||||
USER_KIP_PAGE + PAGE_SIZE);
|
||||
copy_pgd_kern_by_vrange(to, from, ARM_HIGH_VECTOR,
|
||||
ARM_HIGH_VECTOR + PAGE_SIZE);
|
||||
copy_pgd_kern_by_vrange(to, from, ARM_SYSCALL_VECTOR,
|
||||
ARM_SYSCALL_VECTOR + PAGE_SIZE);
|
||||
|
||||
/* We temporarily map uart registers to every process */
|
||||
copy_pgd_kern_by_vrange(to, from, USERSPACE_UART_BASE,
|
||||
USERSPACE_UART_BASE + PAGE_SIZE);
|
||||
}
|
||||
|
||||
82
src/glue/arm/systable.c
Normal file
82
src/glue/arm/systable.c
Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
* System Calls
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/lib/mutex.h>
|
||||
#include <l4/lib/printk.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/api/errno.h>
|
||||
#include INC_GLUE(memlayout.h)
|
||||
#include INC_GLUE(syscall.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include INC_API(syscall.h)
|
||||
#include INC_API(kip.h)
|
||||
|
||||
void kip_init_syscalls(void)
|
||||
{
|
||||
kip.space_control = ARM_SYSCALL_PAGE + sys_space_control_offset;
|
||||
kip.thread_control = ARM_SYSCALL_PAGE + sys_thread_control_offset;
|
||||
kip.ipc_control = ARM_SYSCALL_PAGE + sys_ipc_control_offset;
|
||||
kip.map = ARM_SYSCALL_PAGE + sys_map_offset;
|
||||
kip.ipc = ARM_SYSCALL_PAGE + sys_ipc_offset;
|
||||
kip.kread = ARM_SYSCALL_PAGE + sys_kread_offset;
|
||||
kip.unmap = ARM_SYSCALL_PAGE + sys_unmap_offset;
|
||||
kip.exchange_registers = ARM_SYSCALL_PAGE + sys_exchange_registers_offset;
|
||||
kip.thread_switch = ARM_SYSCALL_PAGE + sys_thread_switch_offset;
|
||||
kip.schedule = ARM_SYSCALL_PAGE + sys_schedule_offset;
|
||||
kip.getid = ARM_SYSCALL_PAGE + sys_getid_offset;
|
||||
kip.kmem_grant = ARM_SYSCALL_PAGE + sys_kmem_grant_offset;
|
||||
kip.kmem_reclaim = ARM_SYSCALL_PAGE + sys_kmem_reclaim_offset;
|
||||
}
|
||||
|
||||
/* Jump table for all system calls. */
|
||||
syscall_fn_t syscall_table[SYSCALLS_TOTAL];
|
||||
|
||||
/*
|
||||
* Initialises the system call jump table, for kernel to use.
|
||||
* Also maps the system call page into userspace.
|
||||
*/
|
||||
void syscall_init()
|
||||
{
|
||||
syscall_table[sys_ipc_offset >> 2] = (syscall_fn_t)sys_ipc;
|
||||
syscall_table[sys_thread_switch_offset >> 2] = (syscall_fn_t)sys_thread_switch;
|
||||
syscall_table[sys_thread_control_offset >> 2] = (syscall_fn_t)sys_thread_control;
|
||||
syscall_table[sys_exchange_registers_offset >> 2] = (syscall_fn_t)sys_exchange_registers;
|
||||
syscall_table[sys_schedule_offset >> 2] = (syscall_fn_t)sys_schedule;
|
||||
syscall_table[sys_getid_offset >> 2] = (syscall_fn_t)sys_getid;
|
||||
syscall_table[sys_unmap_offset >> 2] = (syscall_fn_t)sys_unmap;
|
||||
syscall_table[sys_space_control_offset >> 2] = (syscall_fn_t)sys_space_control;
|
||||
syscall_table[sys_ipc_control_offset >> 2] = (syscall_fn_t)sys_ipc_control;
|
||||
syscall_table[sys_map_offset >> 2] = (syscall_fn_t)sys_map;
|
||||
syscall_table[sys_kread_offset >> 2] = (syscall_fn_t)sys_kread;
|
||||
syscall_table[sys_kmem_grant_offset >> 2] = (syscall_fn_t)sys_kmem_grant;
|
||||
syscall_table[sys_kmem_reclaim_offset >> 2] = (syscall_fn_t)sys_kmem_reclaim;
|
||||
|
||||
add_mapping(virt_to_phys(&__syscall_page_start),
|
||||
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);
|
||||
}
|
||||
|
||||
/* Checks a syscall is legitimate and dispatches to appropriate handler. */
|
||||
int syscall(struct syscall_args *regs, unsigned long swi_addr)
|
||||
{
|
||||
/* Check if genuine system call, coming from the syscall page */
|
||||
if ((swi_addr & ARM_SYSCALL_PAGE) == ARM_SYSCALL_PAGE) {
|
||||
/* Check within syscall offset boundary */
|
||||
if (((swi_addr & syscall_offset_mask) >= 0) &&
|
||||
((swi_addr & syscall_offset_mask) <= syscalls_end_offset)) {
|
||||
/* Quick jump, rather than compare each */
|
||||
return (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
|
||||
} else {
|
||||
printk("System call received from call @ 0x%lx."
|
||||
"Instruction: 0x%lx.\n", swi_addr,
|
||||
*((unsigned long *)swi_addr));
|
||||
return -ENOSYS;
|
||||
}
|
||||
} else {
|
||||
printk("System call exception from unknown location 0x%lx."
|
||||
"Discarding.\n", swi_addr);
|
||||
return -ENOSYS;
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user