mirror of https://github.com/drasko/codezero.git (synced 2026-01-13 03:13:15 +01:00)

Initial commit
10
src/arch/arm/SConscript
Normal file
@@ -0,0 +1,10 @@


# Inherit global environment
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception.c', 'bootdesc.c']
obj = env.Object(src_local)

Return('obj')
45
src/arch/arm/bootdesc.c
Normal file
@@ -0,0 +1,45 @@
/*
 * Reading of the bootdesc forged at build time.
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/space.h>
#include INC_ARCH(linker.h)
#include INC_ARCH(bootdesc.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_SUBARCH(mm.h)

struct bootdesc *bootdesc;

void copy_bootdesc(void)
{
	struct bootdesc *new = kzalloc(bootdesc->desc_size);

	memcpy(new, bootdesc, bootdesc->desc_size);
	remove_mapping((unsigned long)bootdesc);
	bootdesc = new;
}

void read_bootdesc(void)
{
	/*
	 * End of the kernel image is where bootdesc resides. Note this is
	 * not added to the page_map because it's meant to be discarded.
	 */
	add_mapping(virt_to_phys(_end), (unsigned long)_end, PAGE_SIZE,
		    MAP_USR_DEFAULT_FLAGS);

	/* Get the original bootdesc */
	bootdesc = (struct bootdesc *)_end;

	/* Determine the end of physical memory used by loaded images. */
	for (int i = 0; i < bootdesc->total_images; i++)
		if (bootdesc->images[i].phys_end > __svc_images_end)
			__svc_images_end = bootdesc->images[i].phys_end;
}
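For orientation, here is a minimal sketch of the descriptor layout that read_bootdesc() implies. The field names are inferred from the accesses in this file, not taken from the actual bootdesc.h:

/* Hypothetical layout, inferred from the code above (not from bootdesc.h). */
struct bootdesc_image {
	unsigned long phys_start;	/* assumed counterpart to phys_end */
	unsigned long phys_end;		/* used to advance __svc_images_end */
};

struct bootdesc {
	int desc_size;			/* total size of this descriptor */
	int total_images;		/* number of entries in images[] */
	struct bootdesc_image images[];
};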
218
src/arch/arm/exception.c
Normal file
@@ -0,0 +1,218 @@
/*
 * Debug print support for unexpected exceptions
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/lib/printk.h>
#include <l4/api/ipc.h>
#include <l4/api/errno.h>
#include INC_PLAT(printascii.h)
#include INC_ARCH(exception.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(utcb.h)
#include INC_SUBARCH(mm.h)

/*
 * NOTE: These are defined in libl4 headers for userspace. Syslib uses
 * these as conventional mr offsets to store ipc-related data commonly
 * needed by all ipc parties.
 */
#define MR_TAG		0
#define MR_SENDERID	1
#define MR_UNUSED_START	2

/* Send a fault ipc to the faulty task's pager */
void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
{
	/* mr[0] has the fault tag. The rest is the fault structure */
	u32 mr[MR_TOTAL] = { [MR_TAG] = L4_IPC_TAG_PFAULT,
			     [MR_SENDERID] = current->tid };
	fault_kdata_t *fault = (fault_kdata_t *)&mr[MR_UNUSED_START];

	/* Fill in fault information to pass over during the ipc */
	fault->faulty_pc = faulty_pc;
	fault->fsr = fsr;
	fault->far = far;

	/* Write the pte of the abort address, which is different on pabt/dabt */
	if (is_prefetch_abort(fsr))
		fault->pte = virt_to_pte(faulty_pc);
	else
		fault->pte = virt_to_pte(far);

	/*
	 * System calls save arguments (and message registers) on the kernel
	 * stack. They are then referenced from the caller's ktcb. Here, the
	 * same ktcb reference is set to the fault data, so it gives the effect
	 * as if the ipc to the pager has the fault data in the message
	 * registers saved on the kernel stack during an ipc syscall. Also,
	 * this way the fault does not need to modify the actual utcb MRs in
	 * userspace.
	 */

	/* Assign fault such that it overlaps as the MR0 reference in ktcb. */
	current->syscall_regs = (syscall_args_t *)
				((unsigned long)&mr[0] -
				 offsetof(syscall_args_t, r3));

	/* Send the ipc to the task's pager */
	ipc_sendwait(current->pagerid);

	/*
	 * Pager is now notified and handling the fault. We now sleep on
	 * another queue.
	 */
}
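A rough illustration of the pointer arithmetic above: subtracting offsetof(syscall_args_t, r3) makes the r3 slot of the fake syscall frame coincide with mr[0]. A sketch, assuming syscall_args_t mirrors the r0..r8 register save order (the real definition lives elsewhere in the tree):

/* Sketch only: assumed field layout. */
typedef struct syscall_args {
	u32 r0, r1, r2;
	u32 r3, r4, r5, r6, r7, r8;	/* r3.. are where MR0.. are kept */
} syscall_args_t;

/*
 * With base = (unsigned long)&mr[0] - offsetof(syscall_args_t, r3):
 *   ((syscall_args_t *)base)->r3 == mr[0]  (the fault tag)
 *   ((syscall_args_t *)base)->r4 == mr[1]  (the sender id), and so on,
 * so the ipc path reads the fault data as if it were saved syscall MRs.
 */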
int check_aborts(u32 faulted_pc, u32 fsr, u32 far)
{
	int ret = 0;

	if (is_prefetch_abort(fsr)) {
		dprintk("Prefetch abort @ ", faulted_pc);
		return 0;
	}

	switch (fsr & ARM_FSR_MASK) {
	/* Aborts that are expected on page faults: */
	case DABT_PERM_PAGE:
		dprintk("Page permission fault @ ", far);
		ret = 0;
		break;
	case DABT_XLATE_PAGE:
		dprintk("Page translation fault @ ", far);
		ret = 0;
		break;
	case DABT_XLATE_SECT:
		dprintk("Section translation fault @ ", far);
		ret = 0;
		break;

	/* Aborts that can't be handled by a pager yet: */
	case DABT_TERMINAL:
		dprintk("Terminal fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_VECTOR:
		dprintk("Vector abort (obsolete!) @ ", far);
		ret = -EINVAL;
		break;
	case DABT_ALIGN:
		dprintk("Alignment fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_XLATE_LEVEL1:
		dprintk("External LVL1 translation fault @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_XLATE_LEVEL2:
		dprintk("External LVL2 translation fault @ ", far);
		ret = -EINVAL;
		break;
	case DABT_DOMAIN_SECT:
		dprintk("Section domain fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_DOMAIN_PAGE:
		dprintk("Page domain fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_PERM_SECT:
		dprintk("Section permission fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_LFETCH_SECT:
		dprintk("External section linefetch fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_LFETCH_PAGE:
		dprintk("External page linefetch fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_NON_LFETCH_SECT:
		dprintk("External section non-linefetch fault dabt @ ", far);
		ret = -EINVAL;
		break;
	case DABT_EXT_NON_LFETCH_PAGE:
		dprintk("External page non-linefetch fault dabt @ ", far);
		ret = -EINVAL;
		break;
	default:
		dprintk("FATAL: Unrecognised/Unknown data abort @ ", far);
		dprintk("FATAL: FSR code: ", fsr);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * @r0: The address where the program counter was during the fault.
 * @r1: Contains the fault status register
 * @r2: Contains the fault address register
 */
void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
	set_abort_type(fsr, ARM_DABT);
	dprintk("Data abort @ PC: ", faulted_pc);
	if (check_aborts(faulted_pc, fsr, far) < 0) {
		printascii("This abort can't be handled by any pager.\n");
		goto error;
	}
	if (KERN_ADDR(faulted_pc))
		goto error;

	/* This notifies the pager */
	fault_ipc_to_pager(faulted_pc, fsr, far);

	return;

error:
	disable_irqs();
	dprintk("Unhandled data abort @ PC address: ", faulted_pc);
	dprintk("FAR: ", far);
	dprintk("FSR: ", fsr);
	printascii("Kernel panic.\n");
	printascii("Halting system...\n");
	while (1)
		;
}

void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
{
	set_abort_type(fsr, ARM_PABT);
	if (check_aborts(faulted_pc, fsr, far) < 0) {
		printascii("This abort can't be handled by any pager.\n");
		goto error;
	}
	fault_ipc_to_pager(faulted_pc, fsr, far);
	return;

error:
	disable_irqs();
	while (1)
		;
}

void dump_undef_abort(u32 undef_addr)
{
	dprintk("Undefined instruction at address: ", undef_addr);
	printascii("Halting system...\n");
}

extern int current_irq_nest_count;

/*
 * This is called right where the nest count is increased, in case the
 * nesting goes beyond the predefined maximum. It is another matter whether
 * this limit is enough to guarantee that the kernel stack does not overflow.
 */
void irq_overnest_error(void)
{
	dprintk("Irqs nested beyond limit. Current count: ",
		current_irq_nest_count);
	printascii("Halting system...\n");
	while (1)
		;
}
71
src/arch/arm/head.S
Normal file
@@ -0,0 +1,71 @@
/*
 * ARM kernel entry point
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include INC_ARCH(asm.h)

#define C15_C0_M	0x0001	/* MMU */
#define C15_C0_A	0x0002	/* Alignment */
#define C15_C0_C	0x0004	/* (D) Cache */
#define C15_C0_W	0x0008	/* Write buffer */
#define C15_C0_B	0x0080	/* Endianness */
#define C15_C0_S	0x0100	/* System */
#define C15_C0_R	0x0200	/* ROM */
#define C15_C0_Z	0x0800	/* Branch prediction */
#define C15_C0_I	0x1000	/* I cache */
#define C15_C0_V	0x2000	/* High vectors */


/*
 * This is the entry point of the L4 ARM architecture.
 * The boot loader must call _start with the processor in privileged
 * mode and the MMU disabled.
 */
.section .text.head
BEGIN_PROC(_start)
	/* Setup status register for supervisor mode, interrupts disabled */
	msr	cpsr_fc, #ARM_MODE_SVC

	/* Disable the MMU if it is enabled */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #C15_C0_M	@ Disable MMU
	bic	r0, r0, #C15_C0_C	@ Disable (D) Cache
	bic	r0, r0, #C15_C0_I	@ Disable I cache
	bic	r0, r0, #C15_C0_W	@ Disable Write buffer
	mcr	p15, 0, r0, c1, c0, 0

	/* Setup boot stack (physical address) */
	ldr	sp, _kernel_init_stack

	/* Exception stacks are defined in the vector page */
	msr	cpsr_fc, #ARM_NOIRQ_ABT
	ldr	sp, _kernel_abt_stack
	msr	cpsr_fc, #ARM_NOIRQ_IRQ
	ldr	sp, _kernel_irq_stack
	msr	cpsr_fc, #ARM_NOIRQ_FIQ
	ldr	sp, _kernel_fiq_stack
	msr	cpsr_fc, #ARM_NOIRQ_UND
	ldr	sp, _kernel_und_stack
	msr	cpsr_fc, #ARM_NOIRQ_SVC

	/* Jump to start_kernel */
	bl	start_kernel

	/* Never reached */
1:
	b	1b

_kernel_init_stack:
	.word	_bootstack_physical

/* Exception stacks are defined in the vector page */
_kernel_abt_stack:
	.word	__abt_stack_high
_kernel_irq_stack:
	.word	__irq_stack_high
_kernel_fiq_stack:
	.word	__fiq_stack_high
_kernel_und_stack:
	.word	__und_stack_high
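For reference, the mrc/bic/mcr sequence above is the standard CP15 control-register read-modify-write. A minimal sketch of the same pattern from C with inline assembly, assuming a GCC-style toolchain (the helper name is made up here):

/* Sketch: C equivalent of the control-register update done in _start. */
static inline void cp15_control_clear(unsigned int bits)
{
	unsigned int ctrl;

	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0" : "=r"(ctrl));
	ctrl &= ~bits;			/* e.g. C15_C0_M | C15_C0_C */
	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0" : : "r"(ctrl));
}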
12
src/arch/arm/linker.c
Normal file
@@ -0,0 +1,12 @@
/*
 * Any link-related marker variable that gets updated at runtime is listed here.
 *
 * Copyright (C) 2007 Bahadir Balban
 */

/* The first free address after the last image loaded in physical memory */
unsigned long __svc_images_end;

/* The new boundaries of the page tables after they're relocated */
unsigned long __pt_start;
unsigned long __pt_end;
34
src/arch/arm/syscall.S
Normal file
@@ -0,0 +1,34 @@
/*
 * The syscall page.
 *
 * Exported to userspace, used merely for entering the kernel.
 * Actual handling happens elsewhere.
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include INC_ARCH(asm.h)

.balign 4096
.section .data.syscalls

.global __syscall_page_start;
__syscall_page_start:

/* LR_USR is inspected to find out which system call was made. */
BEGIN_PROC(arm_system_calls)
	swi	0x14	@ ipc			/* 0x0 */
	swi	0x14	@ thread_switch		/* 0x4 */
	swi	0x14	@ thread_control	/* 0x8 */
	swi	0x14	@ exchange_registers	/* 0xc */
	swi	0x14	@ schedule		/* 0x10 */
	swi	0x14	@ unmap			/* 0x14 */
	swi	0x14	@ space_control		/* 0x18 */
	swi	0x14	@ processor_control	/* 0x1c */
	swi	0x14	@ memory_control	/* 0x20 */
	swi	0x14	@ getid			/* 0x24 */
	swi	0x14	@ kread			/* 0x28 */
	swi	0x14	@ kmem_grant		/* 0x2C */
	swi	0x14	@ kmem_reclaim		/* 0x30 */
END_PROC(arm_system_calls)
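To make the offset convention concrete: each slot is a single 4-byte SWI, so the kernel can recover the call number from the trapped instruction's address within this page rather than from the 0x14 immediate. A hedged sketch (function and parameter names assumed):

/* Illustrative demux: the SWI's offset within the syscall page selects
 * the call. page_base is the user virtual address the page is mapped at. */
static inline int syscall_number(unsigned long swi_addr, unsigned long page_base)
{
	return (int)((swi_addr - page_base) >> 2);	/* 0 = ipc ... 12 = kmem_reclaim */
}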
10
src/arch/arm/v5/SConscript
Normal file
@@ -0,0 +1,10 @@


# Inherit global environment
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['mm.c', 'mmu_ops.S', 'mutex.S']

obj = env.Object(src_local)
Return('obj')
537
src/arch/arm/v5/mm.c
Normal file
@@ -0,0 +1,537 @@
/*
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/printk.h>
#include <l4/lib/mutex.h>
#include <l4/lib/string.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(printascii.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)

/*
 * These are indices into arrays with pgd_t or pmd_t sized elements,
 * therefore the index must be divided by the appropriate element size.
 */
#define PGD_INDEX(x)	(((((unsigned long)(x)) >> 18) & 0x3FFC) / sizeof(pgd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x)	(((((unsigned long)(x)) >> 10) & 0x3FC) / sizeof(pmd_t))
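A worked example of the two index macros (illustrative; assumes 4-byte pgd_t/pmd_t entries, as ARM v5 first- and second-level descriptors are):

/*
 * For vaddr = 0x30104abc:
 *   PGD_INDEX: (vaddr >> 18) & 0x3FFC = 0xC04, the byte offset of the 1MB
 *              section entry; divided by sizeof(pgd_t) this gives entry
 *              0x301, i.e. vaddr / 1MB.
 *   PMD_INDEX: (vaddr >> 10) & 0x3FC = 0x10, the byte offset within the
 *              256-entry coarse table; divided by sizeof(pmd_t) this gives
 *              entry 0x4, i.e. (vaddr % 1MB) / 4KB.
 */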
/*
 * Removes the initial mappings needed for the transition to virtual memory.
 * Used one time only.
 */
void remove_section_mapping(unsigned long vaddr)
{
	pgd_table_t *pgd = current->pgd;
	pgd_t pgd_i = PGD_INDEX(vaddr);

	if (!((pgd->entry[pgd_i] & PGD_TYPE_MASK)
	      & PGD_TYPE_SECTION))
		while (1)
			;
	pgd->entry[pgd_i] = 0;
	pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
	arm_invalidate_tlb();
}

/*
 * Maps the given section-aligned @paddr to @vaddr, using enough
 * section-units to fulfill @size in sections. Note this overwrites
 * an existing mapping if the same virtual address was already mapped.
 */
void __add_section_mapping_init(unsigned int paddr,
				unsigned int vaddr,
				unsigned int size,
				unsigned int flags)
{
	pte_t *ppte;
	unsigned int l1_ptab;
	unsigned int l1_offset;

	/* 1st level page table address */
	l1_ptab = virt_to_phys(&kspace);

	/* Get the section offset for this vaddr */
	l1_offset = (vaddr >> 18) & 0x3FFC;

	/* The beginning entry for the mapping */
	ppte = (unsigned int *)(l1_ptab + l1_offset);
	for (int i = 0; i < size; i++) {
		*ppte = 0;			/* Clear out the old value */
		*ppte |= paddr;			/* Assign the physical address */
		*ppte |= PGD_TYPE_SECTION;	/* Assign the translation type */
		/* Domain is 0, therefore no writes. */
		/* Only kernel access allowed */
		*ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
		/* Cacheability/bufferability flags */
		*ppte |= flags;
		ppte++;				/* Next section entry */
		paddr += ARM_SECTION_SIZE;	/* Next physical section */
	}
	return;
}

void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
			      unsigned int size, unsigned int flags)
{
	unsigned int psection;
	unsigned int vsection;

	/* Align each address to the section it resides in */
	psection = paddr & ~ARM_SECTION_MASK;
	vsection = vaddr & ~ARM_SECTION_MASK;

	if (size == 0)
		return;

	__add_section_mapping_init(psection, vsection, size, flags);

	return;
}

/* TODO: Make sure to flush the tlb entry and caches */
void __add_mapping(unsigned int paddr, unsigned int vaddr,
		   unsigned int flags, pmd_table_t *pmd)
{
	unsigned int pmd_i = PMD_INDEX(vaddr);

	pmd->entry[pmd_i] = paddr;
	pmd->entry[pmd_i] |= PMD_TYPE_SMALL;	/* Small page type */
	pmd->entry[pmd_i] |= flags;

	/* TODO: Are both required? Investigate */

	/* TEST:
	 * I think cleaning or invalidating the cache is not required,
	 * because the entries in the cache aren't for the new mapping anyway.
	 * It's required if a mapping is removed, but not when newly added.
	 */
	arm_clean_invalidate_cache();

	/* TEST: the tlb must be flushed because a new mapping is present in
	 * the page tables, and the tlb is inconsistent with the page tables */
	arm_invalidate_tlb();
}

/* Return the pmd associated with @vaddr if one is mapped on the pgd, 0 otherwise. */
pmd_table_t *pmd_exists(pgd_table_t *pgd, unsigned long vaddr)
{
	unsigned int pgd_i = PGD_INDEX(vaddr);

	/* Return true if non-zero pgd entry */
	switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
	case PGD_TYPE_COARSE:
		return (pmd_table_t *)
		       phys_to_virt((pgd->entry[pgd_i] &
				     PGD_COARSE_ALIGN_MASK));

	case PGD_TYPE_FAULT:
		return 0;

	case PGD_TYPE_SECTION:
		dprintk("Warning, a section is already mapped "
			"where a coarse page mapping is attempted:",
			(u32)(pgd->entry[pgd_i]
			      & PGD_SECTION_ALIGN_MASK));
		BUG();
		break;

	case PGD_TYPE_FINE:
		dprintk("Warning, a fine page table is already mapped "
			"where a coarse page mapping is attempted:",
			(u32)(pgd->entry[pgd_i]
			      & PGD_FINE_ALIGN_MASK));
		printk("Fine tables are unsupported. ");
		printk("What is this doing here?");
		BUG();
		break;

	default:
		dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
		BUG();
		break;
	}
	return 0;
}

/* Convert a virtual address to a pte if it exists in the given page tables. */
pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd)
{
	pmd_table_t *pmd = pmd_exists(pgd, virtual);

	if (pmd)
		return (pte_t)pmd->entry[PMD_INDEX(virtual)];
	else
		return (pte_t)0;
}

/* Convert a virtual address to a pte if it exists in the current page tables. */
pte_t virt_to_pte(unsigned long virtual)
{
	return virt_to_pte_from_pgd(virtual, current->pgd);
}

void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
{
	u32 pgd_i = PGD_INDEX(vaddr);
	u32 pmd_phys = virt_to_phys(pmd);

	/* Domain is 0, therefore no writes. */
	pgd->entry[pgd_i] = (pgd_t)pmd_phys;
	pgd->entry[pgd_i] |= PGD_TYPE_COARSE;
}

/*
 * Maps @paddr to @vaddr, covering @size bytes; also allocates a new pmd if
 * necessary. This flavor explicitly supplies the pgd to modify, which is
 * useful when modifying the userspace of processes that are not currently
 * running. (This only makes sense for userspace mappings, since kernel
 * mappings are common to all.)
 */
void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
		     unsigned int size, unsigned int flags,
		     pgd_table_t *pgd)
{
	pmd_table_t *pmd;
	unsigned int numpages = (size >> PAGE_BITS);

	if (size < PAGE_SIZE) {
		printascii("Error: Mapping size must be in bytes, not pages.\n");
		while (1)
			;
	}
	if (size & PAGE_MASK)
		numpages++;

	/* Convert generic map flags to pagetable-specific flags */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all consecutive pages that cover the given size */
	for (int i = 0; i < numpages; i++) {
		/* Check if another mapping already has a pmd attached. */
		pmd = pmd_exists(pgd, vaddr);
		if (!pmd) {
			/*
			 * If this is the first vaddr in
			 * this pmd, allocate a new pmd
			 */
			pmd = alloc_pmd();

			/* Attach pmd to its entry in pgd */
			attach_pmd(pgd, pmd, vaddr);
		}

		/* Attach paddr to this pmd */
		__add_mapping(page_align(paddr),
			      page_align(vaddr), flags, pmd);

		/* Go to the next page to be mapped */
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}

#if 0
/* Maps @paddr to @vaddr, covering @size bytes;
 * also allocates a new pmd if necessary. */
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
		      unsigned int size, unsigned int flags)
{
	pmd_table_t *pmd;
	unsigned int numpages = (size >> PAGE_BITS);

	if (size < PAGE_SIZE) {
		printascii("Error: Mapping size must be in bytes, not pages.\n");
		while (1)
			;
	}
	if (size & PAGE_MASK)
		numpages++;

	/* Convert generic map flags to pagetable-specific flags */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all consecutive pages that cover the given size */
	for (int i = 0; i < numpages; i++) {
		/* Check if another vaddr in the same pmd already
		 * has a pmd attached. */
		pmd = pmd_exists(current->pgd, vaddr);
		if (!pmd) {
			/* If this is the first vaddr in
			 * this pmd, allocate a new pmd */
			pmd = alloc_boot_pmd();

			/* Attach pmd to its entry in pgd */
			attach_pmd(current->pgd, pmd, vaddr);
		}

		/* Attach paddr to this pmd */
		__add_mapping(page_align(paddr),
			      page_align(vaddr), flags, pmd);

		/* Go to the next page to be mapped */
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}
#endif
#if 0
/* Maps @paddr to @vaddr, covering @size bytes;
 * also allocates a new pmd if necessary. */
void add_mapping(unsigned int paddr, unsigned int vaddr,
		 unsigned int size, unsigned int flags)
{
	pmd_table_t *pmd;
	unsigned int numpages = (size >> PAGE_BITS);

	if (size < PAGE_SIZE) {
		printascii("Error: Mapping size must be in bytes, not pages.\n");
		while (1)
			;
	}
	if (size & PAGE_MASK)
		numpages++;

	/* Convert generic map flags to pagetable-specific flags */
	BUG_ON(!(flags = space_flags_to_ptflags(flags)));

	/* Map all consecutive pages that cover the given size */
	for (int i = 0; i < numpages; i++) {
		/* Check if another vaddr in the same pmd already
		 * has a pmd attached. */
		pmd = pmd_exists(current->pgd, vaddr);
		if (!pmd) {
			/* If this is the first vaddr in
			 * this pmd, allocate a new pmd */
			pmd = alloc_pmd();

			/* Attach pmd to its entry in pgd */
			attach_pmd(current->pgd, pmd, vaddr);
		}

		/* Attach paddr to this pmd */
		__add_mapping(page_align(paddr),
			      page_align(vaddr), flags, pmd);

		/* Go to the next page to be mapped */
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}
#endif

void add_mapping(unsigned int paddr, unsigned int vaddr,
		 unsigned int size, unsigned int flags)
{
	add_mapping_pgd(paddr, vaddr, size, flags, current->pgd);
}
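A quick usage sketch of this mapping interface (the addresses here are invented; MAP_IO_DEFAULT_FLAGS is the flag set used later in this file, and remove_mapping() is defined below):

/* Hypothetical example: map one page, probe it, then unmap it. */
add_mapping(0x10140000, 0xf1000000, PAGE_SIZE, MAP_IO_DEFAULT_FLAGS);
BUG_ON(!virt_to_pte(0xf1000000));	/* the pte is non-zero once mapped */
remove_mapping(0xf1000000);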
/* FIXME: Empty PMDs should be returned here !!! */
void __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
{
	pmd_t pmd_i = PMD_INDEX(vaddr);

	switch (pmd->entry[pmd_i] & PMD_TYPE_MASK) {
	case PMD_TYPE_LARGE:
		pmd->entry[pmd_i] = 0;
		pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
		break;
	case PMD_TYPE_SMALL:
		pmd->entry[pmd_i] = 0;
		pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
		break;
	default:
		printk("Unknown page mapping in pmd. Assuming bug.\n");
		BUG();
	}
	return;
}

void remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
	pgd_t pgd_i = PGD_INDEX(vaddr);
	pmd_table_t *pmd;
	pmd_t pmd_i;

	/*
	 * Clean the cache to main memory before removing the mapping.
	 * Otherwise entries in the cache for this mapping will cause
	 * translation faults if they're cleaned to main memory after the
	 * mapping is removed.
	 */
	arm_clean_invalidate_cache();

	/* TEST:
	 * Can't think of a valid reason to flush tlbs here, but keeping it
	 * just to be safe. REMOVE: Remove it if it's unnecessary.
	 */
	arm_invalidate_tlb();

	/* Return true if non-zero pgd entry */
	switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
	case PGD_TYPE_COARSE:
		// printk("Removing coarse mapping @ 0x%x\n", vaddr);
		pmd = (pmd_table_t *)
		      phys_to_virt((pgd->entry[pgd_i]
				    & PGD_COARSE_ALIGN_MASK));
		pmd_i = PMD_INDEX(vaddr);
		__remove_mapping(pmd, vaddr);
		break;

	case PGD_TYPE_FAULT:
		dprintk("Attempting to remove fault mapping. "
			"Assuming bug.\n", vaddr);
		BUG();
		break;

	case PGD_TYPE_SECTION:
		printk("Removing section mapping for 0x%lx", vaddr);
		pgd->entry[pgd_i] = 0;
		pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
		break;

	case PGD_TYPE_FINE:
		printk("Table mapped is a fine page table.\n"
		       "Fine tables are unsupported. Assuming bug.\n");
		BUG();
		break;

	default:
		dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
		printk("Assuming bug.\n");
		BUG();
		break;
	}
	/* The tlb must be invalidated here because it might have cached the
	 * old translation for this mapping. */
	arm_invalidate_tlb();
}

void remove_mapping(unsigned long vaddr)
{
	remove_mapping_pgd(vaddr, current->pgd);
}


extern pmd_table_t *pmd_array;

/*
 * Moves the section-mapped kspace, which resides far apart from the kernel,
 * as close as possible to the kernel image, and unmaps the old 1MB kspace
 * section, which is largely unused.
 */
void relocate_page_tables(void)
{
	/* Adjust the end of kernel address to page table alignment. */
	unsigned long pt_new = align_up(_end_kernel, sizeof(pgd_table_t));
	unsigned long reloc_offset = (unsigned long)_start_kspace - pt_new;
	unsigned long pt_area_size = (unsigned long)_end_kspace -
				     (unsigned long)_start_kspace;

	BUG_ON(reloc_offset & (SZ_1K - 1));

	/* Map the new page table area into the current pgd table */
	add_mapping(virt_to_phys(pt_new), pt_new, pt_area_size,
		    MAP_IO_DEFAULT_FLAGS);

	/* Copy the entire kspace area, i.e. the pgd + static pmds. */
	memcpy((void *)pt_new, _start_kspace, pt_area_size);

	/* Update the only reference to the current pgd table */
	current->pgd = (pgd_table_t *)pt_new;

	/*
	 * Since the pmds are also moved, update the pmd references in the pgd
	 * by subtracting the relocation offset from each valid pmd entry.
	 * TODO: This would be best done within a helper function.
	 */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
		/* If there's a coarse 2nd level entry */
		if ((current->pgd->entry[i] & PGD_TYPE_MASK)
		    == PGD_TYPE_COARSE)
			current->pgd->entry[i] -= reloc_offset;

	/* Update the pmd array pointer. */
	pmd_array = (pmd_table_t *)((unsigned long)_start_pmd - reloc_offset);

	/* Switch the virtual memory system into the new area */
	arm_clean_invalidate_cache();
	arm_drain_writebuffer();
	arm_invalidate_tlb();
	arm_set_ttb(virt_to_phys(current->pgd));
	arm_invalidate_tlb();

	/* Unmap the old page table area */
	remove_section_mapping((unsigned long)&kspace);

	/* Update the page table markers to the new area. Any references would
	 * go to these markers. */
	__pt_start = pt_new;
	__pt_end = pt_new + pt_area_size;

	printk("Initial page table area relocated from phys 0x%x to 0x%x\n",
	       virt_to_phys(&kspace), virt_to_phys(current->pgd));
}

/*
 * Useful for upgrading to page-grained control over a section mapping:
 * remaps a section mapping in pages. It always allocates a pmd (there can't
 * be an already existing pmd for a section mapping), fills in the page
 * information, and replaces the direct section physical translation with
 * the address of the pmd. Flushes the caches/tlbs.
 */
void remap_as_pages(void *vstart, void *vend)
{
	unsigned long pstart = virt_to_phys(vstart);
	unsigned long pend = virt_to_phys(vend);
	unsigned long paddr = pstart;
	pgd_t pgd_i = PGD_INDEX(vstart);
	pmd_t pmd_i = PMD_INDEX(vstart);
	pgd_table_t *pgd = (pgd_table_t *)current->pgd;
	pmd_table_t *pmd = alloc_pmd();
	u32 pmd_phys = virt_to_phys(pmd);
	int numpages = __pfn(page_align_up(pend) - pstart);

	BUG_ON((unsigned long)vstart & ARM_SECTION_MASK);
	BUG_ON(pmd_i);

	/* Fill in the pmd first */
	while (pmd_i < numpages) {
		pmd->entry[pmd_i] = paddr;
		pmd->entry[pmd_i] |= PMD_TYPE_SMALL;	/* Small page type */
		pmd->entry[pmd_i] |= space_flags_to_ptflags(MAP_SVC_DEFAULT_FLAGS);
		paddr += PAGE_SIZE;
		pmd_i++;
	}

	/* Fill in the type to produce complete pmd translator information */
	pmd_phys |= PGD_TYPE_COARSE;

	/* Make sure memory is coherent first. */
	arm_clean_invalidate_cache();
	arm_invalidate_tlb();

	/* Replace the direct section physical address with the pmd's address */
	pgd->entry[pgd_i] = (pgd_t)pmd_phys;
	printk("Kernel area 0x%lx - 0x%lx remapped as %d pages\n",
	       (unsigned long)vstart, (unsigned long)vend, numpages);
}

void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
			 unsigned long start, unsigned long end)
{
	unsigned long start_i = PGD_INDEX(start);
	unsigned long end_i = PGD_INDEX(end);
	unsigned long irange = (end_i != 0) ? (end_i - start_i)
					    : (PGD_ENTRY_TOTAL - start_i);

	memcpy(&to->entry[start_i], &from->entry[start_i],
	       irange * sizeof(pgd_t));
}
155
src/arch/arm/v5/mmu_ops.S
Normal file
@@ -0,0 +1,155 @@
/*
 * Low-level MMU operations
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include INC_ARCH(asm.h)

#define C15_id		c0
#define C15_control	c1
#define C15_ttb		c2
#define C15_dom		c3
#define C15_fsr		c5
#define C15_far		c6
#define C15_tlb		c8

#define C15_C0_M	0x0001	/* MMU */
#define C15_C0_A	0x0002	/* Alignment */
#define C15_C0_C	0x0004	/* (D) Cache */
#define C15_C0_W	0x0008	/* Write buffer */
#define C15_C0_B	0x0080	/* Endianness */
#define C15_C0_S	0x0100	/* System */
#define C15_C0_R	0x0200	/* ROM */
#define C15_C0_Z	0x0800	/* Branch prediction */
#define C15_C0_I	0x1000	/* I cache */
#define C15_C0_V	0x2000	/* High vectors */

/* FIXME: Make sure the ops that need r0 don't trash r0, or if they do,
 * save it on the stack before these operations.
 */

/*
 * In ARM terminology, flushing the cache means invalidating its contents.
 * Cleaning the cache means writing the contents of the cache back to
 * main memory. In write-back caches the cache must be cleaned before
 * flushing, otherwise in-cache data is lost.
 */
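@ Note (added): for a write-back D-cache this implies the ordering the
@ callers in mm.c use: clean first, then invalidate, then drain the write
@ buffer. The test/clean/flush loops below (c7, c14, 3) perform the clean
@ and invalidate together, one cache line per iteration.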
BEGIN_PROC(arm_set_ttb)
	mcr	p15, 0, r0, C15_ttb, c0, 0
	mov	pc, lr
END_PROC(arm_set_ttb)

BEGIN_PROC(arm_get_domain)
	mrc	p15, 0, r0, C15_dom, c0, 0
	mov	pc, lr
END_PROC(arm_get_domain)

BEGIN_PROC(arm_set_domain)
	mcr	p15, 0, r0, C15_dom, c0, 0
	mov	pc, lr
END_PROC(arm_set_domain)

BEGIN_PROC(arm_enable_mmu)
	mrc	p15, 0, r0, C15_control, c0, 0
	orr	r0, r0, #C15_C0_M
	mcr	p15, 0, r0, C15_control, c0, 0
	mov	pc, lr
END_PROC(arm_enable_mmu)

BEGIN_PROC(arm_enable_icache)
	mrc	p15, 0, r0, C15_control, c0, 0
	orr	r0, r0, #C15_C0_I
	mcr	p15, 0, r0, C15_control, c0, 0
	mov	pc, lr
END_PROC(arm_enable_icache)

BEGIN_PROC(arm_enable_dcache)
	mrc	p15, 0, r0, C15_control, c0, 0
	orr	r0, r0, #C15_C0_C
	mcr	p15, 0, r0, C15_control, c0, 0
	mov	pc, lr
END_PROC(arm_enable_dcache)

BEGIN_PROC(arm_enable_wbuffer)
	mrc	p15, 0, r0, C15_control, c0, 0
	orr	r0, r0, #C15_C0_W
	mcr	p15, 0, r0, C15_control, c0, 0
	mov	pc, lr
END_PROC(arm_enable_wbuffer)

BEGIN_PROC(arm_enable_high_vectors)
	mrc	p15, 0, r0, C15_control, c0, 0
	orr	r0, r0, #C15_C0_V
	mcr	p15, 0, r0, C15_control, c0, 0
	mov	pc, lr
END_PROC(arm_enable_high_vectors)

BEGIN_PROC(arm_invalidate_cache)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c7, c7	@ Flush I cache and D cache
	mov	pc, lr
END_PROC(arm_invalidate_cache)

BEGIN_PROC(arm_invalidate_icache)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c7, c5, 0	@ Flush I cache
	mov	pc, lr
END_PROC(arm_invalidate_icache)

BEGIN_PROC(arm_invalidate_dcache)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c7, c6, 0	@ Flush D cache
	mov	pc, lr
END_PROC(arm_invalidate_dcache)

BEGIN_PROC(arm_clean_dcache)
	mrc	p15, 0, pc, c7, c10, 3	@ Test/clean dcache line
	bne	arm_clean_dcache
	mcr	p15, 0, ip, c7, c10, 4	@ Drain WB
	mov	pc, lr
END_PROC(arm_clean_dcache)

BEGIN_PROC(arm_clean_invalidate_dcache)
1:
	mrc	p15, 0, pc, c7, c14, 3	@ Test/clean/flush dcache line
					@ (Rd = PC means the op writes no
					@ register; its status goes to the
					@ CPSR flags, which bne tests.)
	bne	1b
	mcr	p15, 0, ip, c7, c10, 4	@ Drain WB
	mov	pc, lr
END_PROC(arm_clean_invalidate_dcache)

BEGIN_PROC(arm_clean_invalidate_cache)
1:
	mrc	p15, 0, r15, c7, c14, 3	@ Test/clean/flush dcache line
	bne	1b
	mcr	p15, 0, ip, c7, c5, 0	@ Flush icache
	mcr	p15, 0, ip, c7, c10, 4	@ Drain WB
	mov	pc, lr
END_PROC(arm_clean_invalidate_cache)

BEGIN_PROC(arm_drain_writebuffer)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c7, c10, 4
	mov	pc, lr
END_PROC(arm_drain_writebuffer)

BEGIN_PROC(arm_invalidate_tlb)
	mcr	p15, 0, ip, c8, c7
	mov	pc, lr
END_PROC(arm_invalidate_tlb)

BEGIN_PROC(arm_invalidate_itlb)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c8, c5, 0
	mov	pc, lr
END_PROC(arm_invalidate_itlb)

BEGIN_PROC(arm_invalidate_dtlb)
	mov	r0, #0			@ FIX THIS
	mcr	p15, 0, r0, c8, c6, 0
	mov	pc, lr
END_PROC(arm_invalidate_dtlb)
90
src/arch/arm/v5/mutex.S
Normal file
@@ -0,0 +1,90 @@
/*
 * ARM v5 binary semaphore (mutex) implementation.
 *
 * Copyright (C) 2007 Bahadir Balban
 *
 */

#include INC_ARCH(asm.h)

/* Recap on swp:
 * swp rx, ry, [rz]
 * In one instruction:
 * 1) Stores the value in ry into the location pointed at by rz.
 * 2) Loads the value at the location of rz into rx.
 * By doing so, one can in a single instruction attempt to lock
 * a word and discover whether it was already locked.
 */

#define MUTEX_UNLOCKED	0
#define MUTEX_LOCKED	1

BEGIN_PROC(__spin_lock)
	mov	r1, #1
__spin:
	swp	r2, r1, [r0]
	cmp	r2, #0
	bne	__spin
	mov	pc, lr
END_PROC(__spin_lock)

BEGIN_PROC(__spin_unlock)
	mov	r1, #0
	swp	r2, r1, [r0]
	cmp	r2, #1		@ Debug check.
1:
	bne	1b
	mov	pc, lr
END_PROC(__spin_unlock)


/*
 * @r0: Address of mutex location.
 */
BEGIN_PROC(__mutex_lock)
	mov	r1, #1
	swp	r2, r1, [r0]
	cmp	r2, #0
	movne	r0, #0
	moveq	r0, #1
	mov	pc, lr
END_PROC(__mutex_lock)

/*
 * @r0: Address of mutex location.
 */
BEGIN_PROC(__mutex_unlock)
	mov	r1, #0
	swp	r2, r1, [r0]
	cmp	r2, #1
1:	@ Debug check.
	bne	1b
	mov	pc, lr
END_PROC(__mutex_unlock)

/*
 * @r0: Address of mutex location.
 */
BEGIN_PROC(__mutex_inc)
	mov	r1, #1
	swp	r2, r1, [r0]
	cmp	r2, #0
	movne	r0, #0
	moveq	r0, #1
	mov	pc, lr
END_PROC(__mutex_inc)

/*
 * @r0: Address of mutex location.
 */
BEGIN_PROC(__mutex_dec)
	mov	r1, #0
	swp	r2, r1, [r0]
	cmp	r2, #1
1:	@ Debug check.
	bne	1b
	mov	pc, lr
END_PROC(__mutex_dec)

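For context, a hedged sketch of the C-side view of these routines. The prototypes are assumed from the register protocol above (r0 carries the lock address in; for __mutex_lock, r0 on return is 1 if acquired, 0 if it was already locked, per the movne/moveq pair):

/* Assumed prototypes for the assembly routines above. */
extern void __spin_lock(unsigned int *lock);
extern void __spin_unlock(unsigned int *lock);
extern int  __mutex_lock(unsigned int *lock);	/* 1 = acquired, 0 = was locked */
extern void __mutex_unlock(unsigned int *lock);

static unsigned int lock_word;			/* 0 == MUTEX_UNLOCKED */

void example_critical_section(void)
{
	__spin_lock(&lock_word);
	/* ... touch shared state ... */
	__spin_unlock(&lock_word);
}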
7
src/arch/arm/v6/mm.c
Normal file
@@ -0,0 +1,7 @@
/*
 *
 * Copyright Bahadir Balban (C) 2005
 *
 */

0
src/arch/arm/v6/mmu_ops.S
Normal file
710
src/arch/arm/vectors.S
Normal file
@@ -0,0 +1,710 @@
/*
 * The vectors page. Includes all exception handlers.
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include INC_ARCH(asm.h)

.balign 4096
.section .data.vectors
__vector_vaddr:

BEGIN_PROC(arm_high_vector)
	b	arm_reset_exception
	b	arm_undef_exception
	b	arm_swi_exception
	b	arm_prefetch_abort_exception_reentrant
	b	arm_data_abort_exception_reentrant
	nop
	b	arm_irq_exception_reentrant_with_schedule
	b	arm_fiq_exception
END_PROC(arm_high_vector)

.balign 4

/*
 * vect_reset
 *
 * Upon Entry:
 * - All registers are undefined and insignificant,
 * - FIQ/IRQs are disabled.
 * - PC:	0x00000000
 *
 * PURPOSE:
 * The CPU always starts executing from this vector
 * upon a HW reset. It may also be used as a SW reset.
 */
BEGIN_PROC(arm_reset_exception)
END_PROC(arm_reset_exception)

/*
 * vect_undef
 *
 * Upon Entry:
 * - R14:	Address of next instruction after the undefined instruction
 * - PC:	0x00000004
 * - IRQs are disabled (CPSR[7] = 1)
 *
 * PURPOSE:
 * A co-processor instruction not supported by the core can be
 * emulated here. Also unrecognised/invalid instructions are handled.
 */
BEGIN_PROC(arm_undef_exception)
	sub	lr, lr, #4
	mov	r0, lr		@ Get undefined abort address
	mov	r5, lr		@ Save it in r5 in case r0 is trashed
	mov	lr, pc		@ Save return address
	ldr	pc, =dump_undef_abort
1:
	b	1b
END_PROC(arm_undef_exception)

.macro disable_irqs rx
	mrs	\rx, cpsr_fc
	orr	\rx, #ARM_IRQ_BIT
	msr	cpsr_fc, \rx
.endm

.macro enable_irqs rx
	mrs	\rx, cpsr_fc
	bic	\rx, #ARM_IRQ_BIT
	msr	cpsr_fc, \rx
.endm

/* Only works in SVC mode. Know what you are doing! */
.macro get_current rx
	bic	\rx, sp, #0xFF0
	bic	\rx, \rx, #0xF
.endm
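@ Note (added): get_current assumes each task's ktcb and its kernel stack
@ share one 4 KB-aligned block, with the ktcb at the base; clearing the low
@ 12 bits of SP (0xFF0, then 0xF) therefore recovers the current ktcb
@ pointer. That is also why it is only valid while running on the SVC stack.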
/* Saves the address of system call argument registers pushed to stack
|
||||
* to the current task's ktcb. */
|
||||
.macro ktcb_ref_saved_regs regs_addr, ktcb, regs_off
|
||||
get_current \ktcb
|
||||
ldr \regs_off, =syscall_regs_offset
|
||||
ldr \regs_off, [\regs_off]
|
||||
str \regs_addr, [\ktcb, \regs_off]
|
||||
.endm
|
||||
/*
|
||||
* vect_swi
|
||||
*
|
||||
* Upon Entry:
|
||||
* - R14: Address of next instruction after the SWI
|
||||
* - PC: 0x00000008
|
||||
* - R0-R12: Depending on the system call some of them contain
|
||||
* indicators of what the exception means.
|
||||
* - IRQs are disabled (CPSR[7] = 1)
|
||||
* - SWI instruction's bits [7:0] may contain SWI indicator
|
||||
*
|
||||
* PURPOSE:
|
||||
* Used for trapping into a debugger or OS kernel via system calls.
|
||||
* Argument registers from R0 up to R12 and [7:0] of the causing SWI
|
||||
* instruction contains hints of what to do with this exception. What
|
||||
* R0-R12 contains depends on what userspace has put in them. Note this
|
||||
* is the only exception that userspace can generate and thus has control
|
||||
* on what it put into r0-rx.
|
||||
*
|
||||
* RECAP:
|
||||
* Normally across a function call, only r0-r3 are used for passing parameters.
|
||||
* Why r0-r3 only but not r4, r5...? See APCS (ARM procedure call standard)
|
||||
* Short answer: r4-r12 must be preserved across procedures but r0-r3 can be
|
||||
* trashed because they're set aside for argument passing. Arguments more than 4
|
||||
* go on the stack. Note APCS is a *suggestion*, rather than enforcement. So if
|
||||
* a userspace stub library is created that say, preserves and uses r0-r9 for a
|
||||
* system call, and the system call handler (this) knows about it, it is a
|
||||
* perfectly valid setup. In fact this is what we do here, we don't strictly use
|
||||
* r0-r3. Depending on the system call, the set of input registers (and output
|
||||
* registers to return results from the system call) may be redefined. These are
|
||||
* documented for each system call in the reference manual.
|
||||
* Another caveat to note in SWI usage is that we use the address offset of the
|
||||
* SWI instruction to see which offset it has in the system call vector, to
|
||||
* determine the correct system call, rather than [7:0] bits of the SWI.
|
||||
*/
|
||||
BEGIN_PROC(arm_swi_exception)
|
||||
sub lr, lr, #4 @ Get address of swi instruction user executed.
|
||||
stmfd sp, {r0-r8,sp,lr}^ @ Push arguments, LR_USR and SP_USR to stack.
|
||||
nop
|
||||
@ NOTE: SP_USR MUST be pushed here, otherwise a kernel preemption could
|
||||
@ cause user mode of another process to overwrite SP_USR. The reason we
|
||||
@ save it here is because the preemption path does not currently save it
|
||||
@ if it is a kernel preemption. User SP can also be used here, as the
|
||||
@ user might have pushed data to its stack to be used by system calls.
|
||||
@ But we dont plan to pass data to kernel in this way, so saving of
|
||||
@ SP_USR can be done in preemption path as an optimisation.
|
||||
|
||||
/*
|
||||
* The LR_usr is important here, because the user application uses a BL
|
||||
* to jump to the system call SWI, so the LR_usr contains the return
|
||||
* address, i.e. the next instruction after the *jumping* instruction to
|
||||
* the system call SWI (not the one after the swi itself, which is in
|
||||
* LR_svc).
|
||||
*/
|
||||
|
||||
sub sp, sp, #44 @ stmfd on user registers can't writeback the SP. We do it manually.
|
||||
mrs r0, spsr_fc @ psr also need saving in case this context is interrupted.
|
||||
stmfd sp!, {r0}
|
||||
enable_irqs r0
|
||||
add r0, sp, #4 @ Pass sp address + 4 as a pointer to saved regs.
|
||||
ktcb_ref_saved_regs r0, r1, r2 @ Save regs pointer in ktcb
|
||||
mov r1, lr @ Pass swi instruction address in LR as arg1
|
||||
mov lr, pc
|
||||
ldr pc, =syscall
|
||||
disable_irqs r1 @ Not disabling irqs at this point causes the SP_USR and spsr
|
||||
@ to get corrupt causing havoc.
|
||||
ldmfd sp!, {r1}
|
||||
msr spsr, r1
|
||||
add sp, sp, #4 @ Skip, r0's location, since r0 already has returned result.
|
||||
@ Note we're obliged to preserve at least r3-r8 because they're MRs.
|
||||
ldmfd sp!, {r1-r8} @ Restore r1-r8 pushed to stack earlier. r0 already has return result.
|
||||
ldmfd sp, {sp}^ @ Restore user stack pointer, which might have been corrupt on preemption
|
||||
nop
|
||||
add sp, sp, #4 @ Update sp.
|
||||
ldmfd sp!, {lr} @ Load userspace return address
|
||||
movs pc, lr
|
||||
END_PROC(arm_swi_exception)
|
||||
|
||||
/* Minimal abort state saved on data abort stack right after abort vector enters: */
|
||||
#define ABT_R0 0
|
||||
#define ABT_SPSR -4
|
||||
#define ABT_R14 -8
|
||||
|
||||
/* Minimal prefetch abort state saved on abort stack upon entry. */
|
||||
#define ABT_R0 0
|
||||
#define ABT_SPSR -4
|
||||
#define ABT_R14 -8
|
||||
|
||||
/* Depending on the SPSR condition determines whether irqs should be enabled
|
||||
* during abort handling. If abort occured in userspace it orders irqs
|
||||
* should be enabled. Else if irqs come from kernel mode, it orders irqs are
|
||||
* enabled only if they were alreday enabled before the abort. */
|
||||
.macro can_abort_enable_irqs temp1, r_spsr
|
||||
and \temp1, \r_spsr, #ARM_MODE_MASK
|
||||
cmp \temp1, #ARM_MODE_USR @ Usermode indicates irqs can be enabled.
|
||||
beq 1f @ Z flag set. Which indicates "can enable"
|
||||
and \temp1, \r_spsr, #ARM_IRQ_BIT @ Clear irq bit indicates irqs were enabled
|
||||
cmp \temp1, #0 @ before the abort and can be safely enabled.
|
||||
1: @ Z flag must be set for "can enable" here.
|
||||
.endm
|
||||
|
||||
/* Pushes the user sp and lr to stack, updates the stack pointer */
|
||||
.macro push_user_sp_lr sp
|
||||
@ stack state: (Low) |..|..|->(Original)| (High)
|
||||
stmfd \sp, {sp, lr}^ @ Push USR banked regs to stack.
|
||||
nop @ Need a NOOP after push/popping user registers.
|
||||
@ stack state: (Low) |SP_USR|LR_USR|->(Original)| (High)
|
||||
sub \sp, \sp, #8 @ Adjust SP, since stack op on banked regs is no writeback.
|
||||
@ stack state: (Low) |->SP_USR|LR_USR|(Original)| (High)
|
||||
.endm
|
||||
/*
|
||||
* vect_pabt
|
||||
*
|
||||
* Upon Entry:
|
||||
* - R14_svc: Address of next instruction after aborted instruction
|
||||
* - R14_usr: Address of return instruction in last function call**
|
||||
* - PC: 0x0000000c
|
||||
* - IRQs are disabled (CPSR[7] = 1)
|
||||
*
|
||||
*
|
||||
* PURPOSE:
|
||||
* Used for handling instructions that caused *memory aborts* during
|
||||
* the *prefetching* of the instruction. The instruction is also marked
|
||||
* as invalid by the core. It handles the cause for the memory abort.
|
||||
*
|
||||
* (One reason why a memory abort would occur is when we were entering
|
||||
* into a new page region that contained executable code and was not
|
||||
* present in memory, or its physical-to-virtual translation was not
|
||||
* present in the page tables. See other causes for memory aborts)
|
||||
*
|
||||
* **In case abort occured in userspace. This is useful if the abort
|
||||
* was due to a null/invalid function pointer call. Since R14_abt
|
||||
* includes the aborting instruction itself, R14_usr gives the clue to
|
||||
* where this call came from.
|
||||
*/
|
||||
BEGIN_PROC(arm_prefetch_abort_exception_reentrant)
|
||||
sub lr, lr, #4 @ lr-4 points at aborted instruction
|
||||
str lr, [r13, #ABT_R14] @ Store abort address.
|
||||
mrs lr, spsr @ Get SPSR
|
||||
str lr, [r13, #ABT_SPSR] @ Store SPSR
|
||||
str r0, [r13, #ABT_R0] @ Store R0 to use as temp register.
|
||||
mov r0, r13 @ SP to R0
|
||||
mrs lr, cpsr @ Change to SVC mode.
|
||||
bic lr, #ARM_MODE_MASK
|
||||
orr lr, lr, #ARM_MODE_SVC
|
||||
msr cpsr_fc, r14
|
||||
@ FIXME: Ensure 8-byte stack here.
|
||||
str lr, [sp, #-8]! @ NOTE: Switched mode! Save LR_SVC 2 words down from SP_SVC.
|
||||
transfer_pabt_state_to_svc: @ Move data saved on PABT stack to SVC stack.
|
||||
ldr lr, [r0, #ABT_R14]
|
||||
str lr, [sp, #4]
|
||||
@ Stack state: |LR_SVC<-|LR_PABT|{original SP_SVC}|
|
||||
ldr lr, [r0, #ABT_SPSR]
|
||||
ldr r0, [r0, #ABT_R0]
|
||||
stmfd sp!, {r0-r3,r12,lr}
|
||||
@ Stack state: |R0<-|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
|
||||
push_user_sp_lr sp
|
||||
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
|
||||
read_pabt_state:
|
||||
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (Tells why the fault occured) FIXME: Do we need this in pabt?
|
||||
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (Contains the faulted data address) Do we need this in pabt?
|
||||
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
|
||||
ldr r3, [sp, #28] @ Load PABT_SPSR
|
||||
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
|
||||
bne 1f @ Branch here based on previous irq judgement.
|
||||
enable_irqs r3
|
||||
1:
|
||||
ldr r0, [sp, #36] @ Load LR_PABT saved previously.
|
||||
mov lr, pc
|
||||
ldr pc, =prefetch_abort_handler @ Jump to function outside this page.
|
||||
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
|
||||
@ (i.e. an interrupt could overwrite spsr with current psr)
|
||||
ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
|
||||
nop @ User reg mod requires nop
|
||||
add sp, sp, #8 @ Update SP.
|
||||
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
|
||||
msr spsr_cxsf, r14 @ Restore spsr register from lr.
|
||||
@ Stack state: |LR_SVC<-|LR_PREV(PABT)|{original SP_SVC}|
|
||||
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
|
||||
@ and pc gets lr_dabt. Saved at #4 and #8 offsets
|
||||
@ down from where svc stack had left.
|
||||
END_PROC(arm_prefetch_abort_exception_reentrant)
|
||||
|
||||
/*
|
||||
* vect_dabt
|
||||
*
|
||||
* Upon Entry:
|
||||
* - R14_abt: Address of next instruction after aborted instruction
|
||||
* - PC: 0x00000010
|
||||
* - IRQs are disabled (CPSR[7] = 1)
|
||||
*
|
||||
*
|
||||
* PURPOSE:
|
||||
* Used for handling instructions that caused *memory aborts* during
|
||||
* the *execution* of the current instruction. This may happen if the
|
||||
* instruction accessed a memory address (e.g LDR/STR) that is not
|
||||
* defined as part of the currently executing process (aka illegal
|
||||
* access). Another possibility is the address is within the address
|
||||
* space of the process, but it is not mapped, i.e. does not have
|
||||
* physical-to-virtual translation entry in the page tables.
|
||||
*/
|
||||
BEGIN_PROC(arm_data_abort_exception)
|
||||
sub lr, lr, #8 @ lr-8 points at aborted instruction
|
||||
mrc p15, 0, r2, c5, c0, 0 @ Read FSR
|
||||
mrc p15, 0, r1, c6, c0, 0 @ Read FAR
|
||||
mov r0, lr @ Get data abort address
|
||||
mov r5, lr @ Save it in r5 in case r0 will get trashed
|
||||
mov lr, pc @ Save return address
|
||||
ldr pc, =data_abort_handler @ Jump to function outside this page.
|
||||
1:
|
||||
b 1b
|
||||
END_PROC(arm_data_abort_exception)
|
||||
/*
|
||||
* The method of saving abort state to svc stack is identical with that of
|
||||
* reentrant irq vector. Natural to this, Restoring of the previous state
|
||||
* is also identical.
|
||||
*/
|
||||
BEGIN_PROC(arm_data_abort_exception_reentrant)
|
||||
sub lr, lr, #8 @ Get abort address
|
||||
str lr, [r13, #ABT_R14] @ Store abort address
|
||||
mrs lr, spsr @ Get SPSR
|
||||
str lr, [r13, #ABT_SPSR] @ Store SPSR
|
||||
str r0, [r13, #ABT_R0] @ Store r0
|
||||
@ NOTE: Can increase data abort nest here.
|
||||
mov r0, r13 @ Keep current sp point in R0
|
||||
mrs lr, cpsr @ Change to SVC mode.
|
||||
bic lr, #ARM_MODE_MASK
|
||||
orr lr, lr, #ARM_MODE_SVC
|
||||
msr cpsr_fc, r14
|
||||
@ FIXME: Ensure 8-byte stack here.
|
||||
str lr, [sp, #-8]! @ Save lr_svc 2 words down from interrupted SP_SVC
|
||||
transfer_dabt_state_to_svc:
|
||||
ldr lr, [r0, #ABT_R14]
|
||||
str lr, [sp, #4]
|
||||
@ Stack state: |LR_SVC<-|LR_DABT|{original SP_SVC}|
|
||||
ldr lr, [r0, #ABT_SPSR]
|
||||
ldr r0, [r0, #ABT_R0]
|
||||
stmfd sp!, {r0-r3,r12,lr}
|
||||
@ Stack state: |R0<-|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
|
||||
push_user_sp_lr sp
|
||||
@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
|
||||
read_dabt_state:
|
||||
mrc p15, 0, r1, c5, c0, 0 @ Read FSR (Tells why the fault occured)
|
||||
mrc p15, 0, r2, c6, c0, 0 @ Read FAR (Contains the faulted data address)
|
||||
@ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
|
||||
ldr r3, [sp, #28] @ Load DABT_SPSR
|
||||
can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
|
||||
bne 1f @ Branch here based on previous irq judgement.
|
||||
enable_irqs r3
|
||||
1:
|
||||
ldr r0, [sp, #36] @ Load LR_DABT saved previously.
|
||||
mov lr, pc
|
||||
ldr pc, =data_abort_handler @ Jump to function outside this page.
|
||||
disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
|
||||
ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
|
||||
nop @ User reg mod requires nop
|
||||
add sp, sp, #8 @ Update SP.
|
||||
ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
|
||||
msr spsr_cxsf, r14 @ Restore spsr register from lr.
|
||||
@ Stack state: |LR_SVC<-|LR_PREV(DABT)|{original SP_SVC}|
|
||||
ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
|
||||
@ and pc gets lr_dabt. Saved at #4 and #8 offsets
|
||||
@ down from where svc stack had left.
|
||||
END_PROC(arm_data_abort_exception_reentrant)
|
||||
|
||||
/*
|
||||
* vect_irq
|
||||
*
|
||||
* Upon Entry:
|
||||
* - R14: Address of next instruction after interrupted instruction.
|
||||
* - PC: 0x00000018
|
||||
* - IRQs are disabled (CPSR[7] = 1)
|
||||
* - A vectored interrupt controller would also provide where to jump in
|
||||
* order to handle the interrupt, or an irq controller in general would
|
||||
* provide registers that indicate what kind of interrupt has occured.
|
||||
*
|
||||
*
|
||||
* PURPOSE:
|
||||
* Used for handling IRQs. IRQs have lower priority compared to other
|
||||
* types of exceptions.
|
||||
*/
|
||||
|
||||
/* The most basic handler where neither context switching nor re-entry can occur. */
|
||||
BEGIN_PROC(arm_irq_exception_basic)
|
||||
sub lr, lr, #4
|
||||
stmfd sp!, {r0-r3,lr}
|
||||
mov lr, pc
|
||||
ldr pc, =do_irq
|
||||
ldmfd sp!, {r0-r3, pc}^
|
||||
END_PROC(arm_irq_exception)
|
||||
|
||||
/* Minimal IRQ state saved on irq stack right after irq vector enters: */
|
||||
#define IRQ_R0 0
|
||||
#define IRQ_SPSR -4
|
||||
#define IRQ_R14 -8

/* A reentrant handler that uses the svc mode stack to prevent banked lr_irq corruption. */
BEGIN_PROC(arm_irq_exception_reentrant)
	sub	lr, lr, #4
	@ Save minimal state to the irq stack:
	str	r14, [r13, #IRQ_R14]	@ Save lr_irq
	mrs	r14, spsr		@ Copy spsr
	str	r14, [r13, #IRQ_SPSR]	@ Save spsr on irq stack
	str	r0, [r13, #IRQ_R0]	@ Save r0.
	mov	r0, r13			@ Use r0 to keep banked sp_irq across the mode switch.
	mrs	r14, cpsr		@ Get current psr (irq)
	bic	r14, #ARM_MODE_MASK	@ Clear mode bits from psr
	orr	r14, r14, #ARM_MODE_SVC	@ Write SVC mode bits.
	msr	cpsr_fc, r14		@ Change to SVC mode.
	str	r14, [r13, #-8]!	@ Save lr_svc 2 words down from where the svc stack left.
	@ Transfer the minimal irq state saved to the svc stack:
	ldr	r14, [r0, #IRQ_R14]	@ Load lr_irq to lr, using r0 that contains sp_irq.
	str	r14, [r13, #4]		@ Save lr_irq 1 word down from where the svc stack left.
	ldr	r14, [r0, #IRQ_SPSR]	@ Load irq spsr.
	ldr	r0, [r0, #IRQ_R0]	@ Restore r0.
	stmfd	sp!, {r0-r3,r12,lr}	@ Save the rest of the irq context to the svc stack.
	bl	do_irq			@ Read irq number etc. Free to re-enable irqs here.
	ldmfd	sp!, {r0-r3,r12,lr}	@ Restore previous context. (note, lr has spsr)
	msr	spsr_cxsf, lr		@ Restore spsr register from lr.
	ldmfd	r13!, {r14, pc}^	@ Return, restoring cpsr. Note r14 gets r14_svc,
					@ and pc gets lr_irq. Saved at #4 and #8 offsets
					@ down from where the svc stack had left.
END_PROC(arm_irq_exception_reentrant)
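
/*
 * Why the state is moved to the svc stack: lr_irq and spsr_irq are banked, so
 * a nested irq taken once irqs are re-enabled would silently overwrite them.
 * A sketch of the hazard this avoids, had we stayed on the irq banked state:
 *
 *	@ first irq:	lr_irq = A, spsr_irq = PSR_A
 *	@ nested irq:	lr_irq = B, spsr_irq = PSR_B	@ A and PSR_A are lost
 */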

.macro was_irq_mode rx
	mrs	\rx, spsr
	and	\rx, \rx, #0x1F
	cmp	\rx, #ARM_MODE_IRQ
.endm
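
/*
 * Usage sketch (illustrative only): clobbers \rx and leaves the Z flag set
 * if the preempted context was in IRQ mode:
 *
 *	was_irq_mode r0
 *	beq	handle_nested_irq	@ hypothetical label
 */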

.macro need_resched rx, ry
	get_current \rx
	ldr	\ry, =need_resched_offset
	ldr	\ry, [\ry]
	ldr	\ry, [\rx, \ry]
	cmp	\ry, #1
.endm
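
/*
 * need_resched_offset is assumed here to be a kernel-defined word holding the
 * byte offset of the reschedule flag inside the current task's control block;
 * the macro leaves the Z flag set when a reschedule is pending:
 *
 *	need_resched r0, r1
 *	beq	do_the_schedule		@ hypothetical label
 */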

/*
 * Keeps the PSR of the last preempted process. This helps to tell
 * what mode the process was in when it was preempted.
 */
.global preempted_psr;
preempted_psr:
.word	0

/* Keeps track of how many nests of irqs have happened. */
.global current_irq_nest_count;
current_irq_nest_count:
.word	0

#define IRQ_NESTING_MAX		15
.macro inc_irq_cnt_with_overnest_check rx, ry
	ldr	\rx, =current_irq_nest_count	@ Load the irq nest status word.
	ldr	\ry, [\rx]
	add	\ry, \ry, #1		@ No need for an atomic increment, since irqs are disabled.
	str	\ry, [\rx]
	cmp	\ry, #IRQ_NESTING_MAX	@ Check that there are no more than the maximum nests, and die miserably if so.
	ldrge	pc, =irq_overnest_error
.endm

@ This decrement need not be atomic, because if you are *decrementing* it, then
@ preemption is already *disabled*. Ruling out preemption, the only possible race
@ is against irqs. If an irq preempts the decrement and modifies the count, it is
@ still responsible for changing it back to the value it had when we read it,
@ before it returns. So, effectively, anything that runs during the decrement
@ does not affect the value of the count.
.macro dec_irq_nest_cnt rx, ry
	ldr	\ry, =current_irq_nest_count
	ldr	\rx, [\ry]
	sub	\rx, \rx, #1
	str	\rx, [\ry]
.endm

.macro in_process_context rx
	ldr	\rx, =current_irq_nest_count
	ldr	\rx, [\rx]
	cmp	\rx, #0
.endm

/* If a process (as opposed to another irq) was interrupted, saves its spsr value to preempted_psr. */
.macro cmp_and_save_process_psr rx, process_psr
	in_process_context \rx		@ If the nest count is 0, a running process was preempted.
	ldreq	\rx, =preempted_psr
	streq	\process_psr, [\rx]
.endm

.macro is_psr_usr rx
	and	\rx, \rx, #ARM_MODE_MASK
	cmp	\rx, #ARM_MODE_USR
.endm
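
/*
 * Usage sketch (illustrative only): destroys \rx and leaves the Z flag set
 * when the saved PSR's mode bits select user mode:
 *
 *	mrs	r0, spsr
 *	is_psr_usr r0
 *	beq	came_from_user		@ hypothetical label
 */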

#define CONTEXT_PSR	0
#define CONTEXT_R0	4
#define CONTEXT_R1	8
#define CONTEXT_R2	12
#define CONTEXT_R3	16
#define CONTEXT_R4	20
#define CONTEXT_R5	24
#define CONTEXT_R6	28
#define CONTEXT_R7	32
#define CONTEXT_R8	36
#define CONTEXT_R9	40
#define CONTEXT_R10	44
#define CONTEXT_R11	48
#define CONTEXT_R12	52
#define CONTEXT_R13	56
#define CONTEXT_R14	60
#define CONTEXT_PC	64
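
/*
 * These offsets are assumed to mirror a C-side context frame laid out roughly
 * as below (a sketch; the field names are illustrative, the real definition
 * lives in the kernel headers):
 *
 *	struct context {
 *		u32 spsr;	// CONTEXT_PSR
 *		u32 r[15];	// CONTEXT_R0 .. CONTEXT_R14
 *		u32 pc;		// CONTEXT_PC
 *	};
 */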

BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
	sub	lr, lr, #4
	str	lr, [r13, #IRQ_R14]	@ Save lr_irq
	mrs	r14, spsr		@ Copy spsr
	str	r14, [r13, #IRQ_SPSR]	@ Save spsr on irq stack
	str	r0, [r13, #IRQ_R0]	@ Save r0.
	cmp_and_save_process_psr r0, r14 @ R14 still holds the spsr here.
	inc_irq_cnt_with_overnest_check r0, r14
	mov	r0, r13			@ Use r0 to keep banked sp_irq across the mode switch.
	mrs	r14, cpsr		@ Get current psr (irq)
	bic	r14, #ARM_MODE_MASK	@ Clear mode bits from psr
	orr	r14, r14, #ARM_MODE_SVC	@ Write SVC mode bits.
	msr	cpsr_fc, r14		@ Change to SVC mode.
	@ FIXME: Ensure an 8-byte aligned stack here, and make sure to restore the original state later!
	str	r14, [r13, #-8]!	@ Save lr_svc 2 words down from where the svc stack left. SP updated.
	@ Transfer the minimal irq state to the svc stack:
	ldr	r14, [r0, #IRQ_R14]	@ Load lr_irq to lr, using r0 that contains sp_irq.
	str	r14, [r13, #4]		@ Save lr_irq 1 word down from where the svc stack left.
	ldr	r14, [r0, #IRQ_SPSR]	@ Load irq spsr.
	ldr	r0, [r0, #IRQ_R0]	@ Restore r0.
	stmfd	sp!, {r0-r3,r12,lr}	@ Save the rest of the irq context to the svc stack.
	mov	lr, pc
	ldr	pc, =do_irq		@ Read irq number etc. Free to re-enable irqs here.
	@ stack state: (Low) |r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
	ldr	r0, =current_irq_nest_count
	ldr	r0, [r0]
	cmp	r0, #1			@ Expect 1 as the lowest value, since each irq increases the nest count by 1.
	bgt	return_to_prev_context	@ if (irq_nest > 1) return_to_prev_context();
	need_resched r0, r1		@ if (irq_nest == 1 && need_resched) schedule();
	beq	preemption_path		@ else (irq_nest == 1 && !need_resched) fall through and return.

return_to_prev_context:
	dec_irq_nest_cnt r0, r1
	disable_irqs r0			@ Disable irqs to avoid corrupting spsr.
	ldmfd	sp!, {r0-r3,r12,lr}	@ Restore previous context. (note, lr has spsr)
	msr	spsr_cxsf, r14		@ Restore spsr register from lr.
	@ stack state: (Low) |LR_SVC<-|LR_PREV(IRQ)|{original SP_SVC}| (High)
	ldmfd	r13!, {r14, pc}^	@ Return, restoring cpsr. Note r14 gets r14_svc,
					@ and pc gets lr_irq. Saved at #4 and #8 offsets
					@ down from where the svc stack had left.

preemption_path:
	disable_irqs r0			@ Interrupts could corrupt the stack state below.
	get_current r0			@ Get the interrupted process.
	@ stack state: (Low) |->r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ(<return_address>)| (High)
save_interrupted_context:
	add	sp, sp, #4
	@ stack state: (Low) |r0|->r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ(<return_address>)| (High)
	ldmfd	sp!, {r1-r3, r12, lr}	@ lr now holds the interrupted context's SPSR.
	@ stack state: (Low) |r0|..|..|..|..|..|->LR_SVC|LR_IRQ(<return_address>)| (High)
	str	lr, [r0, #CONTEXT_PSR]
	is_psr_usr lr
	add	r0, r0, #CONTEXT_R1	@ Point at the register save location for #CONTEXT_R1.
	stmia	r0!, {r1-r12}
	ldmfd	sp!, {r1-r2}		@ At this point SP_SVC is back at its original svc location.
	@ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
	@ register state: r0 = register save location for #CONTEXT_R13, r1 = LR_SVC, r2 = LR_IRQ
	beq	save_usr_context

save_svc_context:
	stmib	r0, {r1-r2}		@ Save LR_SVC and LR_RETURN in advancing locations.
	str	sp, [r0]		@ Current sp is where sp_svc has left, and r0 is at the #CONTEXT_R13 location.
	sub	r0, r0, #CONTEXT_R13	@ Go back to the first word from the SP position.
	ldr	r1, [sp, #-32]		@ Load the interrupted context's r0 from the stack.
	str	r1, [r0, #CONTEXT_R0]	@ Save r0.
	b	prepare_schedule	@ All registers saved.

save_usr_context:
	sub	r0, r0, #CONTEXT_R13
	str	r2, [r0, #CONTEXT_PC]	@ Save the program counter.
	@ LR_SVC needs restoring, because it won't be pushed to the context frame. SP_SVC is already up-to-date.
	mov	lr, r1
	stmfd	sp, {sp, lr}^		@ Push the USR banked regs to the stack.
	@ stack state: (Low) |r0|..|..|..|..|..|SP_USR|LR_USR|->(Original)| (High)
	nop				@ Need a NOP after touching usr registers.
	sub	sp, sp, #8		@ Adjust SP, since a stack op on banked regs has no writeback.
	@ stack state: (Low) |r0|..|..|..|..|..|->SP_USR|LR_USR|(Original)| (High)
	ldmfd	sp!, {r1-r2}		@ Pop the USR banked regs.
	@ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
	str	r1, [r0, #CONTEXT_R13]	@ Save SP_USR to the context frame.
	str	r2, [r0, #CONTEXT_R14]	@ Save LR_USR to the context frame.
	ldr	r1, [sp, #-32]		@ Load the interrupted context's r0 from the stack.
	str	r1, [r0, #CONTEXT_R0]	@ Save r0.
	@ stack state: (Low) |..|..|..|..|..|..|..|..|->(Original)| (High)
prepare_schedule:
	ldr	pc, =schedule
END_PROC(arm_irq_exception_reentrant_with_schedule)
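
/*
 * The post-do_irq decision above, as pseudo-C (a sketch):
 *
 *	if (current_irq_nest_count > 1)
 *		return_to_prev_context();	// nested irq: unwind one level
 *	else if (need_resched)
 *		preemption_path();		// save the frame, then schedule()
 *	else
 *		return_to_prev_context();
 */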

/*
 * Context switch implementation.
 *
 * Upon entry:
 *
 * - r0 = current ktcb ptr, r1 = next ktcb ptr. r2 and r3 = insignificant.
 * - The current mode is always SVC, but the call may be coming from interrupt
 *   or process context.
 * - If coming from interrupt, the interrupted context has already been copied
 *   to the current ktcb in the irq handler, before coming here. The
 *   interrupted context can be SVC or USR.
 *
 * PURPOSE: Handles all paths from the irq exception, the thread_switch system
 * call, and sleeping in the kernel.
 *
 * NOTES:
 * - If coming from a process context, the current process context needs
 *   saving here.
 * - From irq contexts, preemption is disabled, i.e. the preemption count is 1,
 *   because irqs naturally increase the preemption count. From process context
 *   the preemption count is 0. A process context disables preemption during
 *   schedule(), but re-enables it before calling switch_to(). Irq and process
 *   contexts are thus distinguished by the preemption count.
 *   Furthermore, irqs are also disabled shortly before calling switch_to()
 *   from both contexts. This happens at points where the stack state would be
 *   irrecoverable if an irq occurred.
 */

BEGIN_PROC(switch_to)
	in_process_context r2		@ Note this depends on the preempt count being 0.
	beq	save_process_context	@ A voluntary switch needs explicit saving of the current state.
	dec_irq_nest_cnt r2, r3		@ Soon leaving irq context, so reduce the preempt count here.
	b	load_next_context	@ The interrupted context was already saved by the irq handler.

save_process_context:			@ Voluntary process schedules enter here:
	mrs	r2, cpsr
	str	r2, [r0]
	stmib	r0, {r0-r14}		@ Voluntary scheduling is always in SVC mode, so using svc regs.
	str	r14, [r0, #CONTEXT_PC]	@ Store R14 into the PC slot; R14 holds the return address for switch_to().

load_next_context:
	@ stack state: (Low) |..|..|..|..|..|..|..|..|..|->(Original)| (High)
	mov	sp, r1
	ldr	r0, [sp, #CONTEXT_PSR]	@ Load r0 with the saved PSR.
	bic	r0, r0, #ARM_IRQ_BIT	@ Enable irqs on the context about to be restored.
	msr	spsr_cxsf, r0		@ Restore spsr from r0.
	is_psr_usr r0
	bne	load_next_context_svc	@ Loading a user context differs from svc.

load_next_context_usr:
	ldmib	sp, {r0-r14}^		@ Load all, including the banked user regs.
	ldr	lr, [sp, #CONTEXT_PC]	@ Load the value of PC to r14.
	orr	sp, sp, #0xFF0		@ Reset the svc stack to the top of the ktcb page,
	orr	sp, sp, #0x8		@ 8-byte aligned.
	movs	pc, lr			@ Jump to user, changing modes.

load_next_context_svc:
	ldmib	sp, {r0-r15}^		@ Switch to the svc context and jump, loading R13 and R14 from the stack.
					@ This is OK, since the jump is to the current context.
END_PROC(switch_to)
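
/*
 * C-side view of the entry point above (a sketch; the signature is inferred
 * from the register usage: r0 = current, r1 = next):
 *
 *	void switch_to(struct ktcb *current, struct ktcb *next);
 */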

/*
 * vect_fiq
 *
 * Upon Entry:
 * - R14: Address of the next instruction after the interrupted instruction.
 * - PC: 0x00000014
 * - FIQs are disabled (CPSR[6] = 1)
 * - IRQs are disabled (CPSR[7] = 1)
 * - As with IRQ, the irq controller would provide registers that indicate
 *   what kind of interrupt has occurred.
 *
 * PURPOSE:
 * Handling of high-priority interrupts. FIQs have the highest priority after
 * reset and data abort exceptions. They're mainly used for achieving
 * low-latency interrupts, e.g. for DMA.
 */
BEGIN_PROC(arm_fiq_exception)
END_PROC(arm_fiq_exception)

/* * * * * * * * * * * * * * * * * * * * * * * *
 * External functions with absolute addresses  *
 * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * NOTE: On relative and absolute symbols in this file:
 *
 * Branches (B and BL) are *RELATIVE* on ARM, so no special action is needed
 * to access symbols within this file, even though this page (in virtual
 * memory) is relocated to another address at run-time (high or low vectors) -
 * an address other than the one it is linked at, at compile-time.
 *
 * To access external symbols from this file (e.g. calling some function in
 * the kernel), one needs to use the `ldr pc, =external_symbol'
 * pseudo-instruction (note the "=") and absolute addressing. This
 * automatically generates an inline data word within the current module and
 * indirectly loads the value in that word to resolve the undefined reference.
 * All other methods (LDR and B instructions, or the ADR pseudo-instruction)
 * generate relative addresses, and they will complain for external symbols,
 * because a relative offset cannot be calculated to an unknown distance. In
 * conclusion, relative branches are useful for accessing symbols on this
 * page, but they mean nothing outside it, because the page is relocated at
 * run-time. So, whatever you access *relatively* from this page resolves
 * relative to wherever this page happens to be at that moment.
 */
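
/*
 * For example (a sketch; the first target is local to this page, the second
 * is kernel text outside it):
 *
 *	bl	read_dabt_state			@ OK: relative, in-page target.
 *	ldr	pc, =data_abort_handler		@ OK: absolute, external target.
 *	b	data_abort_handler		@ Broken: relative offset to a
 *						@ page relocated at run-time.
 */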

/* * * * * * * * * * * * * * * * *
 * Stacks for Exception Vectors  *
 * * * * * * * * * * * * * * * * */
.global __stacks_end;
.global __abt_stack_high;
.global __irq_stack_high;
.global __fiq_stack_high;
.global __und_stack_high;

/*
 * These are also linked at high vectors, just as any other symbol
 * on this page.
 */
.balign 4
.equ __abt_stack_high, (__abt_stack - __vector_vaddr + 0xFFFF0000);
.equ __irq_stack_high, (__irq_stack - __vector_vaddr + 0xFFFF0000);
.equ __fiq_stack_high, (__fiq_stack - __vector_vaddr + 0xFFFF0000);
.equ __und_stack_high, (__und_stack - __vector_vaddr + 0xFFFF0000);

/*
 * NOTE: This could be cache line aligned.
 * (use a macro, e.g. ____arm_asm_cache_aligned)
 */
.balign 4
__stacks_end:	.space 256
__abt_stack:	.space 256
__irq_stack:	.space 256
__fiq_stack:	.space 256
__und_stack:	.space 256
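
/*
 * A note on sizing (an inference, not from the original source): 256 bytes
 * per mode stack suffices here, since each reentrant handler above stores
 * only a few words on its banked stack before migrating to the SVC stack,
 * and the remaining modes run short, non-nesting paths.
 */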

.balign 4096
9
src/arch/tests/SConscript
Normal file
9
src/arch/tests/SConscript
Normal file
@@ -0,0 +1,9 @@

# Inherit global environment
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['linker.c']

obj = env.Object(src_local)
Return('obj')
8
src/arch/tests/linker.c
Normal file
8
src/arch/tests/linker.c
Normal file
@@ -0,0 +1,8 @@

#include <macros.h>
#include <config.h>
#include INC_ARCH(linker.h)
#include INC_PLAT(offsets.h)

/* Stand-ins, for test builds, for symbols normally provided by the linker script. */
unsigned int kernel_mapping_end = 0;
unsigned int _end = 0;
Block a user