Kernel updates since December 2009

This commit is contained in:
Bahadir Balban
2010-03-25 01:12:40 +02:00
parent 16818191b3
commit 74b5963fcb
487 changed files with 22477 additions and 3857 deletions

View File

@@ -0,0 +1,16 @@
/*
* Copyright (C) 2010 B Labs Ltd.
*
* Common assembler macros
*
* Prem Mallappa, Bahadir Balban
*/
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

/*
 * get_cpuid: leave the calling CPU's id in the \cpuid register.
 *
 * Reads the CP15 Multiprocessor Affinity Register (MPIDR) and keeps
 * only the low four bits.
 * NOTE(review): assumes the cpu id fits in MPIDR bits [3:0] --
 * confirm against the target core's affinity-field layout.
 */
.macro get_cpuid cpuid
mrc p15, 0, \cpuid, c0, c0, 5 @ Read MPIDR
and \cpuid, \cpuid, #0xF @ Mask lower cpuid bits
.endm
#endif /* __ASM_MACROS_S__ */

View File

@@ -20,9 +20,11 @@
#define ARM_NOIRQ_FIQ 0xD1
#define ARM_NOIRQ_USR 0xD0
#define ARM_NOIRQ_SYS 0xDF
/* For enabling *clear* these bits */
#define ARM_IRQ_BIT 0x80
#define ARM_FIQ_BIT 0x40
#define ARM_IRQ_BIT 0x080
#define ARM_FIQ_BIT 0x040
#define ARM_A_BIT 0x100 /* Asynchronous abort */
/* Notes about ARM instructions:
*

View File

@@ -1,78 +1,57 @@
/*
* Definitions for exception support on ARM
* Common definitions for exceptions
* across ARM sub-architectures.
*
* Copyright (C) 2007 Bahadir Balban
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __ARCH_EXCEPTIONS_H__
#define __ARCH_EXCEPTIONS_H__
#ifndef __EXCEPTION_H__
#define __EXCEPTION_H__
#include INC_SUBARCH(exception.h)
#include INC_ARCH(asm.h)
static inline void enable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"bic r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
static inline void disable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"orr r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
int irqs_enabled();
/* Disable the irqs unconditionally, but also keep the previous state such that
* if it was already disabled before the call, the restore call would retain
* this state. */
void irq_local_disable_save(unsigned long *state);
#if 0
{
unsigned long temp;
__asm__ __volatile__ (
"mrs %0, cpsr_fc\n"
"orr %2, %0, #0x80\n"
"msr cpsr_fc, %2\n"
: "=r" (*state)
: "r" (*state),"r" (temp)
);
}
/* Abort debugging conditions */
//#define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) printk(__VA_ARGS__)
#else
#define dbg_abort(...)
#endif
/* Simply change it back to original state supplied in @flags. This might enable
* or retain disabled state of the irqs for example. Useful for nested calls. */
void irq_local_restore(unsigned long state);
/* Codezero-specific abort type */
#define ABORT_TYPE_PREFETCH 1
#define ABORT_TYPE_DATA 0
static inline void irq_local_enable()
{
enable_irqs();
}
/* If abort is handled and resolved in check_aborts */
#define ABORT_HANDLED 1
/* Codezero makes use of bit 8 (Always Zero) of FSR to define which type of abort */
#define set_abort_type(fsr, x) { fsr &= ~(1 << 8); fsr |= ((x & 1) << 8); }
#define is_prefetch_abort(fsr) ((fsr >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc; /* In DABT: Aborting PC, In PABT: Same as FAR */
u32 fsr; /* In DABT: DFSR, In PABT: IFSR */
u32 far; /* In DABT: DFAR, in PABT: IFAR */
pte_t pte; /* Faulty page table entry */
} __attribute__ ((__packed__)) fault_kdata_t;
static inline void irq_local_disable()
{
disable_irqs();
}
/* This is filled on entry to irq handler, only if a process was interrupted.*/
extern unsigned int preempted_psr;
/*
* FIXME: TASK_IN_KERNEL works for non-current tasks, in_kernel() works for current task?
* in_kernel() is for irq, since normally in process context you know if you are in kernel or not :-)
*/
/* Implementing these as functions cause circular include dependency for tcb.h */
#define TASK_IN_KERNEL(tcb) (((tcb)->context.spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
#define TASK_IN_USER(tcb) (!TASK_IN_KERNEL(tcb))
static inline int is_user_mode(u32 spsr)
{
return ((spsr & ARM_MODE_MASK) == ARM_MODE_USR);
}
static inline int in_kernel()
{
return (((preempted_psr & ARM_MODE_MASK) == ARM_MODE_SVC)) ? 1 : 0;
@@ -86,4 +65,9 @@ static inline int in_user()
int pager_pagein_request(unsigned long vaddr, unsigned long size,
unsigned int flags);
#endif
int fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag);
int is_kernel_abort(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
#endif /* __EXCEPTION_H__ */

View File

@@ -6,7 +6,20 @@
* Copyright (C) 2007 Bahadir Balban
*/
#define read(val, address) val = *((volatile unsigned int *) address)
#define write(val, address) *((volatile unsigned int *) address) = val
#if defined (__KERNEL__)
#include INC_GLUE(memlayout.h)
#define read(address) *((volatile unsigned int *) (address))
#define write(val, address) *((volatile unsigned int *) (address)) = val
#endif /* ends __KERNEL__ */
/*
* Generic uart virtual address until a file-based console access
* is available for userspace
*/
#define USERSPACE_CONSOLE_VBASE 0xF9800000
#endif /* __ARM_IO_H__ */

29
include/l4/arch/arm/irq.h Normal file
View File

@@ -0,0 +1,29 @@
#ifndef __ARM_IRQ_H__
#define __ARM_IRQ_H__
#include INC_SUBARCH(irq.h)
void irq_local_restore(unsigned long state);
void irq_local_disable_save(unsigned long *state);
int irqs_enabled();
static inline void irq_local_enable()
{
enable_irqs();
}
static inline void irq_local_disable()
{
disable_irqs();
}
/*
* Destructive atomic-read.
*
* Write 0 to byte at @location as its contents are read back.
*/
char l4_atomic_dest_readb(void *location);
#endif /* __ARM_IRQ_H__ */

View File

@@ -16,9 +16,15 @@ extern unsigned long arm_high_vector[];
extern unsigned long _end_vectors[];
extern unsigned long _start_kip[];
extern unsigned long _end_kip[];
extern unsigned long _start_syscalls[];
extern unsigned long _end_syscalls[];
extern unsigned long _start_init[];
extern unsigned long _end_init[];
extern unsigned long _bootstack[];
extern unsigned long _start_bootstack[];
extern unsigned long _end_bootstack[];
extern unsigned long _start_init_pgd[];
extern unsigned long _end_init_pgd[];
extern unsigned long _end_kernel[];
extern unsigned long _end[];

View File

@@ -0,0 +1,89 @@
/*
* Simple linker script
*
* Copyright (C) 2007 Bahadir Balban
*/
/* Default to a single CPU when the build does not say otherwise */
#if !defined (CONFIG_NCPU)
#define CONFIG_NCPU 1
#endif
/*
 * Physical load layout: the kernel image is placed 0x8000 above the
 * start of physical RAM; kernel_offset converts a link-time (virtual)
 * address into its load-time (physical) address.
 */
phys_ram_start = PLATFORM_PHYS_MEM_START;
kernel_offset = KERNEL_AREA_START - phys_ram_start;
kernel_physical = 0x8000 + phys_ram_start;
kernel_virtual = kernel_physical + kernel_offset;
/* A temporary boot stack is used before a proper kernel stack is set up */
_bootstack_physical = _end_bootstack - kernel_offset;
/* The symbols are linked at virtual addresses. So is _start.
* We must set the entry point to a physical address, so that
* when the image is loaded, it doesn't jump to a non existing
* virtual address.
*/
ENTRY(kernel_physical)
SECTIONS
{
. = kernel_virtual;
_start_kernel = .;
/* Each output section uses AT() to give it a physical load address
 * while the symbols inside are linked at virtual addresses. */
.text : AT (ADDR(.text) - kernel_offset)
{
_start_text = .;
/* Make sure head.S comes first */
/* *head.o(.text) This only works when given its full path. Bad limitation. */
*(.text.head)
*(.text)
_end_text = .;
}
. = ALIGN(4);
/* rodata is needed else your strings will link at physical! */
.rodata : AT (ADDR(.rodata) - kernel_offset) { *(.rodata) }
.rodata1 : AT (ADDR(.rodata1) - kernel_offset) { *(.rodata1) }
.data : AT (ADDR(.data) - kernel_offset)
{
_start_data = .;
*(.data)
/* Best alignment because we need 4 x (4K) and 1 x 16K block */
. = ALIGN(16K);
_start_vectors = .;
*(.data.vectors)
_end_vectors = .;
. = ALIGN(4K);
_start_kip = .;
*(.data.kip)
. = ALIGN(4K);
_end_kip = .;
_start_syscalls = .;
*(.data.syscalls)
. = ALIGN(4K);
_end_syscalls = .;
_start_init_pgd = .;
*(.data.pgd);
_end_init_pgd = .;
/* One boot-stack page per configured CPU, page-aligned */
_start_bootstack = .;
. = ALIGN(4K);
. += PAGE_SIZE * CONFIG_NCPU;
_end_bootstack = .;
_end_data = .;
}
.bss : AT (ADDR(.bss) - kernel_offset)
{
*(.bss)
}
. = ALIGN(4K);
. += PAGE_SIZE * 2; /* This is required as the link counter does not seem
* to increment for the bss section
* TODO: Change this with PAGE_SIZE */
/* Below part is to be discarded after boot */
_start_init = .;
.init : AT (ADDR(.init) - kernel_offset)
{
*(.init.task.pgd) /* Non-global task table on split tables, otherwise nil */
*(.init.bootmem)
*(.init.data)
}
_end_init = .;
_end_kernel = .;
_end = .;
}

View File

@@ -1,15 +1,16 @@
#ifndef __ARCH_MUTEX_H__
#define __ARCH_MUTEX_H__
/*
* ARM specific low-level mutex interfaces
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARCH_MUTEX_H__
#define __ARCH_MUTEX_H__
/* TODO: The return types could be improved for debug checking */
void __spin_lock(unsigned int *s);
void __spin_unlock(unsigned int *s);
unsigned int __mutex_lock(unsigned int *m);
void __mutex_unlock(unsigned int *m);
#endif
#endif /* __ARCH_MUTEX_H__ */

34
include/l4/arch/arm/scu.h Normal file
View File

@@ -0,0 +1,34 @@
/*
* SCU registers
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Prem Mallappa
*/
#ifndef __SCU_H__
#define __SCU_H__
/* Following defines may well go into realview/scu.h */
#define SCU_CTRL_REG 0x00 /* Control Register */
#define SCU_CFG_REG 0x04 /* Configuration Register */
#define SCU_CPU_PWR_REG 0x08 /* SCU CPU Power state register */
#define SCU_INV_ALL_S 0x0C /* SCU Invalidate all Secure Registers */
#define SCU_ACCESS_REG_S 0x50 /* SCU Access Control Secure */
#define SCU_ACCESS_REG_NS 0x54 /* SCU Access Control Non-Secure */
/* The contents of CONTROL AND CONFIG are Implementation Defined. so they may go into platform specific scu.h */
#define SCU_CTRL_EN (1 << 0)
#define SCU_CTRL_ADDR_FLTR_EN (1 << 1)
#define SCU_CTRL_PARITY_ON (1 << 2)
#define SCU_CTRL_STBY_EN (1 << 5) /* SCU StandBy Enable */
#define SCU_CTRL_GIC_STBY_EN (1 << 6) /* GIC Standby enable */
/* Config register */
#define SCU_CFG_SMP_MASK 0x000000f0
#define SCU_CFG_TAG_RAM_MASK 0x0000ff00
#define SCU_CFG_NCPU_MASK 0x7
#define SCU_CFG_SMP_NCPU_SHIFT 4
#endif /* __SCU_H__ */

View File

@@ -0,0 +1,24 @@
/*
 * Cpu specific features
 * defined upon the base architecture.
 *
 * Copyright (C) 2010 B Labs Ltd.
 * Written by Bahadir Balban
 */
#ifndef __V5_CPU_H__
#define __V5_CPU_H__
#include INC_SUBARCH(mmu_ops.h)

/* Nothing cpu-specific to do at startup for this sub-architecture */
static inline void cpu_startup(void)
{
}

/*
 * Always reports cpu id 0: this variant provides no SMP cpu
 * enumeration, so all callers see a single processor.
 * Note: use (void) rather than () so the compiler checks callers
 * pass no arguments (pre-C23, () means "unspecified parameters").
 */
static inline int smp_get_cpuid(void)
{
	return 0;
}
#endif /* __V5_CPU_H__ */

View File

View File

@@ -0,0 +1,33 @@
/*
* Definitions for exception support on ARMv5
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARCH_V5_EXCEPTION_H__
#define __ARCH_V5_EXCEPTION_H__
#include INC_ARCH(asm.h)
/*
* v5 Architecture-defined data abort values for FSR ordered
* in highest to lowest priority.
*/
#define DABT_TERMINAL 0x2
#define DABT_VECTOR 0x0 /* Obsolete */
#define DABT_ALIGN 0x1
#define DABT_EXT_XLATE_LEVEL1 0xC
#define DABT_EXT_XLATE_LEVEL2 0xE
#define DABT_XLATE_SECT 0x5
#define DABT_XLATE_PAGE 0x7
#define DABT_DOMAIN_SECT 0x9
#define DABT_DOMAIN_PAGE 0xB
#define DABT_PERM_SECT 0xD
#define DABT_PERM_PAGE 0xF
#define DABT_EXT_LFETCH_SECT 0x4
#define DABT_EXT_LFETCH_PAGE 0x6
#define DABT_EXT_NON_LFETCH_SECT 0x8
#define DABT_EXT_NON_LFETCH_PAGE 0xA
#define FSR_FS_MASK 0xF
#endif /* __ARCH_V5_EXCEPTION_H__ */

View File

@@ -1,11 +1,28 @@
#ifndef __ARM_V5_IRQ_H__
#define __ARM_V5_IRQ_H__
/*
* Destructive atomic-read.
*
* Write 0 to byte at @location as its contents are read back.
*/
char l4_atomic_dest_readb(void *location);
static inline void enable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"bic r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
static inline void disable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"orr r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
/* Disable the irqs unconditionally, but also keep the previous state such that
* if it was already disabled before the call, the restore call would retain
* this state. */
void irq_local_disable_save(unsigned long *state);
void irq_local_restore(unsigned long state);
#endif

View File

@@ -9,53 +9,65 @@
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
#define ARM_SECTION_SIZE SZ_1MB
#define ARM_SECTION_MASK (ARM_SECTION_SIZE - 1)
#define ARM_SECTION_BITS 20
#define SECTION_SIZE SZ_1MB
#define SECTION_MASK (SECTION_SIZE - 1)
#define SECTION_ALIGN_MASK (~SECTION_MASK)
#define SECTION_BITS 20
#define ARM_PAGE_SIZE SZ_4K
#define ARM_PAGE_MASK 0xFFF
#define ARM_PAGE_BITS 12
#define PGD_SIZE SZ_4K * 4
#define PGD_ENTRY_TOTAL SZ_4K
#define PGD_TYPE_MASK 0x3
#define PGD_COARSE_ALIGN_MASK 0xFFFFFC00
#define PGD_SECTION_ALIGN_MASK 0xFFF00000
#define PGD_FINE_ALIGN_MASK 0xFFFFF000
#define PGD_TYPE_FAULT 0
#define PGD_TYPE_COARSE 1
#define PGD_TYPE_SECTION 2
#define PGD_TYPE_FINE 3
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PMD_TYPE_LARGE 1
#define PMD_TYPE_SMALL 2
#define PMD_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
#define PMD_SIZE SZ_1K
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PMD_TYPE_PMD 1
#define PMD_TYPE_SECTION 2
/* We need this as printascii.S is including this file */
#define PTE_TYPE_MASK 0x3
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#define PTE_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
/*
* These are indices into arrays with pgd_t or pmd_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
& 0x3FFC) / sizeof(pmd_t))
/*
* Strip out the page offset in this
* megabyte from a total of 256 pages.
*/
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
& 0x3FC) / sizeof (pte_t))
/* We need this as print-early.S is including this file */
#ifndef __ASSEMBLY__
/* Type-checkable page table elements */
typedef u32 pgd_t;
typedef u32 pmd_t;
typedef u32 pte_t;
/* Page global directory made up of pgd_t entries */
typedef struct pgd_table {
pgd_t entry[PGD_ENTRY_TOTAL];
pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;
/* Page middle directory made up of pmd_t entries */
typedef struct pmd_table {
pmd_t entry[PMD_ENTRY_TOTAL];
pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;
/* Applies for both small and large pages */
@@ -79,86 +91,38 @@ typedef struct pmd_table {
#define unbufferable 0
/* Helper macros for common cases */
#define __MAP_USR_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO_FLAGS (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_SVC_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_SVC_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
/* Abort information */
/* There is no execute bit in ARMv5, so we ignore it */
#define __MAP_USR_RWX __MAP_USR_RW
#define __MAP_USR_RX __MAP_USR_RO
#define __MAP_KERN_RWX __MAP_KERN_RW
#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
#define __MAP_FAULT 0
/*FIXME: Carry all these definitions to an abort.h, Also carry all abort code to abort.c. Much neater!!! */
/* Abort type */
#define ARM_PABT 1
#define ARM_DABT 0
/* The kernel makes use of bit 8 (Always Zero) of FSR to define which type of abort */
#define set_abort_type(fsr, x) { fsr &= ~(1 << 8); fsr |= ((x & 1) << 8); }
#define ARM_FSR_MASK 0xF
#define is_prefetch_abort(fsr) ((fsr >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/*
* v5 Architecture-defined data abort values for FSR ordered
* in highest to lowest priority.
*/
#define DABT_TERMINAL 0x2
#define DABT_VECTOR 0x0 /* Obsolete */
#define DABT_ALIGN 0x1
#define DABT_EXT_XLATE_LEVEL1 0xC
#define DABT_EXT_XLATE_LEVEL2 0xE
#define DABT_XLATE_SECT 0x5
#define DABT_XLATE_PAGE 0x7
#define DABT_DOMAIN_SECT 0x9
#define DABT_DOMAIN_PAGE 0xB
#define DABT_PERM_SECT 0xD
#define DABT_PERM_PAGE 0xF
#define DABT_EXT_LFETCH_SECT 0x4
#define DABT_EXT_LFETCH_PAGE 0x6
#define DABT_EXT_NON_LFETCH_SECT 0x8
#define DABT_EXT_NON_LFETCH_PAGE 0xA
#define TASK_PGD(x) (x)->space->pgd
#define STACK_ALIGNMENT 8
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc;
u32 fsr;
u32 far;
pte_t pte;
} __attribute__ ((__packed__)) fault_kdata_t;
void arch_hardware_flush(pgd_table_t *pgd);
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
struct address_space;
int delete_page_tables(struct address_space *space);
int copy_user_tables(struct address_space *new, struct address_space *orig);
pgd_table_t *copy_page_tables(pgd_table_t *from);
void remap_as_pages(void *vstart, void *vend);
int pgd_count_pmds(pgd_table_t *pgd);
pgd_table_t *realloc_page_tables(void);
void remove_section_mapping(unsigned long vaddr);
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end);
extern pgd_table_t init_pgd;
void arch_update_utcb(unsigned long utcb_address);
void system_identify(void);
#endif /* __ASSEMBLY__ */
#endif /* __V5_MM_H__ */

View File

@@ -18,6 +18,7 @@ void arm_enable_high_vectors(void);
void arm_invalidate_cache(void);
void arm_invalidate_icache(void);
void arm_invalidate_dcache(void);
void arm_clean_dcache(void);
void arm_clean_invalidate_dcache(void);
void arm_clean_invalidate_cache(void);
void arm_drain_writebuffer(void);
@@ -31,4 +32,22 @@ static inline void arm_enable_caches(void)
arm_enable_dcache();
}
/*
 * Data memory barrier, approximated on this core by draining
 * the write buffer.
 */
static inline void dmb(void)
{
/* This is the closest to its meaning */
arm_drain_writebuffer();
}
/*
 * Data synchronization barrier.
 * NOTE(review): implemented as a no-op here -- presumably this core
 * has no distinct dsb primitive; confirm this is sufficient for
 * callers that rely on dsb ordering.
 */
static inline void dsb(void)
{
/* No op */
}
/*
 * Instruction synchronization barrier.
 * NOTE(review): no-op here as well -- confirm against the target
 * core's pipeline-flush requirements.
 */
static inline void isb(void)
{
/* No op */
}
#endif /* __MMU__OPS__H__ */

View File

@@ -0,0 +1,6 @@
#ifndef __PERFMON_H__
#define __PERFMON_H__
static inline void perfmon_init(void) { }
#endif

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,43 @@
/*
* Cpu specific features
* defined upon the base architecture.
*
* Copyright (C) 2010 B Labs Ltd.
* Written by Bahadir Balban
*/
#ifndef __V6_CPU_H__
#define __V6_CPU_H__
#include INC_SUBARCH(mmu_ops.h)
#define MPIDR_CPUID_MASK 0x7
/* Read multi-processor affinity register */
static inline unsigned int __attribute__((always_inline))
cp15_read_mpidr(void)
{
unsigned int val;
__asm__ __volatile__ (
"mrc p15, 0, %0, c0, c0, 5\n"
: "=r" (val)
:
);
return val;
}
static inline int smp_get_cpuid()
{
volatile u32 mpidr = cp15_read_mpidr();
return mpidr & MPIDR_CPUID_MASK;
}
static inline void cpu_startup(void)
{
}
#endif /* __V6_CPU_H__ */

View File

@@ -0,0 +1,33 @@
/*
* Definitions for exception support on ARMv5
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARCH_V5_EXCEPTION_H__
#define __ARCH_V5_EXCEPTION_H__
#include INC_ARCH(asm.h)
/*
* v5 Architecture-defined data abort values for FSR ordered
* in highest to lowest priority.
*/
#define DABT_TERMINAL 0x2
#define DABT_VECTOR 0x0 /* Obsolete */
#define DABT_ALIGN 0x1
#define DABT_EXT_XLATE_LEVEL1 0xC
#define DABT_EXT_XLATE_LEVEL2 0xE
#define DABT_XLATE_SECT 0x5
#define DABT_XLATE_PAGE 0x7
#define DABT_DOMAIN_SECT 0x9
#define DABT_DOMAIN_PAGE 0xB
#define DABT_PERM_SECT 0xD
#define DABT_PERM_PAGE 0xF
#define DABT_EXT_LFETCH_SECT 0x4
#define DABT_EXT_LFETCH_PAGE 0x6
#define DABT_EXT_NON_LFETCH_SECT 0x8
#define DABT_EXT_NON_LFETCH_PAGE 0xA
#define FSR_FS_MASK 0xF
#endif /* __ARCH_V5_EXCEPTION_H__ */

View File

@@ -0,0 +1,26 @@
#ifndef __ARM_V5_IRQ_H__
#define __ARM_V5_IRQ_H__
static inline void enable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"bic r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
static inline void disable_irqs()
{
__asm__ __volatile__(
"mrs r0, cpsr_fc\n"
"orr r0, r0, #0x80\n" /* ARM_IRQ_BIT */
"msr cpsr_fc, r0\n"
);
}
/* Disable the irqs unconditionally, but also keep the previous state such that
* if it was already disabled before the call, the restore call would retain
* this state. */
void irq_local_disable_save(unsigned long *state);
#endif

View File

@@ -9,53 +9,65 @@
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
#define ARM_SECTION_SIZE SZ_1MB
#define ARM_SECTION_MASK (ARM_SECTION_SIZE - 1)
#define ARM_SECTION_BITS 20
#define SECTION_SIZE SZ_1MB
#define SECTION_MASK (SECTION_SIZE - 1)
#define SECTION_ALIGN_MASK (~SECTION_MASK)
#define SECTION_BITS 20
#define ARM_PAGE_SIZE SZ_4K
#define ARM_PAGE_MASK 0xFFF
#define ARM_PAGE_BITS 12
#define PGD_SIZE SZ_4K * 4
#define PGD_ENTRY_TOTAL SZ_4K
#define PGD_TYPE_MASK 0x3
#define PGD_COARSE_ALIGN_MASK 0xFFFFFC00
#define PGD_SECTION_ALIGN_MASK 0xFFF00000
#define PGD_FINE_ALIGN_MASK 0xFFFFF000
#define PGD_TYPE_FAULT 0
#define PGD_TYPE_COARSE 1
#define PGD_TYPE_SECTION 2
#define PGD_TYPE_FINE 3
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PMD_TYPE_LARGE 1
#define PMD_TYPE_SMALL 2
#define PMD_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
#define PMD_SIZE SZ_1K
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PMD_TYPE_PMD 1
#define PMD_TYPE_SECTION 2
/* We need this as printascii.S is including this file */
#define PTE_TYPE_MASK 0x3
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#define PTE_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
/*
* These are indices into arrays with pgd_t or pmd_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
& 0x3FFC) / sizeof(pmd_t))
/*
* Strip out the page offset in this
* megabyte from a total of 256 pages.
*/
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
& 0x3FC) / sizeof (pte_t))
/* We need this as print-early.S is including this file */
#ifndef __ASSEMBLY__
/* Type-checkable page table elements */
typedef u32 pgd_t;
typedef u32 pmd_t;
typedef u32 pte_t;
/* Page global directory made up of pgd_t entries */
typedef struct pgd_table {
pgd_t entry[PGD_ENTRY_TOTAL];
pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;
/* Page middle directory made up of pmd_t entries */
typedef struct pmd_table {
pmd_t entry[PMD_ENTRY_TOTAL];
pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;
/* Applies for both small and large pages */
@@ -79,86 +91,35 @@ typedef struct pmd_table {
#define unbufferable 0
/* Helper macros for common cases */
#define __MAP_USR_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO_FLAGS (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_SVC_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_SVC_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
/* Abort information */
/* There is no execute bit in ARMv5, so we ignore it */
#define __MAP_USR_RWX __MAP_USR_RW
#define __MAP_USR_RX __MAP_USR_RO
#define __MAP_KERN_RWX __MAP_KERN_RW
#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
#define __MAP_FAULT 0
/*FIXME: Carry all these definitions to an abort.h, Also carry all abort code to abort.c. Much neater!!! */
/* Abort type */
#define ARM_PABT 1
#define ARM_DABT 0
/* The kernel makes use of bit 8 (Always Zero) of FSR to define which type of abort */
#define set_abort_type(fsr, x) { fsr &= ~(1 << 8); fsr |= ((x & 1) << 8); }
#define ARM_FSR_MASK 0xF
#define is_prefetch_abort(fsr) ((fsr >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/*
* v5 Architecture-defined data abort values for FSR ordered
* in highest to lowest priority.
*/
#define DABT_TERMINAL 0x2
#define DABT_VECTOR 0x0 /* Obsolete */
#define DABT_ALIGN 0x1
#define DABT_EXT_XLATE_LEVEL1 0xC
#define DABT_EXT_XLATE_LEVEL2 0xE
#define DABT_XLATE_SECT 0x5
#define DABT_XLATE_PAGE 0x7
#define DABT_DOMAIN_SECT 0x9
#define DABT_DOMAIN_PAGE 0xB
#define DABT_PERM_SECT 0xD
#define DABT_PERM_PAGE 0xF
#define DABT_EXT_LFETCH_SECT 0x4
#define DABT_EXT_LFETCH_PAGE 0x6
#define DABT_EXT_NON_LFETCH_SECT 0x8
#define DABT_EXT_NON_LFETCH_PAGE 0xA
#define TASK_PGD(x) (x)->space->pgd
#define STACK_ALIGNMENT 8
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc;
u32 fsr;
u32 far;
pte_t pte;
} __attribute__ ((__packed__)) fault_kdata_t;
void arch_hardware_flush(pgd_table_t *pgd);
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
struct address_space;
int delete_page_tables(struct address_space *space);
int copy_user_tables(struct address_space *new, struct address_space *orig);
pgd_table_t *copy_page_tables(pgd_table_t *from);
void remap_as_pages(void *vstart, void *vend);
int pgd_count_pmds(pgd_table_t *pgd);
pgd_table_t *realloc_page_tables(void);
void remove_section_mapping(unsigned long vaddr);
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end);
extern pgd_table_t init_pgd;
#endif /* __ASSEMBLY__*/
#endif /* __ASSEMBLY__ */
#endif /* __V5_MM_H__ */

View File

@@ -6,7 +6,6 @@
* Copyright (C) 2005 Bahadir Balban
*
*/
void arm_set_ttb(unsigned int);
void arm_set_domain(unsigned int);
unsigned int arm_get_domain(void);
@@ -18,6 +17,7 @@ void arm_enable_high_vectors(void);
void arm_invalidate_cache(void);
void arm_invalidate_icache(void);
void arm_invalidate_dcache(void);
void arm_clean_dcache(void);
void arm_clean_invalidate_dcache(void);
void arm_clean_invalidate_cache(void);
void arm_drain_writebuffer(void);
@@ -31,4 +31,22 @@ static inline void arm_enable_caches(void)
arm_enable_dcache();
}
/*
 * Data memory barrier, approximated on this core by draining
 * the write buffer.
 */
static inline void dmb(void)
{
/* This is the closest to its meaning */
arm_drain_writebuffer();
}
/*
 * Data synchronization barrier.
 * NOTE(review): no-op here -- presumably this core has no distinct
 * dsb primitive; confirm callers do not rely on stronger ordering.
 */
static inline void dsb(void)
{
/* No op */
}
/*
 * Instruction synchronization barrier.
 * NOTE(review): no-op here as well -- confirm against the target
 * core's pipeline-flush requirements.
 */
static inline void isb(void)
{
/* No op */
}
#endif /* __MMU__OPS__H__ */

View File

@@ -1,13 +0,0 @@
/*
*
* Copyright (C) 2005 Bahadir Balban
*
*/
#ifndef __ARM926EJS__H__
#define __ARM926EJS__H__
#endif /* __ARM926EJS__H__ */

View File

@@ -1,164 +0,0 @@
/*
* ARM v5-specific virtual memory details
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __V5_MM_H__
#define __V5_MM_H__
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
#define ARM_SECTION_SIZE SZ_1MB
#define ARM_SECTION_MASK (ARM_SECTION_SIZE - 1)
#define ARM_SECTION_BITS 20
#define ARM_PAGE_SIZE SZ_4K
#define ARM_PAGE_MASK 0xFFF
#define ARM_PAGE_BITS 12
#define PGD_SIZE SZ_4K * 4
#define PGD_ENTRY_TOTAL SZ_4K
#define PGD_TYPE_MASK 0x3
#define PGD_COARSE_ALIGN_MASK 0xFFFFFC00
#define PGD_SECTION_ALIGN_MASK 0xFFF00000
#define PGD_FINE_ALIGN_MASK 0xFFFFF000
#define PGD_TYPE_FAULT 0
#define PGD_TYPE_COARSE 1
#define PGD_TYPE_SECTION 2
#define PGD_TYPE_FINE 3
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PMD_TYPE_LARGE 1
#define PMD_TYPE_SMALL 2
#define PMD_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
#define PMD_SIZE SZ_1K
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
/* We need this as printascii.S is including this file */
#ifndef __ASSEMBLY__
/* Type-checkable page table elements */
typedef u32 pgd_t;
typedef u32 pmd_t;
typedef u32 pte_t;
/* Page global directory made up of pgd_t entries */
typedef struct pgd_table {
pgd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;
/* Page middle directory made up of pmd_t entries */
typedef struct pmd_table {
pmd_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;
/* Applies for both small and large pages */
#define PAGE_AP0 4
#define PAGE_AP1 6
#define PAGE_AP2 8
#define PAGE_AP3 10
/* Permission values with rom and sys bits ignored */
#define SVC_RW_USR_NONE 1
#define SVC_RW_USR_RO 2
#define SVC_RW_USR_RW 3
#define PTE_PROT_MASK (0xFF << 4)
#define CACHEABILITY 3
#define BUFFERABILITY 2
#define cacheable (1 << CACHEABILITY)
#define bufferable (1 << BUFFERABILITY)
#define uncacheable 0
#define unbufferable 0
/* Helper macros for common cases */
#define __MAP_USR_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO_FLAGS (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_SVC_RW_FLAGS (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_SVC_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO_FLAGS (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
/* Abort information */
/*FIXME: Carry all these definitions to an abort.h, Also carry all abort code to abort.c. Much neater!!! */
/* Abort type */
#define ARM_PABT 1
#define ARM_DABT 0
/* The kernel makes use of bit 8 (Always Zero) of FSR to define which type of abort */
#define set_abort_type(fsr, x) { fsr &= ~(1 << 8); fsr |= ((x & 1) << 8); }
#define ARM_FSR_MASK 0xF
#define is_prefetch_abort(fsr) ((fsr >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/*
* v5 Architecture-defined data abort values for FSR ordered
* in highest to lowest priority.
*/
#define DABT_TERMINAL 0x2
#define DABT_VECTOR 0x0 /* Obsolete */
#define DABT_ALIGN 0x1
#define DABT_EXT_XLATE_LEVEL1 0xC
#define DABT_EXT_XLATE_LEVEL2 0xE
#define DABT_XLATE_SECT 0x5
#define DABT_XLATE_PAGE 0x7
#define DABT_DOMAIN_SECT 0x9
#define DABT_DOMAIN_PAGE 0xB
#define DABT_PERM_SECT 0xD
#define DABT_PERM_PAGE 0xF
#define DABT_EXT_LFETCH_SECT 0x4
#define DABT_EXT_LFETCH_PAGE 0x6
#define DABT_EXT_NON_LFETCH_SECT 0x8
#define DABT_EXT_NON_LFETCH_PAGE 0xA
#define TASK_PGD(x) (x)->space->pgd
#define STACK_ALIGNMENT 8
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc;
u32 fsr;
u32 far;
pte_t pte;
} __attribute__ ((__packed__)) fault_kdata_t;
void arch_hardware_flush(pgd_table_t *pgd);
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
struct address_space;
int delete_page_tables(struct address_space *space);
int copy_user_tables(struct address_space *new, struct address_space *orig);
pgd_table_t *copy_page_tables(pgd_table_t *from);
void remap_as_pages(void *vstart, void *vend);
int pgd_count_pmds(pgd_table_t *pgd);
pgd_table_t *realloc_page_tables(void);
void remove_section_mapping(unsigned long vaddr);
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
unsigned long start, unsigned long end);
#endif /* __ASSEMBLY__*/
#endif /* __V5_MM_H__ */

View File

@@ -1,34 +0,0 @@
#ifndef __MMU__OPS__H__
#define __MMU__OPS__H__
/*
* Prototypes for low level mmu operations
*
* Copyright (C) 2005 Bahadir Balban
*
*/
void arm_set_ttb(unsigned int);
void arm_set_domain(unsigned int);
unsigned int arm_get_domain(void);
void arm_enable_mmu(void);
void arm_enable_icache(void);
void arm_enable_dcache(void);
void arm_enable_wbuffer(void);
void arm_enable_high_vectors(void);
void arm_invalidate_cache(void);
void arm_invalidate_icache(void);
void arm_invalidate_dcache(void);
void arm_clean_invalidate_dcache(void);
void arm_clean_invalidate_cache(void);
void arm_drain_writebuffer(void);
void arm_invalidate_tlb(void);
void arm_invalidate_itlb(void);
void arm_invalidate_dtlb(void);
static inline void arm_enable_caches(void)
{
arm_enable_icache();
arm_enable_dcache();
}
#endif /* __MMU__OPS__H__ */