Changes since April

Clean up of build directories.
Simplifications to capability model.
This commit is contained in:
Bahadir Balban
2010-06-01 15:08:13 +03:00
parent aef14b55ec
commit 6fa4884a5a
450 changed files with 10449 additions and 7383 deletions

View File

@@ -0,0 +1,15 @@
#ifndef __ARM_ASM_H__
#define __ARM_ASM_H__

/*
 * Assembler helpers that bracket a procedure so the symbol is
 * emitted with proper .type/.size information in the object file
 * (useful to debuggers, profilers and the linker).
 */

/* Open a global, aligned procedure called @name */
#define BEGIN_PROC(name) \
	.global name; \
	.type name,function; \
	.align; \
name:

/* Close @name: drop an end label and record the symbol's size */
#define END_PROC(name) \
.fend_##name: \
	.size name,.fend_##name - name;

#endif /* __ARM_ASM_H__ */

View File

@@ -0,0 +1,11 @@
#ifndef __L4LIB_ARCH_IRQ_H__
#define __L4LIB_ARCH_IRQ_H__

/*
 * Destructive atomic-read.
 *
 * Write 0 to byte at @location as its contents are read back.
 * Returns the byte's previous value.
 */
char l4_atomic_dest_readb(void *location);

#endif

View File

@@ -0,0 +1,95 @@
/*
* System call prototypes.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARM_SYSCALLS_H__
#define __ARM_SYSCALLS_H__

#include L4LIB_INC_ARCH(types.h)
#include L4LIB_INC_ARCH(utcb.h)
#include <l4/generic/space.h>
#include <l4/api/space.h>
#include <l4/api/kip.h>
#include <l4/api/ipc.h>
#include <l4/api/thread.h>

/*
 * Identifiers naming a thread.
 * NOTE(review): spid/tgid presumably mean space id and thread
 * group id -- verify against the kernel's thread api.
 */
struct task_ids {
	l4id_t tid;
	l4id_t spid;
	l4id_t tgid;
};

/*
 * Return the address of the Kernel Interface Page.
 * The api_version/api_flags/kernel_id output parameters are
 * currently ignored; only the fixed KIP address is returned.
 */
static inline void *
l4_kernel_interface(unsigned int *api_version, unsigned int *api_flags,
		    unsigned int *kernel_id)
{
	return (void *)L4_KIP_ADDRESS;
}

/*
 * Each system call below comes as a triplet: a function pointer
 * type, the pointer itself, and the callable prototype.
 * NOTE(review): the pointers are presumably filled in at library
 * init from kip-provided entry points -- confirm in __l4_init.
 */

/* Yield to (or switch towards) thread @dest */
typedef unsigned int (*__l4_thread_switch_t)(u32);
extern __l4_thread_switch_t __l4_thread_switch;
unsigned int l4_thread_switch (u32 dest);

/* Fill @ids with the caller's task identifiers */
typedef int (*__l4_getid_t)(struct task_ids *ids);
extern __l4_getid_t __l4_getid;
int l4_getid(struct task_ids *ids);

/* Core ipc primitive; see syslib.h wrappers for the conventions */
typedef int (*__l4_ipc_t)(l4id_t to, l4id_t from, u32 flags);
extern __l4_ipc_t __l4_ipc;
int l4_ipc(l4id_t to, l4id_t from, u32 flags);

typedef int (*__l4_capability_control_t)(unsigned int req, unsigned int flags, void *buf);
extern __l4_capability_control_t __l4_capability_control;
int l4_capability_control(unsigned int req, unsigned int flags, void *buf);

/* Map @npages from @phys to @virt in task @tid's space */
typedef int (*__l4_map_t)(void *phys, void *virt,
			  u32 npages, u32 flags, l4id_t tid);
extern __l4_map_t __l4_map;
int l4_map(void *p, void *v, u32 npages, u32 flags, l4id_t tid);

typedef int (*__l4_unmap_t)(void *virt, unsigned long npages, l4id_t tid);
extern __l4_unmap_t __l4_unmap;
int l4_unmap(void *virtual, unsigned long numpages, l4id_t tid);

typedef int (*__l4_thread_control_t)(unsigned int action, struct task_ids *ids);
extern __l4_thread_control_t __l4_thread_control;
int l4_thread_control(unsigned int action, struct task_ids *ids);

typedef int (*__l4_irq_control_t)(unsigned int req, unsigned int flags, l4id_t id);
extern __l4_irq_control_t __l4_irq_control;
int l4_irq_control(unsigned int req, unsigned int flags, l4id_t id);

typedef int (*__l4_ipc_control_t)(unsigned int action, l4id_t blocked_sender,
				  u32 blocked_tag);
extern __l4_ipc_control_t __l4_ipc_control;
int l4_ipc_control(unsigned int, l4id_t blocked_sender, u32 blocked_tag);

typedef int (*__l4_exchange_registers_t)(void *exregs_struct, l4id_t tid);
extern __l4_exchange_registers_t __l4_exchange_registers;
int l4_exchange_registers(void *exregs_struct, l4id_t tid);

typedef int (*__l4_container_control_t)(unsigned int req, unsigned int flags, void *buf);
extern __l4_container_control_t __l4_container_control;
int l4_container_control(unsigned int req, unsigned int flags, void *buf);

/* Read (@set == 0) or set (@set != 0) system time */
typedef int (*__l4_time_t)(void *timeval, int set);
extern __l4_time_t __l4_time;
int l4_time(void *timeval, int set);

typedef int (*__l4_mutex_control_t)(void *mutex_word, int op);
extern __l4_mutex_control_t __l4_mutex_control;
int l4_mutex_control(void *mutex_word, int op);

typedef int (*__l4_cache_control_t)(void *start, void *end, unsigned int flags);
extern __l4_cache_control_t __l4_cache_control;
int l4_cache_control(void *start, void *end, unsigned int flags);

/* To be supplied by server tasks. */
void *virt_to_phys(void *);
void *phys_to_virt(void *);

#endif /* __ARM_SYSCALLS_H__ */

View File

@@ -0,0 +1,366 @@
/*
* Helper functions that wrap raw l4 syscalls.
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __L4LIB_SYSLIB_H__
#define __L4LIB_SYSLIB_H__
#include <stdio.h>
#include <l4/macros.h>
#include L4LIB_INC_ARCH(syscalls.h)
/*
* NOTE:
* Its best to use these wrappers because they generalise the way
* common ipc data like sender id, error, ipc tag are passed
* between ipc parties.
*
* The arguments to l4_ipc() are used by the microkernel to initiate
* the ipc. Any data passed in message registers may or may not be
* a duplicate of this data, but the distinction is that anything
* that is passed via the mrs are meant to be used by the other party
* participating in the ipc.
*/
/* For system call arguments: MR slots free for per-call payload */
#define L4SYS_ARG0 (MR_UNUSED_START)
#define L4SYS_ARG1 (MR_UNUSED_START + 1)
#define L4SYS_ARG2 (MR_UNUSED_START + 2)
#define L4SYS_ARG3 (MR_UNUSED_START + 3)

/* Low 12 bits of MR_TAG carry the tag proper; upper bits are flags */
#define L4_IPC_TAG_MASK 0x00000FFF
/*
 * Servers get sender.
 */
static inline l4id_t l4_get_sender(void)
{
	return (l4id_t)read_mr(MR_SENDER);
}

/*
 * When doing an ipc the sender never has to be explicitly set in
 * the utcb via this function since this information is found out
 * by the microkernel by checking the system caller's id. This is
 * only used for restoring the sender on the utcb in order to
 * complete an earlier ipc (see l4_save_ipcregs/l4_restore_ipcregs).
 */
static inline void l4_set_sender(l4id_t sender)
{
	write_mr(MR_SENDER, sender);
}
/* Replace the size field of an ipc flags word with @size */
static inline unsigned int l4_set_ipc_size(unsigned int word, unsigned int size)
{
	return (word & ~L4_IPC_FLAGS_SIZE_MASK) |
	       ((size << L4_IPC_FLAGS_SIZE_SHIFT) & L4_IPC_FLAGS_SIZE_MASK);
}

/* Extract the size field from an ipc flags word */
static inline unsigned int l4_get_ipc_size(unsigned int word)
{
	return (word & L4_IPC_FLAGS_SIZE_MASK) >> L4_IPC_FLAGS_SIZE_SHIFT;
}
/* Encode which MR slot carries the extended-ipc buffer pointer */
static inline unsigned int l4_set_ipc_msg_index(unsigned int word, unsigned int index)
{
	/* FIXME: Define MR_PRIMARY_TOTAL, MR_TOTAL etc. and use MR_TOTAL HERE! */
	BUG_ON(index > UTCB_SIZE);

	return (word & ~L4_IPC_FLAGS_MSG_INDEX_MASK) |
	       ((index << L4_IPC_FLAGS_MSG_INDEX_SHIFT) &
		L4_IPC_FLAGS_MSG_INDEX_MASK);
}

/* Decode the MR slot index from an ipc flags word */
static inline unsigned int l4_get_ipc_msg_index(unsigned int word)
{
	return (word & L4_IPC_FLAGS_MSG_INDEX_MASK)
	       >> L4_IPC_FLAGS_MSG_INDEX_SHIFT;
}
/* Replace the ipc type bits of @word with @flags */
static inline unsigned int l4_set_ipc_flags(unsigned int word, unsigned int flags)
{
	return (word & ~L4_IPC_FLAGS_TYPE_MASK) |
	       (flags & L4_IPC_FLAGS_TYPE_MASK);
}

/* Extract the ipc type bits of @word */
static inline unsigned int l4_get_ipc_flags(unsigned int word)
{
	return word & L4_IPC_FLAGS_TYPE_MASK;
}
/* Extract the tag field of the current message */
static inline unsigned int l4_get_tag(void)
{
	return read_mr(MR_TAG) & L4_IPC_TAG_MASK;
}

/* Replace the tag field, preserving the flag bits that share MR_TAG */
static inline void l4_set_tag(unsigned int tag)
{
	write_mr(MR_TAG, (read_mr(MR_TAG) & ~L4_IPC_TAG_MASK) |
			 (tag & L4_IPC_TAG_MASK));
}
/* Servers:
 * Sets the message register for returning errors back to client task.
 * These are usually posix error codes.
 */
static inline void l4_set_retval(int retval)
{
	write_mr(MR_RETURN, retval);
}

/* Clients:
 * Learn result of request.
 */
static inline int l4_get_retval(void)
{
	return read_mr(MR_RETURN);
}
/*
* This is useful for stacked IPC. A stacked IPC happens
* when a new IPC is initiated before concluding the current
* one.
*
* This saves the last ipc's parameters such as the sender
* and tag information. Any previously saved data in save
* slots are destroyed. This is fine as IPC stacking is only
* useful if done once.
*/
static inline void l4_save_ipcregs(void)
{
l4_get_utcb()->saved_sender = l4_get_sender();
l4_get_utcb()->saved_tag = l4_get_tag();
}
static inline void l4_restore_ipcregs(void)
{
l4_set_tag(l4_get_utcb()->saved_tag);
l4_set_sender(l4_get_utcb()->saved_sender);
}
/* A task id carries an extra field in its top byte -- presumably
 * the container id (see __cid() below); low 24 bits are the raw id */
#define TASK_CID_MASK 0xFF000000
#define TASK_ID_MASK 0x00FFFFFF
#define TASK_CID_SHIFT 24
/* Strip the top-byte field, leaving the raw thread id */
static inline l4id_t __raw_tid(l4id_t tid)
{
	return tid & TASK_ID_MASK;
}

/* Extract the top-byte field of a thread id */
static inline l4id_t __cid(l4id_t tid)
{
	return (tid & TASK_CID_MASK) >> TASK_CID_SHIFT;
}

/* Ask the kernel for the caller's own thread id */
static inline l4id_t self_tid(void)
{
	struct task_ids ids;

	l4_getid(&ids);
	return ids.tid;
}

/* Caller's thread id with the top-byte field stripped */
static inline l4id_t __raw_self_tid(void)
{
	return __raw_tid(self_tid());
}
/* Send @tag to @to using an L4_IPC_FLAGS_FULL transfer */
static inline int l4_send_full(l4id_t to, unsigned int tag)
{
	l4_set_tag(tag);
	return l4_ipc(to, L4_NILTHREAD, L4_IPC_FLAGS_FULL);
}

/* Receive from @from using an L4_IPC_FLAGS_FULL transfer */
static inline int l4_receive_full(l4id_t from)
{
	return l4_ipc(L4_NILTHREAD, from, L4_IPC_FLAGS_FULL);
}

/* Combined full send-then-receive; both partners must be concrete */
static inline int l4_sendrecv_full(l4id_t to, l4id_t from, unsigned int tag)
{
	BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);

	l4_set_tag(tag);
	return l4_ipc(to, from, L4_IPC_FLAGS_FULL);
}
/* Extended send: ship @size bytes at @buf to @to along with @tag */
static inline int l4_send_extended(l4id_t to, unsigned int tag,
				   unsigned int size, void *buf)
{
	unsigned int flags;

	l4_set_tag(tag);

	/* Build the extended-ipc flags word in a single expression */
	flags = l4_set_ipc_msg_index(
			l4_set_ipc_size(
				l4_set_ipc_flags(0, L4_IPC_FLAGS_EXTENDED),
				size),
			L4SYS_ARG0);

	/* Park the buffer pointer in the MR slot named by the flags */
	write_mr(L4SYS_ARG0, (unsigned long)buf);

	return l4_ipc(to, L4_NILTHREAD, flags);
}

/* Extended receive: accept up to @size bytes from @from into @buf */
static inline int l4_receive_extended(l4id_t from, unsigned int size, void *buf)
{
	unsigned int flags;

	/* Extended receive, bounded by @size, buffer pointer in ARG0 */
	flags = l4_set_ipc_msg_index(
			l4_set_ipc_size(
				l4_set_ipc_flags(0, L4_IPC_FLAGS_EXTENDED),
				size),
			L4SYS_ARG0);

	write_mr(L4SYS_ARG0, (unsigned long)buf);

	return l4_ipc(L4_NILTHREAD, from, flags);
}
/*
 * Return result value as extended IPC.
 *
 * Extended IPC copies up to 2KB user address space buffers.
 * Along with such an ipc, a return value is sent using a primary
 * mr that is used as the return register.
 *
 * It may not be desirable to return a payload on certain conditions
 * (such as an error return value), so a nopayload flag is provided.
 */
static inline int l4_return_extended(int retval, unsigned int size,
				     void *buf, int nopayload)
{
	l4id_t requester = l4_get_sender();
	unsigned int flags;

	l4_set_retval(retval);

	/* Extended ipc with the payload pointer parked in ARG0 */
	flags = l4_set_ipc_msg_index(
			l4_set_ipc_flags(0, L4_IPC_FLAGS_EXTENDED),
			L4SYS_ARG0);
	write_mr(L4SYS_ARG0, (unsigned long)buf);

	/* Suppressing the payload means sending a zero size */
	flags = l4_set_ipc_size(flags, nopayload ? 0 : size);

	return l4_ipc(requester, L4_NILTHREAD, flags);
}
/*
 * NOTE(review): unimplemented stub -- it ignores all of its
 * arguments and unconditionally returns 0 (success). Callers get
 * no data transfer. Intended to mirror l4_sendrecv() with extended
 * flags on both phases.
 */
static inline int l4_sendrecv_extended(l4id_t to, l4id_t from,
				       unsigned int tag, void *buf)
{
	/* Need to imitate sendrecv but with extended send/recv flags */
	return 0;
}
/* Plain (primary-mr only) send of @tag to @to */
static inline int l4_send(l4id_t to, unsigned int tag)
{
	l4_set_tag(tag);
	return l4_ipc(to, L4_NILTHREAD, 0);
}

/* Plain send-then-receive; both partners must be concrete */
static inline int l4_sendrecv(l4id_t to, l4id_t from, unsigned int tag)
{
	BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);

	l4_set_tag(tag);
	return l4_ipc(to, from, 0);
}

/* Plain receive from @from */
static inline int l4_receive(l4id_t from)
{
	return l4_ipc(L4_NILTHREAD, from, 0);
}
/*
 * Debug helper: dump the first six primary message registers.
 *
 * Fix: was declared with an empty (unspecified-arguments) parameter
 * list; now an explicit (void) prototype.
 */
static inline void l4_print_mrs(void)
{
	printf("Message registers: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
	       read_mr(0), read_mr(1), read_mr(2), read_mr(3),
	       read_mr(4), read_mr(5));
}
/* Servers:
 * Conclude a request by sending @retval back to the task whose id
 * is currently held in MR_SENDER.
 */
static inline int l4_ipc_return(int retval)
{
	l4id_t requester = l4_get_sender();

	l4_set_retval(retval);

	/* No tag here: tagging would clobber the return register */
	return l4_ipc(requester, L4_NILTHREAD, 0);
}
/* Userspace virtual address range allocator (see addr.h pool) */
void *l4_new_virtual(int npages);
void *l4_del_virtual(void *virt, int npages);

/* A helper that translates and maps a physical address to virtual.
 * NOTE(review): the l4_new_virtual() result is not checked before
 * being passed to l4_map(); and PTR_ERR() is used here to turn a
 * negative int into a pointer (ERR_PTR-style in Linux naming) --
 * verify the macro's definition matches that intent. */
static inline void *l4_map_helper(void *phys, int npages)
{
	struct task_ids ids;
	int err;
	void *virt = l4_new_virtual(npages);

	l4_getid(&ids);
	if ((err = l4_map(phys, virt, npages,
			  MAP_USR_DEFAULT, ids.tid)) < 0)
		return PTR_ERR(err);

	return virt;
}
/* Unmap @npages at @virt from the caller's own space and release
 * the virtual range back to the allocator. Always returns 0/NULL;
 * the l4_unmap() result is not propagated. */
static inline void *l4_unmap_helper(void *virt, int npages)
{
	struct task_ids ids;

	l4_getid(&ids);
	l4_unmap(virt, npages, ids.tid);
	l4_del_virtual(virt, npages);
	return 0;
}
/* Low 16 bits of the THREAD_DESTROY action word carry the exit code */
#define L4_EXIT_MASK 0xFFFF

/* Destroy the calling thread, encoding @exit_code into the action */
static inline void l4_exit(unsigned int exit_code)
{
	struct task_ids ids;

	l4_getid(&ids);
	l4_thread_control(THREAD_DESTROY |
			  (exit_code & L4_EXIT_MASK),
			  &ids);
}
#endif /* __L4LIB_SYSLIB_H__ */

View File

@@ -0,0 +1,8 @@
/*
 * Fix: the #ifndef tested a three-underscore macro name while the
 * #define created the two-underscore one, so the guard never
 * latched and the header body was reprocessed on every inclusion.
 */
#ifndef __L4LIB_ARM_TYPES_H__
#define __L4LIB_ARM_TYPES_H__

/* Id value that never names a valid task */
#define TASK_ID_INVALID 0xFFFFFFFF

#include <l4/arch/arm/types.h>

#endif /* __L4LIB_ARM_TYPES_H__ */

View File

@@ -0,0 +1,78 @@
/*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#ifndef __ARM_UTCB_H__
#define __ARM_UTCB_H__
/* Fixed virtual addresses exported by the kernel:
 * USER_UTCB_REF == L4_KIP_ADDRESS + UTCB_KIP_OFFSET */
#define USER_UTCB_REF 0xFF000050
#define L4_KIP_ADDRESS 0xFF000000
#define UTCB_KIP_OFFSET 0x50
#ifndef __ASSEMBLY__
#include <l4lib/types.h>
#include <l4/macros.h>
#include <l4/lib/math.h>
#include INC_GLUE(message.h)
#include INC_GLUE(memory.h)
#include <string.h>
#include <stdio.h>
#include L4LIB_INC_SUBARCH(utcb.h)
/*
* See kernel glue/arch/message.h for utcb details
*/
extern struct kip *kip;
/* Functions to read/write utcb registers */
static inline unsigned int read_mr(int offset)
{
if (offset < MR_TOTAL)
return l4_get_utcb()->mr[offset];
else
return l4_get_utcb()->mr_rest[offset - MR_TOTAL];
}
static inline void write_mr(unsigned int offset, unsigned int val)
{
if (offset < MR_TOTAL)
l4_get_utcb()->mr[offset] = val;
else
l4_get_utcb()->mr_rest[offset - MR_TOTAL] = val;
}
/*
 * Start of the utcb's extended-ipc buffer area (mr_rest).
 *
 * Fix: was declared with an empty (unspecified-arguments) parameter
 * list; now an explicit (void) prototype.
 */
static inline void *utcb_full_buffer(void)
{
	return &l4_get_utcb()->mr_rest[0];
}
/* Copy a string into the utcb full-ipc buffer.
 * NOTE(review): strncpy leaves the destination unterminated when
 * @src fills the whole L4_UTCB_FULL_BUFFER_SIZE -- callers must
 * bound their strings or terminate explicitly. */
static inline char *utcb_full_strcpy_from(const char *src)
{
	return strncpy((char *)&l4_get_utcb()->mr_rest[0], src,
		       L4_UTCB_FULL_BUFFER_SIZE);
}

/* Copy @size bytes into the full-ipc buffer, clamped to its size */
static inline void *utcb_full_memcpy_from(const char *src, int size)
{
	return memcpy(&l4_get_utcb()->mr_rest[0], src,
		      min(size, L4_UTCB_FULL_BUFFER_SIZE));
}

/* Copy a string out of the full-ipc buffer (same strncpy caveat) */
static inline char *utcb_full_strcpy_to(char *dst)
{
	return strncpy(dst, (char *)&l4_get_utcb()->mr_rest[0],
		       L4_UTCB_FULL_BUFFER_SIZE);
}

/* Copy @size bytes out of the full-ipc buffer, clamped to its size */
static inline void *utcb_full_memcpy_to(char *dst, int size)
{
	return memcpy(dst, &l4_get_utcb()->mr_rest[0],
		      min(size, L4_UTCB_FULL_BUFFER_SIZE));
}
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_UTCB_H__ */

View File

@@ -0,0 +1,3 @@
/* Empty perfmon stub -- presumably for subarches without
 * performance monitor support; verify against the v7 perfmon.h. */
#ifndef __PERFMON_H__
#define __PERFMON_H__	/* Fix: #define was missing, guard never latched */
#endif

View File

@@ -0,0 +1,21 @@
#ifndef __ARM_V5_UTCB_H__
#define __ARM_V5_UTCB_H__

/*
 * Address of the Kernel Interface Page slot in which the kernel
 * keeps the current thread's utcb pointer up to date.
 */
extern struct utcb **kip_utcb_ref;

/*
 * Obtain this thread's private TLS area (aka utcb).
 *
 * kip_utcb_ref points at the KIP's utcb slot; the kernel rewrites
 * that slot on every context switch, so a double dereference always
 * yields the utcb of whichever thread is currently running.
 */
static inline struct utcb *l4_get_utcb()
{
	struct utcb **kip_slot = kip_utcb_ref;

	return *kip_slot;
}

#endif /* __ARM_V5_UTCB_H__ */

View File

@@ -0,0 +1,405 @@
/*
* ARMv7 Performance Monitor operations
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#ifndef __PERFMON_H__
#define __PERFMON_H__

#include <l4lib/types.h>

/* Perfmon control register (PMCR) bit positions */
#define PMCR_DP_BIT 5 /* Disable prohibited */
#define PMCR_X_BIT 4 /* Export event enable */
#define PMCR_D_BIT 3 /* 64-cycle granularity */
#define PMCR_C_BIT 2 /* PMCCNTR reset */
#define PMCR_P_BIT 1 /* Events all reset */
#define PMCR_E_BIT 0 /* Enable all */

/* Obtain number of event counters (PMCR[15:11]) */
#define PMCR_N_SHIFT 11
#define PMCR_N_MASK 0x1F

/* Special bit for cycle counter in enable/overflow registers */
#define PMCCNTR_BIT 31

/*
 * Performance Events
 */

/* Generic v7 events */
#define PERFMON_EVENT_SOFTINC 0
#define PERFMON_EVENT_IFETCH_L1CREFILL 1
#define PERFMON_EVENT_IFETCH_TLBREFILL 2
#define PERFMON_EVENT_DFETCH_L1CREFILL 3
#define PERFMON_EVENT_DFETCH_L1CACCESS 4
#define PERFMON_EVENT_DFETCH_TLBREFILL 5
#define PERFMON_EVENT_MEMREAD_INSTR 6
#define PERFMON_EVENT_MEMWRITE_INSTR 7
#define PERFMON_EVENT_ALL_INSTR 8
#define PERFMON_EVENT_EXCEPTION 9
#define PERFMON_EVENT_EXCEPTION_RETURN 10
#define PERFMON_EVENT_CONTEXTIDR_CHANGE 11
#define PERFMON_EVENT_PC_CHANGE 12
#define PERFMON_EVENT_IMM_BRANCH 13
#define PERFMON_EVENT_FUNCTION_RETURN 14
#define PERFMON_EVENT_UNALIGNED_ACCESS 15
#define PERFMON_EVENT_BRANCH_MISS 16
#define PERFMON_EVENT_RAW_CYCLE_COUNT 17
#define PERFMON_EVENT_BRANCH_MAYBEHIT 18

/*
 * Cortex-A9 events (only relevant ones)
 * 0x40-2, 0x6E, 0x70, 0x71-4, 0x80-0x81, 0x8A-8B
 * 0xA0-5 omitted
 */

/*
 * Linefill not satisfied from other cpu caches but
 * has to go to external memory
 */
#define PERFMON_EVENT_SMP_LINEFILL_MISS 0x50

/* Linefill satisfied from other cpu caches */
#define PERFMON_EVENT_SMP_LINEFILL_HIT 0x51

/* Icache refill stall cycles on cpu pipeline */
#define PERFMON_EVENT_ICACHE_CPU_STALL 0x60

/* Dcache refill stall cycles on cpu pipeline */
#define PERFMON_EVENT_DCACHE_CPU_STALL 0x61

/* TLB miss stall cycles on cpu pipeline */
#define PERFMON_EVENT_TLBMISS_CPU_STALL 0x62

#define PERFMON_EVENT_STREX_SUCCESS 0x63
#define PERFMON_EVENT_STREX_FAIL 0x64
#define PERFMON_EVENT_DCACHE_EVICTION 0x65

/* Issue stage can't proceed to dispatch any instruction */
#define PERFMON_EVENT_PIPELINE_CANT_ISSUE 0x66

/* Issue stage empty */
#define PERFMON_EVENT_PIPELINE_ISSUE_EMPTY 0x67

/* Register renamed instructions */
#define PERFMON_EVENT_REGRENAMED_INSTR 0x68

#define PERFMON_EVENT_CPUSTALL_ITLB_MISS 0x82
#define PERFMON_EVENT_CPUSTALL_DTLB_MISS 0x83
#define PERFMON_EVENT_CPUSTALL_IUTLB_MISS 0x84
#define PERFMON_EVENT_CPUSTALL_DUTLB_MISS 0x85
#define PERFMON_EVENT_CPUSTALL_DMB 0x86

#define PERFMON_EVENT_ISB_COUNT 0x90
#define PERFMON_EVENT_DSB_COUNT 0x91
#define PERFMON_EVENT_DMB_COUNT 0x92
#define PERFMON_EVENT_EXTIRQ_COUNT 0x93
/* Read PMCR (perfmon control); isb serializes before the value is used */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_ctrl(void)
{
	volatile u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c12, 0\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCR (perfmon control) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_ctrl(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 0"
		:
		: "r" (word)
	);
}

/* Read PMCNTENSET (counter enable set) with isb serialization.
 * NOTE(review): the set/ctrl readers use volatile + isb while the
 * clr/overflow readers below do not -- confirm the asymmetry is
 * intentional. */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cntenset(void)
{
	volatile u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c12, 1\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCNTENSET: a 1 bit enables the corresponding counter */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cntenset(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 1"
		:
		: "r" (word)
	);
}

/* Read PMCNTENCLR (counter enable clear) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cntenclr(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c12, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCNTENCLR: a 1 bit disables the corresponding counter */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cntenclr(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 2"
		:
		: "r" (word)
	);
}
/* Read PMOVSR (overflow status) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_overflow(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c12, 3"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMOVSR: writing 1 bits clears overflow flags */
static inline void __attribute__((always_inline))
cp15_write_perfmon_overflow(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 3"
		:
		: "r" (word)
	);
}

/* Write PMSWINC (software increment), write-only register */
static inline void __attribute__((always_inline))
cp15_write_perfmon_softinc(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 4"
		:
		: "r" (word)
	);
}

/* Read PMSELR (event counter selection) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evcntsel(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c12, 5"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMSELR: selects which event counter c9,c13 accessors hit */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evcntsel(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c12, 5"
		:
		: "r" (word)
	);
}
/* Read PMCCNTR (cycle counter); isb serializes the read point */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cyccnt(void)
{
	volatile u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c13, 0\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCCNTR (cycle counter) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cyccnt(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c13, 0"
		:
		: "r" (word)
	);
}

/* Read PMXEVTYPER (event type of the counter selected via PMSELR) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evtypesel(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c13, 1"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMXEVTYPER for the currently selected counter */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evtypesel(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c13, 1"
		:
		: "r" (word)
	);
}

/* Read PMXEVCNTR (value of the counter selected via PMSELR) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evcnt(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c13, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMXEVCNTR for the currently selected counter */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evcnt(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c13, 2"
		:
		: "r" (word)
	);
}
/* Read PMUSERENR (user-mode access enable) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_useren(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c14, 0"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMUSERENR: grants/revokes user-mode perfmon access */
static inline void __attribute__((always_inline))
cp15_write_perfmon_useren(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c14, 0"
		:
		: "r" (word)
	);
}

/* Read PMINTENSET (overflow interrupt enable set) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_intenset(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c14, 1"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMINTENSET */
static inline void __attribute__((always_inline))
cp15_write_perfmon_intenset(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c14, 1"
		:
		: "r" (word)
	);
}

/* Read PMINTENCLR (overflow interrupt enable clear) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_intenclr(void)
{
	u32 val = 0;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c9, c14, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMINTENCLR */
static inline void __attribute__((always_inline))
cp15_write_perfmon_intenclr(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c9, c14, 2"
		:
		: "r" (word)
	);
}
#include <stdio.h>

#if defined (CONFIG_DEBUG_PERFMON_USER)
/*
 * Read the cycle counter, warning on stdout if PMCCNTR has
 * overflowed since it was last reset.
 *
 * Fix: this and the prototypes below used empty (unspecified)
 * parameter lists; now explicit (void) prototypes.
 */
static inline
u32 perfmon_read_cyccnt(void)
{
	u32 cnt = cp15_read_perfmon_cyccnt();
	u32 ovfl = cp15_read_perfmon_overflow();

	/* Detect overflow and signal something was wrong */
	if (ovfl & (1 << PMCCNTR_BIT))
		printf("%s: Overflow.\n", __FUNCTION__);

	return cnt;
}

/* Implemented elsewhere -- presumably reset/restart helpers for
 * the cycle counter; verify against the .c implementation */
void perfmon_reset_start_cyccnt(void);
u32 perfmon_read_reset_start_cyccnt(void);
#endif

/* One-time perfmon setup; call before using the counters */
void perfmon_init(void);
#endif /* __PERFMON_H__ */

View File

@@ -0,0 +1,59 @@
/*
 * ARMv7 utcb access via the user read-only thread id register.
 *
 * Fix: this header reused the ARMv5 header's include guard
 * (__ARM_V5_UTCB_H__, see the v5 utcb.h), so whichever of the two
 * was included first would silently mask the other. Renamed to
 * __ARM_V7_UTCB_H__.
 */
#ifndef __ARM_V7_UTCB_H__
#define __ARM_V7_UTCB_H__

/*
 * NOTE: Any changes you make here, you *MUST* change
 * utcb_address() macro in syscall.S assembler.
 */

/* Read Thread ID User RW register (TPIDRURW) */
static inline u32 l4_cp15_read_tid_usr_rw(void)
{
	volatile u32 val;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c13, c0, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write Thread ID User RW register (TPIDRURW) */
static inline void l4_cp15_write_tid_usr_rw(volatile u32 val)
{
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c13, c0, 2"
		:
		: "r" (val)
	);
}

/* Read Thread ID User RO register (TPIDRURO) */
static inline u32 l4_cp15_read_tid_usr_ro(void)
{
	volatile u32 val;
	__asm__ __volatile__ (
		"mrc p15, 0, %0, c13, c0, 3"
		: "=r" (val)
		:
	);
	return val;
}

/*
 * In ARMv7, utcb resides in the userspace read-only
 * thread register. This adds the benefit of avoiding
 * dirtying the cache and extra management for smp since
 * it is per-cpu.
 */
static inline struct utcb *l4_get_utcb(void)
{
	return (struct utcb *)l4_cp15_read_tid_usr_ro();
}

#endif /* __ARM_V7_UTCB_H__ */

View File

@@ -0,0 +1,13 @@
/*
 * Cache control operations
 *
 * Copyright (C) 2009 Bora Sahin
 */
#ifndef __L4_CACHE_CONTROL__
#define __L4_CACHE_CONTROL__

/* Userspace-visible wrapper: just re-exports the kernel cache api */
#include <l4/api/cache.h>

#endif /* __L4_CACHE_CONTROL__ */

View File

@@ -0,0 +1,27 @@
#ifndef __MM0_EXREGS_H__
#define __MM0_EXREGS_H__

#include <l4/api/exregs.h>

/* Fill an exregs_data request before calling l4_exchange_registers() */
void exregs_set_stack(struct exregs_data *s, unsigned long sp);
void exregs_set_mr(struct exregs_data *s, int offset, unsigned long val);
void exregs_set_pc(struct exregs_data *s, unsigned long pc);
void exregs_set_pager(struct exregs_data *s, l4id_t pagerid);
void exregs_set_utcb(struct exregs_data *s, unsigned long virt);
void exregs_set_read(struct exregs_data *exregs);

/* Read results back out of a completed exregs_data */
unsigned long exregs_get_utcb(struct exregs_data *s);
unsigned long exregs_get_stack(struct exregs_data *s);

/*
 * Wishlist of planned accessors (not yet implemented):
 *
 * exregs_set_stack(unsigned long sp)
 * exregs_set_pc(unsigned long pc)
 * exregs_set_return(unsigned long retreg)
 * exregs_set_arg0(unsigned long arg0)
 * exregs_set_mr0(unsigned long mr0)
 * exregs_set_mr_sender(unsigned long sender)
 * exregs_set_mr_return(unsigned long retreg)
 * exregs_set_all(unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3,
 *		  unsigned long sp, unsigned long pc, u32 valid_vector, l4id_t pager)
 */

#endif /* __MM0_EXREGS_H__ */

View File

@@ -0,0 +1,6 @@
#ifndef __L4LIB_INIT__
#define __L4LIB_INIT__

/* One-time l4 library initialization -- presumably run at task
 * startup before user code; verify against the crt/startup code */
void __l4_init(void);

#endif

View File

@@ -0,0 +1,77 @@
/*
* Copyright (C) 2007, 2008 Bahadir Balban
*
* This file contains ipc definitions that are needed for server tasks
* to communicate with each other. For example common shared memory ids
* between two servers, or common ipc tags used between two servers are
* defined here.
*/
#ifndef __IPCDEFS_H__
#define __IPCDEFS_H__

#include <l4/api/ipc.h>
#include <l4lib/types.h>

/*** IPC Tags used between server tasks ***/

/*
 * Tag 0 for L4_IPC_TAG_PFAULT
 * Tag 1 for L4_IPC_TAG_UNDEF_FAULT
 */

/* For ping ponging */
#define L4_IPC_TAG_SYNC_EXTENDED 3
#define L4_IPC_TAG_SYNC_FULL 4
#define L4_IPC_TAG_SYNC 5

/* Posix system call tags (6-28) */
#define L4_IPC_TAG_SHMGET 6
#define L4_IPC_TAG_SHMAT 7
#define L4_IPC_TAG_SHMDT 8
#define L4_IPC_TAG_MMAP 9
#define L4_IPC_TAG_MUNMAP 10
#define L4_IPC_TAG_MSYNC 11
#define L4_IPC_TAG_OPEN 12
#define L4_IPC_TAG_READ 13
#define L4_IPC_TAG_WRITE 14
#define L4_IPC_TAG_LSEEK 15
#define L4_IPC_TAG_CLOSE 16
#define L4_IPC_TAG_BRK 17
#define L4_IPC_TAG_READDIR 18
#define L4_IPC_TAG_MKDIR 19
#define L4_IPC_TAG_EXECVE 20
#define L4_IPC_TAG_CHDIR 21
#define L4_IPC_TAG_FORK 22
#define L4_IPC_TAG_STAT 23
#define L4_IPC_TAG_FSTAT 24
#define L4_IPC_TAG_FSYNC 25
#define L4_IPC_TAG_CLONE 26
#define L4_IPC_TAG_EXIT 27
#define L4_IPC_TAG_WAIT 28

/* Tags for ipc between fs0 and mm0 (40-48) */
#define L4_IPC_TAG_TASKDATA 40
#define L4_IPC_TAG_PAGER_OPEN 41 /* vfs sends the pager open file data. */
#define L4_IPC_TAG_PAGER_READ 42 /* Pager reads file contents from vfs */
#define L4_IPC_TAG_PAGER_WRITE 43 /* Pager writes file contents to vfs */
#define L4_IPC_TAG_PAGER_CLOSE 44 /* Pager notifies vfs of file close */
#define L4_IPC_TAG_PAGER_UPDATE_STATS 45 /* Pager updates file stats in vfs */
#define L4_IPC_TAG_NOTIFY_FORK 46 /* Pager notifies vfs of process fork */
#define L4_IPC_TAG_NOTIFY_EXIT 47 /* Pager notifies vfs of process exit */
#define L4_IPC_TAG_PAGER_OPEN_BYPATH 48 /* Pager opens a vfs file by pathname */

#define L4_REQUEST_CAPABILITY 50 /* Request a capability from pager */

/* Id of the pager task, defined by the library */
extern l4id_t pagerid;

/* For ipc to uart service (TODO: Shared mapping buffers???) */
#define L4_IPC_TAG_UART_SENDCHAR 51 /* Single char send (output) */
#define L4_IPC_TAG_UART_RECVCHAR 52 /* Single char recv (input) */
#define L4_IPC_TAG_UART_SENDBUF 53 /* Buffered send */
#define L4_IPC_TAG_UART_RECVBUF 54 /* Buffered recv */

/* For ipc to timer service (TODO: Shared mapping buffers???) */
#define L4_IPC_TAG_TIMER_GETTIME 55
#define L4_IPC_TAG_TIMER_SLEEP 56
#define L4_IPC_TAG_TIMER_WAKE_THREADS 57

#endif /* __IPCDEFS_H__ */

View File

@@ -0,0 +1,7 @@
#ifndef __L4LIB_IRQ_H__
#define __L4LIB_IRQ_H__

/* Wait for irq @irqnum using notification slot @slot.
 * NOTE(review): semantics inferred from the name -- verify against
 * the implementation. */
int l4_irq_wait(int slot, int irqnum);

#endif /* __L4LIB_IRQ_H__ */

View File

@@ -0,0 +1,15 @@
/*
* Kernel Interface Page
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#ifndef __L4LIB_KIP_H__
#define __L4LIB_KIP_H__

/* Use the kernel header */
#include <l4lib/types.h>
#include <l4/api/kip.h>
#include L4LIB_INC_ARCH(syscalls.h)

#endif /* __L4LIB_KIP_H__ */

View File

@@ -0,0 +1,27 @@
/*
* Address allocation pool.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ADDR_H__
#define __ADDR_H__

#include <l4lib/lib/idpool.h>

/* Address pool to allocate from a range of addresses */
struct address_pool {
	struct id_pool *idpool;		/* Bitmap backing the range */
	unsigned long start;		/* First address of the range */
	unsigned long end;		/* End of the range */
};

/* Initialize @pool over [start, end) using a caller-supplied id pool */
int address_pool_init(struct address_pool *pool,
		      struct id_pool *idpool,
		      unsigned long start, unsigned long end);

/* Like address_pool_init() but also allocates the id pool itself */
int address_pool_alloc_init(struct address_pool *pool,
			    unsigned long start, unsigned long end,
			    unsigned int size);

/* Allocate/release @nitems objects of @size from the pool */
void *address_new(struct address_pool *pool, int nitems, int size);
int address_del(struct address_pool *, void *addr, int nitems, int size);

#endif /* __ADDR_H__ */

View File

@@ -0,0 +1,44 @@
#ifndef __BIT_H__
#define __BIT_H__

#include <l4lib/types.h>

/* Presumably count-leading-zeros -- verify the implementation */
unsigned int __clz(unsigned int bitvector);

/* Bitmap allocation primitives operating on u32 word arrays */
int find_and_set_first_free_bit(u32 *word, unsigned int lastbit);
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
					int nbits);
int check_and_clear_bit(u32 *word, int bit);
int check_and_clear_contig_bits(u32 *word, int first, int nbits);
int check_and_set_bit(u32 *word, int bit);
/* Set: or @flags into the word at @w */
static inline void setbit(unsigned int *w, unsigned int flags)
{
	*w |= flags;
}

/* Clear: remove @flags from the word at @w */
static inline void clrbit(unsigned int *w, unsigned int flags)
{
	*w &= ~flags;
}

/* Test: non-zero iff any bit of @flags is set in *w.
 * Fix: parameter is now const -- the function never writes. */
static inline int tstbit(const unsigned int *w, unsigned int flags)
{
	return *w & flags;
}

/* Test and clear: report whether any of @flags were set, then clear them */
static inline int tstclr(unsigned int *w, unsigned int flags)
{
	int res = tstbit(w, flags);

	clrbit(w, flags);
	return res;
}
#endif /* __BIT_H__ */

View File

@@ -0,0 +1,77 @@
/*
 * Capability-related management.
 *
 * Copyright (C) 2009 Bahadir Balban
 */
#ifndef __LIBL4_CAPABILITY_H__
#define __LIBL4_CAPABILITY_H__

#include <l4lib/types.h>
#include <l4/lib/list.h>
#include <l4/api/capability.h>
#include <l4/generic/cap-types.h>

/* Pretty-printers for single capabilities and capability arrays */
void cap_dev_print(struct capability *cap);
void cap_print(struct capability *cap);
void cap_array_print(int total_caps, struct capability *caparray);

/*
 * Definitions for lists of capabilities
 */
struct cap_list {
	int ncaps;		/* Number of caps on the list */
	struct link caps;	/* List head */
};

/* Reset the list to empty */
static inline void cap_list_init(struct cap_list *clist)
{
	clist->ncaps = 0;
	link_init(&clist->caps);
}

/* Add one capability to the list, keeping the count in sync */
static inline void cap_list_insert(struct capability *cap,
				   struct cap_list *clist)
{
	list_insert(&cap->list, &clist->caps);
	clist->ncaps++;
}

/* Detach a whole list of capabilities from list head */
static inline struct capability *
cap_list_detach(struct cap_list *clist)
{
	struct link *list = list_detach(&clist->caps);
	clist->ncaps = 0;
	return link_to_struct(list, struct capability, list);
}

/* Attach a whole list of capabilities to list head */
static inline void cap_list_attach(struct capability *cap,
				   struct cap_list *clist)
{
	/* Attach as if cap is the list and clist is the element */
	list_insert(&clist->caps, &cap->list);

	/*
	 * Count the number of caps attached.
	 * NOTE(review): this increments ncaps once per element of the
	 * combined list without resetting it first, so it assumes
	 * clist held ncaps == 0 worth of elements on entry -- verify
	 * callers (e.g. cap_list_move) never pass a non-empty @clist.
	 */
	list_foreach_struct(cap, &clist->caps, list)
		clist->ncaps++;
}

/* Move every capability from @from onto @to */
static inline void cap_list_move(struct cap_list *to,
				 struct cap_list *from)
{
	struct capability *cap_head = cap_list_detach(from);
	cap_list_attach(cap_head, to);
}

/*
 * Definitions for reading from the library capability array
 */
void __l4_capability_init(void);
struct capability *cap_get_by_type(unsigned int cap_type);
struct capability *cap_get_physmem(unsigned int cap_type);
int caps_read_all(void);
struct capability* cap_get_all();
int cap_get_count();

#endif /* __LIBL4_CAPABILITY_H__ */

View File

@@ -0,0 +1,32 @@
#ifndef __IDPOOL_H__
#define __IDPOOL_H__

#include <l4lib/lib/bit.h>
#include <string.h>
#include <l4/macros.h>
#include INC_GLUE(memory.h)

/* Bitmap-backed id allocator; bitmap[] is a flexible array member */
struct id_pool {
	int nwords;	/* Words in bitmap[] */
	int bitlimit;	/* Highest usable bit (id) */
	u32 bitmap[];	/* Allocation bitmap */
};

/* Copy one id pool to another by calculating its size */
static inline void id_pool_copy(struct id_pool *to, struct id_pool *from, int totalbits)
{
	int nwords = BITWISE_GETWORD(totalbits);
	memcpy(to, from, nwords * SZ_WORD + sizeof(struct id_pool));
}

void id_pool_init(struct id_pool *idpool, int bits);
struct id_pool *id_pool_new_init(int mapsize);

/* Allocate/free/query single ids */
int id_new(struct id_pool *pool);
int id_del(struct id_pool *pool, int id);
int id_get(struct id_pool *pool, int id);
int id_is_empty(struct id_pool *pool);

/* Allocate/free runs of consecutive ids */
int ids_new_contiguous(struct id_pool *pool, int numids);
int ids_del_contiguous(struct id_pool *pool, int first, int numids);

#endif /* __IDPOOL_H__ */

View File

@@ -0,0 +1,65 @@
#ifndef __THREAD_H__
#define __THREAD_H__
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(syslib.h)
#include L4LIB_INC_ARCH(syscalls.h)
#include <l4lib/exregs.h>
#include <l4lib/mutex.h>
#include <l4/api/thread.h>
#include <l4/lib/list.h>
/*
 * Library specific-flags for thread creation
 * (kept in the nibble reserved for userspace, see TC_USER_FLAGS_MASK)
 */
#define TC_USER_FLAGS_MASK 0x000F0000
#define TC_NOSTART 0x00010000 /* Presumably: create without starting; confirm in thread_create() */

/* For same space -- NOTE(review): referent unclear; presumably stack size when sharing a space */
#define STACK_SIZE PAGE_SIZE

/* Total threads the library supports */
#define THREADS_TOTAL 10

/*
 * Keeps track of threads in the system
 * created by the pager
 */
struct l4_thread_list {
	int total; /* Total number of threads */
	struct l4_mutex lock; /* Threads list lock */
	struct link thread_list; /* Threads list */
	struct mem_cache *thread_cache; /* Cache for thread structures */
};

/* Per-thread bookkeeping kept by the library */
struct l4_thread {
	struct task_ids ids; /* Thread ids */
	struct l4_mutex lock; /* Lock for thread struct */
	struct link list; /* Link to list of threads */
	unsigned long *stack; /* Stack (grows downwards) */
	struct utcb *utcb; /* UTCB address */
};
/*
 * These are thread calls that are meant to be
 * called by library users
 */

/*
 * Create a thread running @func(@args). On success the new thread's
 * bookkeeping struct is returned through @tptr. @flags takes the
 * kernel TC_* creation flags plus library flags such as TC_NOSTART.
 */
int thread_create(int (*func)(void *), void *args, unsigned int flags,
		  struct l4_thread **tptr);

/* Wait on thread @t -- presumably until it exits; confirm in impl */
int thread_wait(struct l4_thread *t);

/* Terminate the calling thread with @exitcode */
void thread_exit(int exitcode);

/*
 * This is to be called only if to-be-destroyed thread is in
 * sane condition for destruction
 */
int thread_destroy(struct l4_thread *thread);

/* Library init function called by __container_init */
void __l4_threadlib_init(void);
void l4_parent_thread_init(void);

/* Internal library state, defined in the thread library sources */
extern struct mem_cache *utcb_cache, *stack_cache;
extern struct l4_thread_list l4_thread_list;
extern void setup_new_thread(void);

#endif /* __THREAD_H__ */

View File

@@ -0,0 +1,17 @@
/*
* Generic macros for cache operations
*
* Copyright (C) 2009 B Labs Ltd.
*/
#ifndef __CACHE_CONTROL_H__
#define __CACHE_CONTROL_H__

#include L4LIB_INC_GLUE(cache.h)

/*
 * Generic cache/TLB maintenance operation names, aliased to the
 * architecture-specific ARCH_* definitions from the glue layer.
 */
#define L4_INVALIDATE_ICACHE ARCH_INVALIDATE_ICACHE
#define L4_INVALIDATE_DCACHE ARCH_INVALIDATE_DCACHE
#define L4_CLEAN_DCACHE ARCH_CLEAN_DCACHE
#define L4_CLEAN_INVALIDATE_DCACHE ARCH_CLEAN_INVALIDATE_DCACHE
#define L4_INVALIDATE_TLB ARCH_INVALIDATE_TLB

#endif /* __CACHE_CONTROL_H__ */

View File

@@ -0,0 +1,96 @@
/*
* Syscall API for capability manipulation
*
* Copyright (C) 2009 Bahadir Balban
*/
#ifndef __API_CAPABILITY_H__
#define __API_CAPABILITY_H__
#include <l4lib/lib/list.h>
#include L4LIB_INC_ARCH(types.h)
/* Capability syscall request types */
#define CAP_CONTROL_NCAPS 0x00000000
#define CAP_CONTROL_READ 0x00000001
#define CAP_CONTROL_SHARE 0x00000002
#define CAP_CONTROL_GRANT 0x00000003
#define CAP_CONTROL_REPLICATE 0x00000004
#define CAP_CONTROL_SPLIT 0x00000005
#define CAP_CONTROL_DEDUCE 0x00000006
#define CAP_CONTROL_DESTROY 0x00000007

/* Flags qualifying a CAP_CONTROL_SHARE request (low nibble) */
#define CAP_SHARE_MASK 0x0000000F
#define CAP_SHARE_SINGLE 0x00000001
#define CAP_SHARE_ALL_CONTAINER 0x00000002
#define CAP_SHARE_ALL_SPACE 0x00000003

/* Flags qualifying a CAP_CONTROL_GRANT request (low nibble) */
#define CAP_GRANT_MASK 0x0000000F
#define CAP_GRANT_SINGLE 0x00000001
#define CAP_GRANT_IMMUTABLE 0x00000004

/* Flags qualifying a CAP_CONTROL_SPLIT request (low nibble) */
#define CAP_SPLIT_MASK 0x0000000F
#define CAP_SPLIT_SIZE 0x00000001
#define CAP_SPLIT_ACCESS 0x00000002
#define CAP_SPLIT_RANGE 0x00000003 /* Returns -EPERM */
/*
* A capability is a unique representation of security
* qualifiers on a particular resource.
*
* In this structure:
*
* The capid denotes the unique capability ID.
* The resid denotes the unique ID of targeted resource.
* The owner denotes the unique ID of the one and only capability owner. This is
* almost always a thread ID.
*
* The type field contains two types:
* - The capability type,
* - The targeted resource type.
*
 * The targeted resource type denotes what type of resource the capability is
* allowed to operate on. For example a thread, a thread group, an address space
* or a memory can be of this type.
*
* The capability type defines the general set of operations allowed on a
* particular resource. For example a capability type may be thread_control,
* exchange_registers, ipc, or map operations. A resource type may be such as a
* thread, a thread group, a virtual or physical memory region.
*
* There are also quantitative capability types. While their names denote
* quantitative objects such as memory, threads, and address spaces, these
* types actually define the quantitative operations available on those
* resources such as creation and deletion of a thread, allocation and
* deallocation of a memory region etc.
*
* The access field denotes the fine-grain operations available on a particular
* resource. The meaning of each bitfield differs according to the type of the
* capability. For example, for a capability type thread_control, the bitfields
* may mean suspend, resume, create, delete etc.
*/
/* See the block comment above for the overall capability model */
struct capability {
	struct link list; /* Link for chaining onto capability lists */

	/* Capability identifiers */
	l4id_t capid; /* Unique capability ID */
	l4id_t owner; /* Capability owner ID */
	l4id_t resid; /* Targeted resource ID */
	unsigned int type; /* Capability and target resource type */

	/* Capability limits/permissions */
	u32 access; /* Permitted operations */

	/* Limits on the resource (NOTE: must never have signed type) */
	unsigned long start; /* Resource start value */
	unsigned long end; /* Resource end value */
	unsigned long size; /* Resource size */

	/* Use count of resource */
	unsigned long used;

	/* Device attributes, if this is a device. */
	unsigned int attr;
	l4id_t irq; /* Device irq, if this is a device */
};

#endif /* __API_CAPABILITY_H__ */

View File

@@ -0,0 +1,148 @@
#ifndef __ERRNO_H__
#define __ERRNO_H__
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define ECANCELED 125 /* Operation Canceled */
#define ENOKEY 126 /* Required key not available */
#define EKEYEXPIRED 127 /* Key has expired */
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
/* Codezero specific error codes (above the Linux-compatible values listed before) */
#define EACTIVE 132 /* Task active */
#define ENOIPC 133 /* General IPC error */
#define ENOCAP 134 /* None or insufficient capability */
#define ENOUTCB 135 /* Task has no utcb set up */
#define ENOMAP 136 /* The memory area has unmapped regions */
#define ENOIRQ 137 /* Irq cannot be registered */
#define EABORT 138 /* Abort cannot be handled */
#define ENOCHILD 139 /* Task is not paged by caller */

#endif /* __ERRNO_H__ */

View File

@@ -0,0 +1,50 @@
/*
* Exchange registers system call data.
*
* Copyright (C) 2008 Bahadir Balban
*/
#ifndef __EXREGS_H__
#define __EXREGS_H__
#include L4LIB_INC_GLUE(syscall.h)
#include L4LIB_INC_GLUE(context.h)
#include <l4lib/types.h>
#define EXREGS_SET_PAGER 1
#define EXREGS_SET_UTCB 2
#define EXREGS_READ 4
/*
 * Register validity masks over the exregs_context_t fields.
 *
 * NOTE: stray trailing '\' line continuations after the closing
 * parens used to splice the EXREGS_VALID_SP and EXREGS_VALID_PC
 * definitions (and the following line) into the replacement list
 * of EXREGS_VALID_REGULAR_REGS, leaving the SP/PC masks undefined
 * and the regular-regs mask unusable.
 */
#define EXREGS_VALID_REGULAR_REGS \
	(FIELD_TO_BIT(exregs_context_t, r0) | \
	 FIELD_TO_BIT(exregs_context_t, r1) | \
	 FIELD_TO_BIT(exregs_context_t, r2) | \
	 FIELD_TO_BIT(exregs_context_t, r3) | \
	 FIELD_TO_BIT(exregs_context_t, r4) | \
	 FIELD_TO_BIT(exregs_context_t, r5) | \
	 FIELD_TO_BIT(exregs_context_t, r6) | \
	 FIELD_TO_BIT(exregs_context_t, r7) | \
	 FIELD_TO_BIT(exregs_context_t, r8) | \
	 FIELD_TO_BIT(exregs_context_t, r9) | \
	 FIELD_TO_BIT(exregs_context_t, r10) | \
	 FIELD_TO_BIT(exregs_context_t, r11) | \
	 FIELD_TO_BIT(exregs_context_t, r12) | \
	 FIELD_TO_BIT(exregs_context_t, lr))

#define EXREGS_VALID_SP \
	FIELD_TO_BIT(exregs_context_t, sp)

#define EXREGS_VALID_PC \
	FIELD_TO_BIT(exregs_context_t, pc)
/* Structure passed by userspace pagers for exchanging registers */
struct exregs_data {
exregs_context_t context;
u32 valid_vect;
u32 flags;
l4id_t pagerid;
unsigned long utcb_address;
};
#endif /* __EXREGS_H__ */

View File

@@ -0,0 +1,27 @@
#ifndef __IPC_H__
#define __IPC_H__
/* Special thread-id values for ipc to/from slots */
#define L4_NILTHREAD 0xFFFFFFFF /* No thread */
#define L4_ANYTHREAD 0xFFFFFFFE /* Wildcard thread id */

#define L4_IPC_TAG_MR_OFFSET 0

/* Pagefault */
#define L4_IPC_TAG_PFAULT 0
#define L4_IPC_TAG_UNDEF_FAULT 1

/* IPC type, encoded in the low nibble of the ipc flags word */
#define L4_IPC_FLAGS_TYPE_MASK 0x0000000F
#define L4_IPC_FLAGS_SHORT 0x00000000 /* Short IPC involves just primary message registers */
#define L4_IPC_FLAGS_FULL 0x00000001 /* Full IPC involves full UTCB copy */
#define L4_IPC_FLAGS_EXTENDED 0x00000002 /* Extended IPC can page-fault and copy up to 2KB */

/* Extended IPC extra fields */
#define L4_IPC_FLAGS_MSG_INDEX_MASK 0x00000FF0 /* Index of message register with buffer pointer */
#define L4_IPC_FLAGS_SIZE_MASK 0x0FFF0000
#define L4_IPC_FLAGS_SIZE_SHIFT 16
#define L4_IPC_FLAGS_MSG_INDEX_SHIFT 4
#define L4_IPC_EXTENDED_MAX_SIZE (SZ_1K*2)

#endif /* __IPC_H__ */

View File

@@ -0,0 +1,10 @@
#ifndef __API_IRQ_H__
#define __API_IRQ_H__

/* Request types for the irq_control system call (see kip.irq_control) */
#define IRQ_CONTROL_REGISTER 0
#define IRQ_CONTROL_RELEASE 1
#define IRQ_CONTROL_WAIT 2

#endif /* __API_IRQ_H__ */

View File

@@ -0,0 +1,82 @@
/*
* Kernel Interface Page
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __KIP_H__
#define __KIP_H__
#include <l4lib/types.h>
/*
 * Compile-time build-date extraction from the __DATE__ literal
 * ("Mmm dd yyyy"). NOTE(review): __YEAR__/__MONTH__/__DAY__ sit in
 * the implementation-reserved identifier namespace.
 */
#define __YEAR__ ((((__DATE__ [7] - '0') * 10 + (__DATE__ [8] - '0')) * 10 \
	+ (__DATE__ [9] - '0')) * 10 + (__DATE__ [10] - '0'))

/* Month as 0-11, decoded from distinguishing letters of the abbreviation */
#define __MONTH__ (__DATE__ [2] == 'n' ? (__DATE__ [1] == 'a' ? 0 : 5) \
	: __DATE__ [2] == 'b' ? 1 \
	: __DATE__ [2] == 'r' ? (__DATE__ [0] == 'M' ? 2 : 3) \
	: __DATE__ [2] == 'y' ? 4 \
	: __DATE__ [2] == 'l' ? 6 \
	: __DATE__ [2] == 'g' ? 7 \
	: __DATE__ [2] == 'p' ? 8 \
	: __DATE__ [2] == 't' ? 9 \
	: __DATE__ [2] == 'v' ? 10 : 11)

/* Day of month; __DATE__ pads single digits with a space */
#define __DAY__ ((__DATE__ [4] == ' ' ? 0 : __DATE__ [4] - '0') * 10 \
	+ (__DATE__ [5] - '0'))

#define CODEZERO_VERSION 0
#define CODEZERO_SUBVERSION 2

/* Sized to hold __DATE__ (11 chars) and __TIME__ (8 chars) plus NUL */
#define KDESC_DATE_SIZE 12
#define KDESC_TIME_SIZE 9
/* Kernel identification stamp embedded in the KIP */
struct kernel_descriptor {
	u32 version;
	u32 subversion;
	u32 magic;
	char date[KDESC_DATE_SIZE]; /* Build date string */
	char time[KDESC_TIME_SIZE]; /* Build time string */
} __attribute__((__packed__));

/* Experimental KIP with non-standard offsets */
struct kip {
	/* System descriptions */
	u32 magic;
	u16 version_rsrv;
	u8 api_subversion;
	u8 api_version;
	u32 api_flags;

	/*
	 * One word per system call below -- presumably the syscall
	 * entry addresses exported to userspace; confirm against the
	 * kernel's KIP setup code.
	 */
	u32 container_control;
	u32 time;
	u32 irq_control;
	u32 thread_control;
	u32 ipc_control;
	u32 map;
	u32 ipc;
	u32 capability_control;
	u32 unmap;
	u32 exchange_registers;
	u32 thread_switch;
	u32 schedule;
	u32 getid;
	u32 mutex_control;
	u32 cache_control;
	u32 arch_syscall0;
	u32 arch_syscall1;
	u32 arch_syscall2;
	u32 utcb;

	struct kernel_descriptor kdesc;
} __attribute__((__packed__));

#if defined (__KERNEL__)
extern struct kip kip;
#endif /* __KERNEL__ */

#endif /* __KIP_H__ */

View File

@@ -0,0 +1,60 @@
#ifndef __MUTEX_CONTROL_H__
#define __MUTEX_CONTROL_H__
#if !defined(__LINUX_CONTAINER__)
/* Request ids for mutex_control syscall */
#if defined (__KERNEL__)
#define MUTEX_CONTROL_LOCK L4_MUTEX_LOCK
#define MUTEX_CONTROL_UNLOCK L4_MUTEX_UNLOCK
#define MUTEX_CONTROL_OPMASK L4_MUTEX_OPMASK
#define mutex_operation(x) ((x) & MUTEX_CONTROL_OPMASK)
#define mutex_contenders(x) ((x) & ~MUTEX_CONTROL_OPMASK)
#include <l4lib/lib/wait.h>
#include <l4lib/lib/list.h>
#include <l4lib/lib/mutex.h>
/*
 * Contender threshold is the total number of contenders
 * who are expected to sleep on the mutex, and will be waited
 * for a wakeup.
 */
struct mutex_queue {
	int contenders; /* Contenders expected to sleep on this mutex */
	unsigned long physical; /* Physical address identifying the userspace mutex */
	struct link list; /* Link on the mutex_queue_head list */
	struct waitqueue_head wqh_contenders; /* Waitqueue of sleeping contenders */
	struct waitqueue_head wqh_holders; /* Waitqueue of holders -- semantics not visible here; confirm */
};

/*
 * Mutex queue head keeps the list of all userspace mutexes.
 *
 * Here, mutex_control_mutex is a single lock for:
 * (1) Mutex_queue create/deletion
 * (2) List add/removal.
 * (3) Wait synchronization:
 * - Both waitqueue spinlocks need to be acquired for
 * rendezvous inspection to occur atomically. Currently
 * it's not done since we rely on this mutex for that.
 */
struct mutex_queue_head {
	struct link list; /* List of mutex_queue entries */
	struct mutex mutex_control_mutex; /* Single lock, see above */
	int count; /* Number of mutex_queue entries on the list */
};
void init_mutex_queue_head(struct mutex_queue_head *mqhead);
#endif
#define L4_MUTEX_OPMASK 0xF0000000
#define L4_MUTEX_LOCK 0x10000000
#define L4_MUTEX_UNLOCK 0x20000000
#endif /* __LINUX_CONTAINER__ */
#endif /* __MUTEX_CONTROL_H__*/

View File

@@ -0,0 +1,5 @@
#ifndef __API_SPACE_H__
#define __API_SPACE_H__
#endif /* __API_SPACE_H__ */

View File

@@ -0,0 +1,25 @@
#ifndef __API_THREAD_H__
#define __API_THREAD_H__
/* Thread_control action, encoded in the top nibble of the flags word */
#define THREAD_ACTION_MASK 0xF0000000
#define THREAD_CREATE 0x00000000
#define THREAD_RUN 0x10000000
#define THREAD_SUSPEND 0x20000000
#define THREAD_DESTROY 0x30000000
#define THREAD_RECYCLE 0x40000000
#define THREAD_WAIT 0x50000000

/* Creation-time sharing and address-space disposition flags */
#define THREAD_SHARE_MASK 0x00F00000
#define THREAD_SPACE_MASK 0x0F000000
#define THREAD_CREATE_MASK (THREAD_SHARE_MASK | THREAD_SPACE_MASK)
#define TC_SHARE_CAPS 0x00100000 /* Share all thread capabilities */
#define TC_SHARE_UTCB 0x00200000 /* Share utcb location (same space) */
#define TC_SHARE_GROUP 0x00400000 /* Share thread group id */
#define TC_SHARE_SPACE 0x01000000 /* New thread, use given space */
#define TC_COPY_SPACE 0x02000000 /* New thread, copy given space */
#define TC_NEW_SPACE 0x04000000 /* New thread, new space */
/* #define THREAD_USER_MASK 0x000F0000 Reserved for userspace */
#define THREAD_EXIT_MASK 0x0000FFFF /* Thread exit code */

#endif /* __API_THREAD_H__ */

View File

@@ -0,0 +1,73 @@
/*
* Common definitions for exceptions
* across ARM sub-architectures.
*
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __EXCEPTION_H__
#define __EXCEPTION_H__
//#include _INC_SUBARCH(exception.h)
#include L4LIB_INC_ARCH(asm.h)
/* Abort debugging conditions */
// #define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) printk(__VA_ARGS__)
#else
#define dbg_abort(...)
#endif
/* Codezero-specific abort type */
#define ABORT_TYPE_PREFETCH 1
#define ABORT_TYPE_DATA 0
/* If abort is handled and resolved in check_aborts */
#define ABORT_HANDLED 1
/*
 * Codezero makes use of bit 8 (Always Zero) of FSR to define which
 * type of abort (data vs prefetch) a fault record describes.
 *
 * do { } while (0) so the macro behaves as a single statement in
 * if/else bodies (the original bare brace block broke there), and
 * the arguments are parenthesized against operator-precedence
 * surprises (the original expanded 'x & 1' unparenthesized).
 */
#define set_abort_type(fsr, x) \
	do { \
		(fsr) &= ~(1 << 8); \
		(fsr) |= (((x) & 1) << 8); \
	} while (0)
#define is_prefetch_abort(fsr) (((fsr) >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc; /* In DABT: Aborting PC, In PABT: Same as FAR */
u32 fsr; /* In DABT: DFSR, In PABT: IFSR */
u32 far; /* In DABT: DFAR, in PABT: IFAR */
pte_t pte; /* Faulty page table entry */
} __attribute__ ((__packed__)) fault_kdata_t;
/* This is filled on entry to irq handler, only if a process was interrupted.*/
extern unsigned int preempted_psr;
/* Implementing these as functions cause circular include dependency for tcb.h */
#define TASK_IN_KERNEL(tcb) (((tcb)->context.spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
#define TASK_IN_USER(tcb) (!TASK_IN_KERNEL(tcb))
/* True if @spsr records a context that was executing in USR mode */
static inline int is_user_mode(u32 spsr)
{
	return ((spsr & ARM_MODE_MASK) == ARM_MODE_USR);
}

/*
 * True if the interrupted context (recorded in preempted_psr on
 * irq entry) was in SVC mode. Declared with explicit (void): the
 * original empty parameter lists were old-style unprototyped
 * declarations.
 */
static inline int in_kernel(void)
{
	return ((preempted_psr & ARM_MODE_MASK) == ARM_MODE_SVC) ? 1 : 0;
}

/* Complement of in_kernel() */
static inline int in_user(void)
{
	return !in_kernel();
}
/* Request page-in of [vaddr, vaddr + size) -- presumably served by the pager; confirm in impl */
int pager_pagein_request(unsigned long vaddr, unsigned long size,
			 unsigned int flags);

/* Forward a fault (pc/fsr/far) to the faulted task's pager via ipc with @ipc_tag */
int fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag);

int is_kernel_abort(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);

#endif /* __EXCEPTION_H__ */

View File

@@ -0,0 +1,25 @@
#ifndef __ARM_IO_H__
#define __ARM_IO_H__
/*
* Arch-specific io functions/macros.
*
* Copyright (C) 2007 Bahadir Balban
*/
#if defined (__KERNEL__) && !defined (__LINUX_CONTAINER__)
#include INC_GLUE(memlayout.h)

/*
 * Volatile single-word mmio accessors.
 *
 * NOTE(review): the names 'read' and 'write' collide with the POSIX
 * syscalls of the same name; any translation unit that also declares
 * those would break. ioread32/iowrite32-style names would be safer.
 */
#define read(address) *((volatile unsigned int *) (address))
#define write(val, address) *((volatile unsigned int *) (address)) = val
#endif /* ends __KERNEL__ */

/*
 * Generic uart virtual address until a file-based console access
 * is available for userspace
 */
#define USERSPACE_CONSOLE_VBASE 0xF9800000

#endif /* __ARM_IO_H__ */

View File

@@ -0,0 +1,29 @@
#ifndef __ARM_IRQ_H__
#define __ARM_IRQ_H__

#include INC_SUBARCH(irq.h)

/* Save/restore style local irq disabling for nestable critical sections */
void irq_local_restore(unsigned long state);
void irq_local_disable_save(unsigned long *state);

/*
 * Non-zero if irqs are currently enabled. Declared with explicit
 * (void): the original empty parameter lists were old-style
 * unprototyped declarations.
 */
int irqs_enabled(void);

/* Unconditional enable/disable wrappers around the subarch primitives */
static inline void irq_local_enable(void)
{
	enable_irqs();
}

static inline void irq_local_disable(void)
{
	disable_irqs();
}

/*
 * Destructive atomic-read.
 *
 * Write 0 to byte at @location as its contents are read back.
 */
char l4_atomic_dest_readb(void *location);

#endif /* __ARM_IRQ_H__ */

View File

@@ -0,0 +1,16 @@
/*
* ARM specific low-level mutex interfaces
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARCH_MUTEX_H__
#define __ARCH_MUTEX_H__
/* TODO: The return types could be improved for debug checking */
void __spin_lock(unsigned int *s);
void __spin_unlock(unsigned int *s);
unsigned int __mutex_lock(unsigned int *m);
void __mutex_unlock(unsigned int *m);
#endif /* __ARCH_MUTEX_H__ */

View File

@@ -0,0 +1,137 @@
/*
* ARM v5-specific virtual memory details
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __V5_MM_H__
#define __V5_MM_H__
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF

#if !defined(__LINUX_CONTAINER__)
#define SECTION_SIZE SZ_1MB
#define SECTION_MASK (SECTION_SIZE - 1)
#endif
#define SECTION_ALIGN_MASK (~SECTION_MASK)
#define SECTION_BITS 20
#define ARM_PAGE_SIZE SZ_4K
#define ARM_PAGE_MASK 0xFFF
#define ARM_PAGE_BITS 12

/*
 * Parenthesized: the original 'SZ_4K * 4' expanded unparenthesized,
 * which miscomputes in contexts like 'x / PGD_SIZE' or 'x % PGD_SIZE'.
 */
#define PGD_SIZE (SZ_4K * 4)
#define PGD_ENTRY_TOTAL SZ_4K
#if !defined(__LINUX_CONTAINER__)
#define PMD_SIZE SZ_1K
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PTE_TYPE_MASK 0x3
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#endif
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_PMD 1
#define PMD_TYPE_SECTION 2
#define PTE_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
/*
* These are indices into arrays with pgd_t or pmd_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
& 0x3FFC) / sizeof(pmd_t))
/*
* Strip out the page offset in this
* megabyte from a total of 256 pages.
*/
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
& 0x3FC) / sizeof (pte_t))
/* We need this as print-early.S is including this file */
#ifndef __ASSEMBLY__
#if !defined(__LINUX_CONTAINER__)

/* Type-checkable page table elements */
typedef u32 pmd_t; /* First-level (pgd) descriptor */
typedef u32 pte_t; /* Second-level (pmd table) descriptor */

/* Page global directory made up of pmd_t entries
 * (original comment said pgd_t, but the element type is pmd_t) */
typedef struct pgd_table {
	pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;

/* Page middle directory made up of pte_t entries
 * (original comment said pmd_t, but the element type is pte_t) */
typedef struct pmd_table {
	pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;

extern pgd_table_t init_pgd;
#endif
/* Applies for both small and large pages */
#define PAGE_AP0 4
#define PAGE_AP1 6
#define PAGE_AP2 8
#define PAGE_AP3 10
/* Permission values with rom and sys bits ignored */
#define SVC_RW_USR_NONE 1
#define SVC_RW_USR_RO 2
#define SVC_RW_USR_RW 3
#define PTE_PROT_MASK (0xFF << 4)
#define CACHEABILITY 3
#define BUFFERABILITY 2
#define cacheable (1 << CACHEABILITY)
#define bufferable (1 << BUFFERABILITY)
#define uncacheable 0
#define unbufferable 0
/* Helper macros for common cases */
#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
/* There is no execute bit in ARMv5, so we ignore it */
#define __MAP_USR_RWX __MAP_USR_RW
#define __MAP_USR_RX __MAP_USR_RO
#define __MAP_KERN_RWX __MAP_KERN_RW
#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
#define __MAP_FAULT 0
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void remove_section_mapping(unsigned long vaddr);
void arch_update_utcb(unsigned long utcb_address);
void system_identify(void);
#endif /* __ASSEMBLY__ */
#endif /* __V5_MM_H__ */

View File

@@ -0,0 +1,42 @@
/*
* ARMv7 specific abort handling definitions
*
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __V7_ARCH_EXCEPTION_H__
#define __V7_ARCH_EXCEPTION_H__
/*
 * Data and Prefetch abort encodings -- values of the 5-bit fault
 * status field assembled by fsr_get_status() below.
 */
#define ABORT_TTBW_SYNC_EXTERNAL_LEVEL1 0x0C
#define ABORT_TTBW_SYNC_EXTERNAL_LEVEL2 0x0E
#define ABORT_TTBW_SYNC_PARITY_LEVEL1 0x1C
#define ABORT_TTBW_SYNC_PARITY_LEVEL2 0x1E
#define ABORT_TRANSLATION_SECTION 0x05
#define ABORT_TRANSLATION_PAGE 0x07
#define ABORT_AFLAG_SECTION 0x03
#define ABORT_AFLAG_PAGE 0x06
#define ABORT_DOMAIN_SECTION 0x09
#define ABORT_DOMAIN_PAGE 0x0B
#define ABORT_PERMISSION_SECTION 0x0D
#define ABORT_PERMISSION_PAGE 0x0F
#define ABORT_DEBUG_EVENT 0x02
#define ABORT_SYNC_EXTERNAL 0x08
#define ABORT_SYNC_PARITY 0x19
#define ABORT_ASYNC_PARITY 0x18 /* Only on Data aborts */
#define ABORT_ASYNC_EXTERNAL 0x16 /* Only on Data aborts */
#define ABORT_ICACHE_MAINTENANCE 0x04 /* Only in Data aborts */
#define ABORT_ALIGNMENT 0x01 /* Only in Data aborts */

/* IFSR/DFSR register bits */
#define FSR_FS_BIT4 10 /* 4th bit of fault status */
#define DFSR_WNR_BIT 11 /* Write-not-read bit */
#define FSR_EXT_BIT 12 /* External abort type bit */
#define FSR_FS_MASK 0xF
/*
 * Assemble the 5-bit fault status from an IFSR/DFSR value: the low
 * four bits come from FS[3:0], the fifth from the FS_BIT4 position.
 */
static inline u32 fsr_get_status(u32 fsr)
{
	u32 fs_low = fsr & FSR_FS_MASK;
	u32 fs_top = (fsr >> FSR_FS_BIT4) & 1;

	return fs_low | (fs_top << 4);
}
#endif /* __V7_ARCH_EXCEPTION_H__ */

View File

@@ -0,0 +1,315 @@
/*
* v7 memory management definitions
*
* Copyright (C) 2010 B Labs Ltd.
* Written by Bahadir Balban
*/
#ifndef __V7_MM_H__
#define __V7_MM_H__
/* Generic definitions used across the kernel */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
/* Non-global first level descriptor definitions */
#define TASK_PGD_SIZE_MAP4GB SZ_16K
#define TASK_PGD_SIZE_MAP2GB SZ_8K
#define TASK_PGD_SIZE_MAP1GB SZ_4K
#define TASK_PGD_SIZE_MAP512MB (SZ_1K * 2)
#define TASK_PGD_SIZE_MAP256MB SZ_1K
#define TASK_PGD_SIZE_MAP128MB 512
#define TASK_PGD_SIZE_MAP64MB 256
#define TASK_PGD_SIZE_MAP32MB 128
/* Any virtual mapping above this value goes to the global table */
#define PGD_GLOBAL_BOUNDARY 0x80000000
/* Task-specific page table, userspace private + shared memory mappings */
#define PGD_ENTRY_TOTAL (TASK_PGD_SIZE_MAP2GB >> 2)
#define PGD_SIZE (TASK_PGD_SIZE_MAP2GB)
/* Global page table size UTCB + kernel + device mappings */
#define PGD_GLOBAL_SIZE SZ_16K
#define PGD_GLOBAL_ENTRY_TOTAL (PGD_GLOBAL_SIZE >> 2)
#if !defined(__LINUX_CONTAINER__)
#define PMD_SIZE SZ_1K
#endif
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
/* FIXME: Check these shifts/masks are correct */
#define PGD_INDEX_MASK 0x3FFC
#define PGD_INDEX_SHIFT 18
#define PMD_INDEX_MASK 0x3FC
#define PMD_INDEX_SHIFT 10
/*
* These are indices into arrays with pmd_t or pte_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> PGD_INDEX_SHIFT) \
& PGD_INDEX_MASK) / sizeof(pmd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x) (((((unsigned long)(x)) >> PMD_INDEX_SHIFT) \
& PMD_INDEX_MASK) / sizeof (pte_t))
#if !defined (__ASSEMBLY__) && !defined (__LINUX_CONTAINER__)

/* Type-checkable page table elements */
typedef u32 pmd_t; /* First-level descriptor */
typedef u32 pte_t; /* Second-level descriptor */

/* Page global directory made up of pmd_t entries (kernel/global half) */
typedef struct page_table_directory {
	pmd_t entry[PGD_GLOBAL_ENTRY_TOTAL];
} pgd_global_table_t;

/* Page non-global directory: per-task, maps below PGD_GLOBAL_BOUNDARY (2GB) */
typedef struct task_page_table_directory {
	pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;

/* Page middle directory made up of pte_t entries */
typedef struct pmd_table {
	pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;

extern pgd_table_t init_pgd;
extern pgd_global_table_t init_global_pgd;
#endif /* !defined(__ASSEMBLY__) */
/* PMD definitions (2nd level page tables) */
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_FAULT 0x0
#define PMD_TYPE_PMD 0x1
#define PMD_TYPE_SECTION 0x2
#define PMD_TYPE_MASK 0x3
#define PMD_DOMAIN_SHIFT 5 /* Domain field on PGD entry */
#define PMD_DOMAIN_MASK 0x000001E0 /* Domain mask on PGD entry */
#define PMD_NS_BIT 3 /* Non-secure memory */
/* First level Section definitions */
#define SECT_MAP_SIZE SZ_1MB /* Section base address alignment */
#define SECT_NS_BIT 19
#define SECT_SUPER_BIT 18
#define SECT_NG_BIT 17
#define SECT_SHAREABLE_BIT 16
#define SECT_AP2_BIT 15
#define SECT_TEX2_BIT 14
#define SECT_TEX1_BIT 13
#define SECT_TEX0_BIT 12
#define SECT_AP1_BIT 11
#define SECT_AP0_BIT 10
#define SECT_DOMAIN_SHIFT 5
#define SECT_XN_BIT 4
#define SECT_CACHE_BIT 3
#define SECT_BUFFER_BIT 2
#if !defined (__LINUX_CONTAINER__)
/* Second level entry (PTE) definitions */
#define PTE_TYPE_MASK 0x2
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#endif
#define PTE_XN_BIT 0
#define PTE_BUFFER_BIT 2
#define PTE_CACHE_BIT 3
#define PTE_AP0_BIT 4
#define PTE_AP1_BIT 5
#define PTE_TEX0_BIT 6
#define PTE_TEX1_BIT 7
#define PTE_TEX2_BIT 8
#define PTE_AP2_BIT 9
#define PTE_AP01_SHIFT PTE_AP0_BIT
#define PTE_AP01_MASK 0x30
#define PTE_SHARE_BIT 10
#define PTE_NG_BIT 11
/* Domain access types */
#define DOMAIN_ACCESS_NONE 0
#define DOMAIN_ACCESS_CLIENT 1
#define DOMAIN_ACCESS_MANAGER 3
/* Simplified permission model definitions */
#define PTE_ACCESS_FLAG PTE_AP0_BIT
/* Bits [1:0] map as AP[2], AP[1] */
#define AP_SIMPLE_USER_NONE_KERN_RW 0
#define AP_SIMPLE_USER_RW_KERN_RW 1
#define AP_SIMPLE_USER_NONE_KERN_RO 2
#define AP_SIMPLE_USER_RO_KERN_RO 3
/*
* Generic page table flag meanings for v7:
*
* Note these are not hardware-defined bits,
* they are defined by the kernel for
* convenience.
*
* [WXCDU]
* W = write, X = Exec, C = Cached, D = Device
*
* If !D it means Normal memory.
* If !U it means kernel-only.
* If !W it means read-only.
*
* These are actually meaningful but unused
* individually, rather the combination of them
* are directly converted into HW pte.
*/
#define PTE_MAP_USER (1 << 0)
#define PTE_MAP_DEVICE (1 << 1)
#define PTE_MAP_CACHED (1 << 2)
#define PTE_MAP_EXEC (1 << 3)
#define PTE_MAP_WRITE (1 << 4)
/* 0 would mean normal, uncached, kernel mapping */
#define PTE_MAP_FAULT (1 << 5)
/*
* v7-specific conversion of map flags
*/
/* In ARMv7 normal, wbwa, shareable, user-rw/kern-rw, xn=1 */
#define __MAP_USR_RW (PTE_MAP_USER | PTE_MAP_WRITE | PTE_MAP_CACHED)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-ro/kern-ro, xn=1 */
#define __MAP_USR_RO (PTE_MAP_USER | PTE_MAP_CACHED)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-rw, xn=1 */
#define __MAP_KERN_RW (PTE_MAP_CACHED | PTE_MAP_WRITE)
/* Uncached. In ARMv7 device, uncached, shareable, user-rw/kern-rw, xn=1 */
#define __MAP_USR_IO (PTE_MAP_USER | PTE_MAP_DEVICE | PTE_MAP_WRITE)
/* Uncached. In ARMv7 device, uncached, shareable, user-none/kern-rw, xn=1 */
#define __MAP_KERN_IO (PTE_MAP_DEVICE | PTE_MAP_WRITE)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-rw/kern-rw, xn=0 */
#define __MAP_USR_RWX (PTE_MAP_USER | PTE_MAP_CACHED \
| PTE_MAP_WRITE | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-rw, xn=0 */
#define __MAP_KERN_RWX (PTE_MAP_CACHED | PTE_MAP_WRITE | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-ro/kern-ro, xn=0 */
#define __MAP_USR_RX (PTE_MAP_USER | PTE_MAP_CACHED | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-ro, xn=0 */
#define __MAP_KERN_RX (PTE_MAP_CACHED | PTE_MAP_EXEC)
/* Fault/unmapped entry */
#define __MAP_FAULT PTE_MAP_FAULT
/*
* Shareability bit remapping on tex remap
*
* As an example to below, when a normal region has its
* shareability bit set to 1, PRRR_NORMAL_S1_BIT remaps
* and determines the final shareability status. E.g. if
* PRRR_NORMAL_S1_BIT is set to 0, the region becomes
* not shareable, even though the pte S bit == 1.
* On Tex Remap, PRRR is the final decision point.
*/
#define PRRR_DEVICE_S0_BIT 16 /* Meaning of all device memory when S == 0 */
#define PRRR_DEVICE_S1_BIT 17 /* Meaning of all device memory when S == 1 */
#define PRRR_NORMAL_S0_BIT 18 /* Meaning of all normal memory when S == 0 */
#define PRRR_NORMAL_S1_BIT 19 /* Meaning of all normal memory when S == 1 */
#define PRRR_NOS_START_BIT 24
#define NMRR_OUTER_START_BIT 16
#define CACHEABLE_NONE 0
#define CACHEABLE_WBWA 1
#define CACHEABLE_WT_NOWA 2
#define CACHEABLE_WB_NOWA 3
/* Memory type values for tex remap registers */
#define MEMTYPE_ST_ORDERED 0
#define MEMTYPE_DEVICE 1
#define MEMTYPE_NORMAL 2
/* User-defined tex remap slots */
#define TEX_SLOT_NORMAL_UNCACHED 0
#define TEX_SLOT_NORMAL 1
#define TEX_SLOT_DEVICE_UNCACHED 2
#define TEX_SLOT_ST_ORDERED_UNCACHED 3
#define ASID_MASK 0xFF
#define ASID_GROUP_SHIFT 8
#define PROCID_SHIFT 8
#define PROCID_MASK 0xFFFFFF
#define TASK_ASID(x) ((x)->space->spid & ASID_MASK)
#define SPACE_ASID(x) ((x)->spid & ASID_MASK)
#define TASK_PROCID(x) ((x)->tid & PROCID_MASK)
#define PGD_GLOBAL_GET() (kernel_resources.pgd_global)
/*
*
* Page table memory settings for translation table walk hardware:
*
* We assume write-back write-allocate, inner and outer
* cacheable, inner shareable, not outer-shareable,
* normal memory.
*
* ARMv7 VMSA (B3-114) says that the obscure IRGN[1:0]
* mapping ensures same bit values for SMP and v7 base architecture,
* however this is only partially true as seen by the WBWA bit
* mapping differences.
*
* RGN values:
* 00 Uncached
* 01 WBWA
* 10 WT
* 11 WB_NOWA
*
* On below definitions both inner and outer cacheability bits
* are assigned with the same cacheability values.
*/
/* I
* I R
* R R R G
* G N G G I N
* N O N N M 1
* 0 S 1|0 P S C */
#define PGD_MEMORY_NORMAL_WBWA_S_NOS 0x2B /* 00 1 0|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WBWA_S_NOS_SMP 0x6A /* 01 1 0|1 0 1 0 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_NOS 0x3B /* 00 1 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_NOS_SMP 0x7B /* 01 1 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_OS 0x1B /* 00 0 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_OS_SMP 0x5B /* 01 0 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_UNCACHED_S_NOS 0x22 /* 00 1 0|0 0 1 0 */
#define PGD_MEMORY_NORMAL_UNCACHED_S_NOS_SMP 0x22 /* 00 1 0|0 0 1 0 */
#define PGD_MEMORY_NORMAL_WBWA_S_OS 0x0B /* 00 0 0|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WBWA_S_OS_SMP 0x4A /* 01 0 0|1 0 1 0 */
/* Returns page table memory settings for ttb walk fetches */
unsigned int ttb_walk_mem_settings(void);
#if !defined (__ASSEMBLY__)
void v7_flags_prepare_pte(pte_t *pte, unsigned long phys,
unsigned long virt, unsigned int v7_pte_flags);
void section_set_access_simple(pmd_t *pmd, unsigned int perms);
void section_set_tex_remap_slot(pmd_t *pmd, int slot);
void v7_write_section(unsigned long paddr, unsigned long vaddr,
unsigned int section_flags, unsigned int asid);
int pte_get_access_simple(pte_t pte);
void tex_remap_setup_all_slots(void);
struct ktcb;
void arch_update_utcb(unsigned long utcb_address);
void arch_space_switch(struct ktcb *to);
void system_identify(void);
#endif /* !defined(__ASSEMBLY__) */
#endif /* __V7_MM_H__ */

View File

@@ -0,0 +1,148 @@
/*
* Types of capabilities and their operations
*
* Copyright (C) 2009 Bahadir Balban
*/
#ifndef __CAP_TYPES_H__
#define __CAP_TYPES_H__
/*
* Capability types
*/
#define CAP_TYPE_MASK 0x0000FFFF
#define CAP_TYPE_TCTRL (1 << 0)
#define CAP_TYPE_EXREGS (1 << 1)
#define CAP_TYPE_MAP_PHYSMEM (1 << 2)
#define CAP_TYPE_MAP_VIRTMEM (1 << 3)
#define CAP_TYPE_IPC (1 << 4)
#define CAP_TYPE_IRQCTRL (1 << 5)
#define CAP_TYPE_UMUTEX (1 << 6)
#define CAP_TYPE_QUANTITY (1 << 7)
#define CAP_TYPE_CAP (1 << 8)
#define cap_type(c) ((c)->type & CAP_TYPE_MASK)
/*
* Resource types
*/
/*
 * Resource types (kept in the upper 16 bits of the capability
 * type field; the low 16 bits hold CAP_TYPE_* above).
 */
#define CAP_RTYPE_MASK		0xFFFF0000
#define CAP_RTYPE_THREAD	(1 << 16)
#define CAP_RTYPE_SPACE		(1 << 17)
#define CAP_RTYPE_CONTAINER	(1 << 18)
#define CAP_RTYPE_CPUPOOL	(1 << 19)
#define CAP_RTYPE_THREADPOOL	(1 << 20)
#define CAP_RTYPE_SPACEPOOL	(1 << 21)
#define CAP_RTYPE_MUTEXPOOL	(1 << 22)
#define CAP_RTYPE_MAPPOOL	(1 << 23) /* For pmd spending */
#define CAP_RTYPE_CAPPOOL	(1 << 24) /* For new cap generation */

/* Extract the resource-type bits of capability @c */
#define cap_rtype(c)	((c)->type & CAP_RTYPE_MASK)

/*
 * Replace the resource-type bits of capability @c with @rtype.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (the old bare-brace form broke unbraced if/else use);
 * @rtype is parenthesized against low-precedence argument
 * expressions.
 */
#define cap_set_rtype(c, rtype)			\
do {						\
	(c)->type &= ~CAP_RTYPE_MASK;		\
	(c)->type |= CAP_RTYPE_MASK & (rtype);	\
} while (0)
/*
* User-defined device-types
* (Kept in the user field)
*/
#define CAP_DEVTYPE_TIMER 1
#define CAP_DEVTYPE_UART 2
#define CAP_DEVTYPE_KEYBOARD 3
#define CAP_DEVTYPE_MOUSE 4
#define CAP_DEVTYPE_CLCD 5
#define CAP_DEVTYPE_OTHER 0xF
/* Device type lives in the low 16 bits of attr, device number in the high 16 */
#define CAP_DEVTYPE_MASK	0xFFFF
#define CAP_DEVNUM_MASK		0xFFFF0000
#define CAP_DEVNUM_SHIFT	16

/* A non-zero attr field marks the capability as device memory */
#define cap_is_devmem(c)	((c)->attr)

/*
 * Setters are wrapped in do { } while (0) so they behave as single
 * statements (the old bare-brace form broke unbraced if/else use);
 * arguments are parenthesized against low-precedence expressions.
 */
#define cap_set_devtype(c, devtype)			\
do {							\
	(c)->attr &= ~CAP_DEVTYPE_MASK;			\
	(c)->attr |= CAP_DEVTYPE_MASK & (devtype);	\
} while (0)

#define cap_set_devnum(c, devnum)					\
do {									\
	(c)->attr &= ~CAP_DEVNUM_MASK;					\
	(c)->attr |= CAP_DEVNUM_MASK & ((devnum) << CAP_DEVNUM_SHIFT);	\
} while (0)

#define cap_devnum(c) \
	(((c)->attr & CAP_DEVNUM_MASK) >> CAP_DEVNUM_SHIFT)

#define cap_devtype(c)	((c)->attr & CAP_DEVTYPE_MASK)
/*
* Access permissions
*/
/* Generic permissions */
#define CAP_CHANGEABLE (1 << 28) /* Can modify contents */
#define CAP_TRANSFERABLE (1 << 29) /* Can grant or share it */
#define CAP_REPLICABLE (1 << 30) /* Can create copies */
#define CAP_GENERIC_MASK 0xF0000000
#define CAP_IMMUTABLE 0
#define cap_generic_perms(c) \
((c)->access & CAP_GENERIC_MASK)
/* Thread control capability */
#define CAP_TCTRL_CREATE (1 << 0)
#define CAP_TCTRL_DESTROY (1 << 1)
#define CAP_TCTRL_RUN (1 << 2)
#define CAP_TCTRL_SUSPEND (1 << 3)
#define CAP_TCTRL_RECYCLE (1 << 4)
#define CAP_TCTRL_WAIT (1 << 5)
/* Exchange registers capability */
#define CAP_EXREGS_RW_PAGER (1 << 0)
#define CAP_EXREGS_RW_UTCB (1 << 1)
#define CAP_EXREGS_RW_SP (1 << 2)
#define CAP_EXREGS_RW_PC (1 << 3)
#define CAP_EXREGS_RW_REGS (1 << 4) /* Other regular regs */
#define CAP_EXREGS_RW_CPU (1 << 5)
#define CAP_EXREGS_RW_CPUTIME (1 << 6)
/* Map capability */
#define CAP_MAP_READ (1 << 0)
#define CAP_MAP_WRITE (1 << 1)
#define CAP_MAP_EXEC (1 << 2)
#define CAP_MAP_CACHED (1 << 3)
#define CAP_MAP_UNCACHED (1 << 4)
#define CAP_MAP_UNMAP (1 << 5)
#define CAP_MAP_UTCB (1 << 6)
/* Cache operations, applicable to (virtual) memory regions */
#define CAP_CACHE_INVALIDATE (1 << 7)
#define CAP_CACHE_CLEAN (1 << 8)
/*
* IRQ Control capability
*/
#define CAP_IRQCTRL_WAIT (1 << 8)
/*
* This is a common one and it applies to both
* CAP_TYPE_IRQCTRL and CAP_TYPE_MAP_PHYSMEM
*/
#define CAP_IRQCTRL_REGISTER (1 << 7)
/* Ipc capability */
#define CAP_IPC_SEND (1 << 0)
#define CAP_IPC_RECV (1 << 1)
#define CAP_IPC_SHORT (1 << 2)
#define CAP_IPC_FULL (1 << 3)
#define CAP_IPC_EXTENDED (1 << 4)
#define CAP_IPC_ASYNC (1 << 5)
/* Userspace mutex capability */
#define CAP_UMUTEX_LOCK (1 << 0)
#define CAP_UMUTEX_UNLOCK (1 << 1)
/* Capability control capability */
#define CAP_CAP_GRANT (1 << 0)
#define CAP_CAP_READ (1 << 1)
#define CAP_CAP_SHARE (1 << 2)
#define CAP_CAP_REPLICATE (1 << 3)
#define CAP_CAP_SPLIT (1 << 4)
#define CAP_CAP_DEDUCE (1 << 5)
#define CAP_CAP_DESTROY (1 << 6)
#define CAP_CAP_MODIFY (CAP_CAP_DEDUCE | CAP_CAP_SPLIT \
| CAP_CAP_DESTROY)
#endif /* __CAP_TYPES_H__ */

View File

@@ -0,0 +1,19 @@
/*
* Kernel preemption functions.
*/
#ifndef __PREEMPT_H__
#define __PREEMPT_H__
#if !defined(__LINUX_CONTAINER__)
void preempt_enable(void);
void preempt_disable(void);
int preemptive(void);
int preempt_count(void);
int in_nested_irq_context(void);
int in_irq_context(void);
int in_task_context(void);
#endif /* __LINUX_CONTAINER__ */
#endif /* __PREEMPT_H__ */

View File

@@ -0,0 +1,30 @@
/*
* Generic address space related information.
*
* Copyright (C) 2007-2010 Bahadir Balban
*/
#ifndef __SPACE_H__
#define __SPACE_H__
/*
* Generic mapping flags.
*/
#define MAP_FAULT 0
#define MAP_USR_RW 1
#define MAP_USR_RO 2
#define MAP_KERN_RW 3
#define MAP_USR_IO 4
#define MAP_KERN_IO 5
#define MAP_USR_RWX 6
#define MAP_KERN_RWX 7
#define MAP_USR_RX 8
#define MAP_KERN_RX 9
#define MAP_UNMAP 10 /* For unmap syscall */
#define MAP_INVALID_FLAGS (1 << 31)
/* Some default aliases */
#define MAP_USR_DEFAULT MAP_USR_RW
#define MAP_KERN_DEFAULT MAP_KERN_RW
#define MAP_IO_DEFAULT MAP_KERN_IO
#endif /* __SPACE_H__ */

View File

@@ -0,0 +1,43 @@
/*
* Thread Control Block, kernel portion.
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __TCB_H__
#define __TCB_H__
/*
* These are a mixture of flags that indicate the task is
* in a transitional state that could include one or more
* scheduling states.
*/
#define TASK_INTERRUPTED (1 << 0)
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)
#define TASK_PENDING_SIGNAL (TASK_SUSPENDING)
#define TASK_REALTIME (1 << 5)
/*
* This is to indicate a task (either current or one of
* its children) exit has occured and cleanup needs to be
* called
*/
#define TASK_EXITED (1 << 3)
/* Task states */
enum task_state {
TASK_INACTIVE = 0,
TASK_SLEEPING = 1,
TASK_RUNNABLE = 2,
};
#define TASK_CID_MASK 0xFF000000
#define TASK_ID_MASK 0x00FFFFFF
#define TASK_CID_SHIFT 24
/* Values that rather have special meaning instead of an id value */
#define TASK_ID_INVALID 0xFFFFFFFF
#endif /* __TCB_H__ */

View File

@@ -0,0 +1,26 @@
/*
* Generic cache api calls
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#ifndef __GLUE_CACHE_H__
#define __GLUE_CACHE_H__
//#include INC_SUBARCH(mmu_ops.h)
/* Lowest byte is reserved for and used by capability permissions */
#define ARCH_INVALIDATE_ICACHE 0x10
#define ARCH_INVALIDATE_DCACHE 0x20
#define ARCH_CLEAN_DCACHE 0x30
#define ARCH_CLEAN_INVALIDATE_DCACHE 0x40
#define ARCH_INVALIDATE_TLB 0x50
void arch_invalidate_dcache(unsigned long start, unsigned long end);
void arch_clean_invalidate_dcache(unsigned long start, unsigned long end);
void arch_invalidate_icache(unsigned long start, unsigned long end);
void arch_invalidate_tlb(unsigned long start, unsigned long end);
void arch_clean_dcache(unsigned long start, unsigned long end);
#endif /* __GLUE_CACHE_H__ */

View File

@@ -0,0 +1,53 @@
#ifndef __ARM_CONTEXT_H__
#define __ARM_CONTEXT_H__
#include <l4lib/types.h>
/*
* This describes the register context of each task. Simply set
* them and they'll be copied onto real registers upon a context
* switch to that task. exchange_registers() system call is
* designed for this, whose input structure is defined further
* below.
*/
/*
 * Full saved register set of a task: spsr followed by r0-r12,
 * sp, lr, pc.  Packed so the offsets noted per field are the
 * exact byte offsets used by context-switch assembly.
 */
typedef struct arm_context {
	u32 spsr;	/* 0x0 */
	u32 r0;		/* 0x4 */
	u32 r1;		/* 0x8 */
	u32 r2;		/* 0xC */
	u32 r3;		/* 0x10 */
	u32 r4;		/* 0x14 */
	u32 r5;		/* 0x18 */
	u32 r6;		/* 0x1C */
	u32 r7;		/* 0x20 */
	u32 r8;		/* 0x24 */
	u32 r9;		/* 0x28 */
	u32 r10;	/* 0x2C */
	u32 r11;	/* 0x30 */
	u32 r12;	/* 0x34 */
	u32 sp;		/* 0x38 */
	u32 lr;		/* 0x3C */
	u32 pc;		/* 0x40 */
} __attribute__((__packed__)) task_context_t;
/*
 * Register context used by exchange_registers(): same layout as
 * arm_context but without the leading spsr word.
 *
 * NOTE: the per-field offset comments previously started at 0x4,
 * copied verbatim from arm_context; since this packed struct has
 * no spsr, the real offsets start at 0x0 and are corrected below.
 */
typedef struct arm_exregs_context {
	u32 r0;		/* 0x0 */
	u32 r1;		/* 0x4 */
	u32 r2;		/* 0x8 */
	u32 r3;		/* 0xC */
	u32 r4;		/* 0x10 */
	u32 r5;		/* 0x14 */
	u32 r6;		/* 0x18 */
	u32 r7;		/* 0x1C */
	u32 r8;		/* 0x20 */
	u32 r9;		/* 0x24 */
	u32 r10;	/* 0x28 */
	u32 r11;	/* 0x2C */
	u32 r12;	/* 0x30 */
	u32 sp;		/* 0x34 */
	u32 lr;		/* 0x38 */
	u32 pc;		/* 0x3C */
} __attribute__((__packed__)) exregs_context_t;
#endif /* __ARM_CONTEXT_H__ */

View File

@@ -0,0 +1,61 @@
/*
* Virtual memory layout of ARM systems.
*/
#ifndef __MEMLAYOUT_H__
#define __MEMLAYOUT_H__
#ifndef __ASSEMBLY__
#include L4LIB_INC_GLUE(memory.h)
#endif
#define KERNEL_AREA_START 0xF0000000
#define KERNEL_AREA_END 0xF8000000 /* 128 MB */
#define KERNEL_AREA_SIZE (KERNEL_AREA_END - KERNEL_AREA_START)
#define KERNEL_AREA_SECTIONS (KERNEL_AREA_SIZE / ARM_SECTION_SIZE)
#define UTCB_SIZE (sizeof(int) * 64)
#define IO_AREA_START 0xF9000000
#define IO_AREA_END 0xFF000000
#define IO_AREA_SIZE (IO_AREA_END - IO_AREA_START)
#define IO_AREA_SECTIONS (IO_AREA_SIZE / ARM_SECTION_SIZE)
#define USER_KIP_PAGE 0xFF000000
/* ARM-specific offset in KIP that tells the address of UTCB page */
#define UTCB_KIP_OFFSET 0x50
#define IO_AREA0_VADDR IO_AREA_START
#define IO_AREA1_VADDR (IO_AREA_START + (SZ_1MB*1))
#define IO_AREA2_VADDR (IO_AREA_START + (SZ_1MB*2))
#define IO_AREA3_VADDR (IO_AREA_START + (SZ_1MB*3))
#define IO_AREA4_VADDR (IO_AREA_START + (SZ_1MB*4))
#define IO_AREA5_VADDR (IO_AREA_START + (SZ_1MB*5))
#define IO_AREA6_VADDR (IO_AREA_START + (SZ_1MB*6))
#define IO_AREA7_VADDR (IO_AREA_START + (SZ_1MB*7))
/*
* IO_AREA8_VADDR
* The beginning page in this slot is used for userspace uart mapping
*/
#define ARM_HIGH_VECTOR 0xFFFF0000
#define ARM_SYSCALL_VECTOR 0xFFFFFF00
#if !defined(__LINUX_CONTAINER__)
#define KERNEL_OFFSET (KERNEL_AREA_START - PLATFORM_PHYS_MEM_START)
#endif
/* User tasks define them differently */
#if defined (__KERNEL__) && !defined(__LINUX_CONTAINER__)
#define phys_to_virt(addr) ((unsigned int)(addr) + KERNEL_OFFSET)
#define virt_to_phys(addr) ((unsigned int)(addr) - KERNEL_OFFSET)
#endif
#define KERN_ADDR(x) ((x >= KERNEL_AREA_START) && (x < KERNEL_AREA_END))
#define UTCB_ADDR(x) ((x >= UTCB_AREA_START) && (x < UTCB_AREA_END))
#define is_kernel_address(x) (KERN_ADDR(x) || (x >= ARM_HIGH_VECTOR) || \
(x >= IO_AREA_START && x < IO_AREA_END))
#endif /* __MEMLAYOUT_H__ */

View File

@@ -0,0 +1,85 @@
/*
* Includes memory-related architecture specific definitions and their
* corresponding generic wrappers.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __GLUE_ARM_MEMORY_H__
#define __GLUE_ARM_MEMORY_H__
#include L4LIB_INC_GLUE(memlayout.h) /* Important generic definitions */
#include L4LIB_INC_SUBARCH(mm.h)
/* Generic definitions */
#define PFN_SHIFT 12
#define PAGE_BITS PFN_SHIFT
#if !defined(__LINUX_CONTAINER__)
#define PAGE_SIZE SZ_4K
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
/* Aligns to the upper page (ceiling) FIXME: Must add a wraparound checker. */
#define page_align_up(addr) ((((unsigned long)(addr)) + PAGE_MASK) & \
(~PAGE_MASK))
/* Aligns to the lower page (floor) */
#define page_align(addr) (((unsigned long)(addr)) & \
(~PAGE_MASK))
#define is_aligned(val, size) (!(((unsigned long)(val)) & (((unsigned long)size) - 1)))
#define is_page_aligned(val) (!(((unsigned long)(val)) & PAGE_MASK))
#define page_boundary(x) is_page_aligned(x)
/*
* Align to given size.
*
* Note it must be an alignable size i.e. one that is a power of two.
* E.g. 0x1000 would work but 0x1010 would not.
*/
#define align(addr, size) (((unsigned int)(addr)) & (~((unsigned long)size-1)))
#define align_up(addr, size) ((((unsigned long)(addr)) + \
((size) - 1)) & (~(((unsigned long)size) - 1)))
/* The bytes left until the end of the page that x is in */
#define TILL_PAGE_ENDS(x) (PAGE_SIZE - ((unsigned long)(x) & PAGE_MASK))
/* Extract page frame number from address and vice versa. */
#define __pfn(x) (((unsigned long)(x)) >> PAGE_BITS)
#define __pfn_to_addr(x) (((unsigned long)(x)) << PAGE_BITS)
/* Extract physical address from page table entry (pte) */
#define __pte_to_addr(x) (((unsigned long)(x)) & ~PAGE_MASK)
/* Minimum excess needed for word alignment */
#define SZ_WORD sizeof(unsigned int)
#define WORD_BITS 32
#define WORD_BITS_LOG2 5
#define BITWISE_GETWORD(x) ((x) >> WORD_BITS_LOG2) /* Divide by 32 */
#define BITWISE_GETBIT(x) (1 << ((x) % WORD_BITS))
/* Minimum stack alignment restriction across functions, exceptions */
#define STACK_ALIGNMENT 8
#if !defined(__LINUX_CONTAINER__)
/* Endianness conversion */
/*
 * Convert a 32-bit big-endian value to CPU byte order.
 *
 * BUG FIX: the previous version took @x by value, byte-swapped its
 * local copy and returned void, so the result was never observable
 * by the caller.  It now returns the swapped value; existing callers
 * that ignored the (void) result remain source-compatible but must
 * be updated to use the return value.
 *
 * NOTE(review): an unconditional byte swap is a be->cpu conversion
 * only on a little-endian CPU, which this port appears to assume --
 * confirm before any big-endian configuration is added.
 */
static inline unsigned int be32_to_cpu(unsigned int x)
{
	return ((x & 0x000000FF) << 24) |
	       ((x & 0x0000FF00) << 8)  |
	       ((x & 0x00FF0000) >> 8)  |
	       ((x & 0xFF000000) >> 24);
}
struct ktcb;
void task_init_registers(struct ktcb *task, unsigned long pc);
#endif /* !_LINUX_CONTAINER__ */
#endif /* __GLUE_ARM_MEMORY_H__ */

View File

@@ -0,0 +1,95 @@
/*
* Userspace thread control block
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __GLUE_ARM_MESSAGE_H__
#define __GLUE_ARM_MESSAGE_H__
/*
* Here's a summary of how ARM registers are used during IPC:
*
* System registers:
* r0 - r2: Passed as arguments to ipc() call. They are the registers
* the microkernel will read and they have system-wide meaning.
*
* Primary message registers:
* r3 - r8: These 6 registers are the primary message registers MR0-MR6.
* Their format is application-specific, i.e. the microkernel imposes no
* format restrictions on them.
*
* TODO: The only exception is that, for ANYTHREAD receivers the predefined
* MR_SENDER is touched by the kernel to indicate the sender. This register
* is among the primary MRs and it may be better fit to put it into one of
* the system registers.
*
* l4lib registers: (MR_TAG, MR_SENDER, MR_RETURN)
* Some of the primary message registers are used by the l4lib convenience
* library for operations necessary on most or all common ipcs. For example
* every ipc has a tag that specifies the ipc reason. Also send/receive
* operations require a return value. Threads that are open to receive from
* all threads require the sender id. These values are passed in predefined
* primary message registers, but the microkernel has no knowledge about them.
*
* System call registers: L4SYS_ARG0 to ARG4.(See syslib.h for definitions)
* Finally the rest of the primary message registers are available for
* implementing system call arguments. For example the POSIX services use
* these arguments to pass posix system call information.
*
* Secondary Message Registers:
* These are non-real registers and are present in the UTCB memory region.
* Both real and non-real message registers have a location in the UTCB, but
* non-real ones are copied only if the FULL IPC flag is set.
*
* The big picture:
*
* r0 System register
* r1 System register
* r2 System register
* r3 Primary MR0 MR_RETURN, MR_TAG Present in UTCB, Short IPC
* r4 Primary MR1 MR_SENDER Present in UTCB, Short IPC
* r5 Primary MR2 L4SYS_ARG0 Present in UTCB, Short IPC
* r6 Primary MR3 L4SYS_ARG1 Present in UTCB, Short IPC
* r7 Primary MR4 L4SYS_ARG2 Present in UTCB, Short IPC
* r8 Primary MR5 L4SYS_ARG3 Present in UTCB, Short IPC
* x Secondary MR6 Present in UTCB, Full IPC only
* x Secondary MR64 Present in UTCB, Full IPC only
*
* Complicated for you? Suggest a simpler design and it shall be implemented!
*/
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 4) /* -4 is for fields on utcb */
#define MR_TOTAL 6
#define MR_TAG 0 /* Contains the purpose of message */
#define MR_SENDER 1 /* For anythread receivers to discover sender */
#define MR_RETURN 0 /* Contains the posix return value. */
/* These define the mr start - end range that isn't used by userspace syslib */
#define MR_UNUSED_START 2 /* The first mr that's not used by syslib.h */
#define MR_UNUSED_TOTAL (MR_TOTAL - MR_UNUSED_START)
#define MR_USABLE_TOTAL MR_UNUSED_TOTAL
/* These are defined so that we don't hard-code register names */
#define MR0_REGISTER r3
#define MR_RETURN_REGISTER r3
#define TASK_NOTIFY_SLOTS 8
#define TASK_NOTIFY_MAXVALUE 255
/* Primaries aren't used for memcopy. Those ops use this as a parameter */
#define L4_UTCB_FULL_BUFFER_SIZE (MR_REST * sizeof(int))
#include L4LIB_INC_GLUE(memlayout.h)
#if !defined (__ASSEMBLY__)
/*
 * Userspace thread control block layout.  The first MR_TOTAL words
 * mirror the primary message registers (mapped to real CPU registers
 * per the table above); mr_rest[] pads the structure out so the
 * whole utcb spans UTCB_SIZE bytes and holds the secondary MRs
 * copied only on FULL IPC.
 */
struct utcb {
	u32 mr[MR_TOTAL];		/* MRs that are mapped to real registers */
	u32 saved_tag;			/* Saved tag field for stacked ipcs */
	u32 saved_sender;		/* Saved sender field for stacked ipcs */
	u8 notify[TASK_NOTIFY_SLOTS];	/* Irq notification slots */
	u32 mr_rest[MR_REST];		/* Complete the utcb for up to 64 words */
};
#endif
#endif /* __GLUE_ARM_MESSAGE_H__ */

View File

@@ -0,0 +1,78 @@
/*
* ARM-specific system call details.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARM_GLUE_SYSCALL_H__
#define __ARM_GLUE_SYSCALL_H__
#include <l4lib/types.h>
#include L4LIB_INC_GLUE(message.h)
/* Only specific call is the trap that gives back the kip address
* from which other system calls can be discovered. */
#define L4_TRAP_KIP 0xB4
/* Used in the kernel to refer to virtual address of this page.
* User space discovers it from the KIP */
#define ARM_SYSCALL_PAGE 0xFFFFF000
extern unsigned int __syscall_page_start;
/*
* This structure is saved on the kernel stack
* just after entering a system call exception.
*/
/*
 * Register frame pushed on the kernel stack on system call entry.
 * r3-r8 double as the primary message registers MR0-MR5 (see the
 * register usage summary in message.h).
 * NOTE(review): sp_usr/lr_usr presumably hold the banked user-mode
 * sp/lr saved by the entry stub -- confirm against the entry asm.
 */
typedef struct syscall_context {
	u32 spsr;
	u32 r0;
	u32 r1;
	u32 r2;
	u32 r3; /* MR0 */
	u32 r4; /* MR1 */
	u32 r5; /* MR2 */
	u32 r6; /* MR3 */
	u32 r7; /* MR4 */
	u32 r8; /* MR5 */
	u32 r9;
	u32 r10;
	u32 r11;
	u32 r12;
	u32 sp_usr;
	u32 lr_usr;
} __attribute__((__packed__)) syscall_context_t;
/* The six primary message registers (MR0-MR5) viewed as a struct. */
typedef struct msg_regs {
	u32 mr0;
	u32 mr1;
	u32 mr2;
	u32 mr3;
	u32 mr4;
	u32 mr5;
} msg_regs_t;
/* NOTE:
* These references are valid only when they have been explicitly set
* by a kernel entry point, e.g. a system call, a data abort handler
* that imitates a page fault ipc etc.
*
* Second note:
* _If_ these refer to real utcb's in the future, make sure to have
* utcb_map_lazily() check so that they're safe accesses.
*/
#define KTCB_REF_ARG0(ktcb) (&(ktcb)->syscall_regs->r0)
#define KTCB_REF_MR0(ktcb) (&(ktcb)->syscall_regs->MR0_REGISTER)
/* Represents each syscall. We get argument registers
* from stack for now. This is slower but the simplest. */
typedef int (*syscall_fn_t)(struct syscall_context *regs);
/* Entry point for syscall dispatching. Called from asm */
int syscall(struct syscall_context *regs, unsigned long);
/* Syscall-related initialiser called during system init. */
void syscall_init(void);
void kip_init_syscalls(void);
#endif /* __ARM_GLUE_SYSCALL_H__ */

View File

@@ -0,0 +1,131 @@
#ifndef __LIST_H__
#define __LIST_H__
#define L4_DEADWORD 0xDEADCCCC
/* Node of an intrusive circular doubly-linked list. */
struct link {
	struct link *next;
	struct link *prev;
};

/* Turn @l into a valid, empty list head: both pointers self-linked. */
static inline void link_init(struct link *l)
{
	l->prev = l->next = l;
}
#define LINK_INIT(link) { &(link), &(link) }
#define LINK_DECLARE(l) \
struct link l = LINK_INIT(l)
#if !defined(__LINUX_CONTAINER__)
/*
 * Insert @new immediately after the @list head, i.e. at the front:
 * list -> new -> (former first element).
 */
static inline void list_insert(struct link *new, struct link *list)
{
	new->prev = list;
	new->next = list->next;
	new->next->prev = new;
	list->next = new;
}
/*
 * Insert @new immediately before the @list head, i.e. at the tail:
 * (former last element) -> new -> list.
 */
static inline void list_insert_tail(struct link *new, struct link *list)
{
	new->next = list;
	new->prev = list->prev;
	new->prev->next = new;
	list->prev = new;
}
/*
 * Unlink @link from its list and poison its pointers with
 * L4_DEADWORD so a stale reuse faults loudly instead of silently
 * corrupting a list.
 */
static inline void list_remove(struct link *link)
{
	link->prev->next = link->next;
	link->next->prev = link->prev;
	link->next = (struct link *)L4_DEADWORD;
	link->prev = (struct link *)L4_DEADWORD;
}
/*
 * Unlink @link from its list and re-initialize it as an empty,
 * self-linked node (safe to insert again, unlike list_remove()).
 */
static inline void list_remove_init(struct link *link)
{
	link->prev->next = link->next;
	link->next->prev = link->prev;
	link->prev = link->next = link;
}
/*
 * Cut the entire list off its @head and return it.
 * @head is left empty (self-linked); the returned pointer is the
 * former first element, still circularly linked with the rest.
 */
static inline struct link *list_detach(struct link *head)
{
	struct link *rest = head->next;

	list_remove_init(head);
	return rest;
}
/*
 * Append the circular list starting at @new_list to the list
 * delimited by the @head/@end pair.
 *
 * NOTE: the previous version ended with "end = new_list;" claiming
 * to "set end to new end" -- but @end is passed by value, so the
 * assignment only changed the local copy and had no caller-visible
 * effect.  The dead statement has been removed; callers needing the
 * new end can read head->prev after this returns.
 */
static inline void list_attach(struct link *new_list, struct link *head, struct link *end)
{
	/* Splice the new list in after the current end */
	end->next = new_list;
	new_list->prev = end;

	/* Walk to the last node of the attached (circular) list */
	while (new_list->next != end->next)
		new_list = new_list->next;

	/* Close the circle back onto head */
	new_list->next = head;
	head->prev = new_list;
}
/* True when @list is a self-linked (empty) list head. */
static inline int list_empty(struct link *list)
{
	return (list->next == list) && (list->prev == list);
}
#define link_to_struct(link, struct_type, link_field) \
container_of(link, struct_type, link_field)
#define list_foreach_struct(struct_ptr, link_start, link_field) \
for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field); \
&struct_ptr->link_field != (link_start); \
struct_ptr = link_to_struct(struct_ptr->link_field.next, typeof(*struct_ptr), link_field))
#define list_foreach_removable_struct(struct_ptr, temp_ptr, link_start, link_field) \
for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field), \
temp_ptr = link_to_struct((struct_ptr)->link_field.next, typeof(*struct_ptr), link_field);\
&struct_ptr->link_field != (link_start); \
struct_ptr = temp_ptr, temp_ptr = link_to_struct(temp_ptr->link_field.next, typeof(*temp_ptr), link_field))
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIST_H__ */

View File

@@ -0,0 +1,44 @@
#ifndef __LIB_MATH_H__
#define __LIB_MATH_H__
#if !defined (__LINUX_CONTAINER__)
#if !defined pow
/*
 * Integer exponentiation: val raised to exp by repeated
 * multiplication.  exp <= 0 yields 1.
 */
static inline int pow(int val, int exp)
{
	int result = 1;

	while (exp-- > 0)
		result *= val;
	return result;
}
#endif
#if !defined min
/* Smaller of two ints. */
static inline int min(int x, int y)
{
	if (x < y)
		return x;
	return y;
}
/* Larger of two ints. */
static inline int max(int x, int y)
{
	if (x > y)
		return x;
	return y;
}
#endif
#endif /* !__LINUX_CONTAINER__ */
/*
 * Test whether half-open range [a, b) intersects [c, d).
 * Returns 1 on overlap, 0 when disjoint.
 */
static inline int set_intersection(unsigned long a, unsigned long b,
				   unsigned long c, unsigned long d)
{
	/* Disjoint exactly when one range ends before the other begins */
	if (b <= c)
		return 0;
	if (a >= d)
		return 0;
	return 1;
}
#endif /* __LIB_MATH_H__ */

View File

@@ -0,0 +1,45 @@
/*
* The elementary concurrency constructs.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __LIB_MUTEX_H__
#define __LIB_MUTEX_H__
#include <l4lib/lib/string.h>
#include <l4lib/lib/spinlock.h>
#include <l4lib/lib/list.h>
#include <l4lib/lib/printk.h>
#include <l4lib/lib/wait.h>
#include INC_ARCH(mutex.h)
/* A mutex is a binary semaphore that can sleep. */
struct mutex {
	struct waitqueue_head wqh;	/* Queue of tasks sleeping on this mutex */
	unsigned int lock;		/* Lock word, manipulated via mutex_inc/dec below */
};
/*
 * Zero the whole mutex (lock word released) and set up its
 * waitqueue head ready for sleepers.
 */
static inline void mutex_init(struct mutex *mutex)
{
	memset(mutex, 0, sizeof(struct mutex));
	waitqueue_head_init(&mutex->wqh);
}
int mutex_trylock(struct mutex *mutex);
int mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);
void mutex_unlock_async(struct mutex *mutex);
/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
/* Increment the count and return its new value. */
static inline int mutex_inc(unsigned int *cnt)
{
	*cnt += 1;
	return *cnt;
}
/* Decrement the count and return its new value. */
static inline int mutex_dec(unsigned int *cnt)
{
	*cnt -= 1;
	return *cnt;
}
#endif /* __LIB_MUTEX_H__ */

View File

@@ -0,0 +1,90 @@
#ifndef __LIB_SPINLOCK_H__
#define __LIB_SPINLOCK_H__
#include <l4lib/lib/string.h>
#include <l4lib/generic/preempt.h>
#include L4LIB_INC_ARCH(irq.h)
#include L4LIB_INC_ARCH(mutex.h)
#if !defined(__LINUX_CONTAINER__)
/* Busy-wait lock; the word is operated on by __spin_lock/__spin_unlock. */
struct spinlock {
	unsigned int lock;
};
#if !defined(__LINUX_CONTAINER__)
#define DECLARE_SPINLOCK(lockname) \
struct spinlock lockname = { \
.lock = 0, \
}
void spin_lock_record_check(void *lock_addr);
void spin_unlock_delete_check(void *lock_addr);
/* Initialize @s to the released state (all-zero). */
static inline void spin_lock_init(struct spinlock *s)
{
	memset(s, 0, sizeof(struct spinlock));
}
/*
* - Guards from deadlock against local processes, but not local irqs.
* - To be used for synchronising against processes on *other* cpus.
*/
/*
 * Take @s, guarding against other cpus; local preemption is
 * disabled first so the holder cannot be scheduled away.
 * On a non-SMP build this reduces to preempt_disable() alone.
 */
static inline void spin_lock(struct spinlock *s)
{
	preempt_disable(); /* This must disable local preempt */
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_lock_record_check(s);
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release @s and re-enable local preemption (mirror of spin_lock;
 * unlock before preempt_enable so a woken task can take the lock).
 */
static inline void spin_unlock(struct spinlock *s)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_unlock_delete_check(s);
#endif
	__spin_unlock(&s->lock);
#endif
	preempt_enable();
}
/*
* - Guards from deadlock against local processes *and* local irqs.
* - To be used for synchronising against processes and irqs
* on other cpus.
*/
/*
 * Take @s with local irqs disabled; the previous irq state is
 * saved into *@state for the matching spin_unlock_irq().
 * On a non-SMP build this reduces to the irq save/disable alone.
 */
static inline void spin_lock_irq(struct spinlock *s,
				 unsigned long *state)
{
	irq_local_disable_save(state);
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_lock_record_check(s);
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release @s and restore the irq state previously saved by
 * spin_lock_irq() (@state is passed by value here).
 */
static inline void spin_unlock_irq(struct spinlock *s,
				   unsigned long state)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_unlock_delete_check(s);
#endif
	__spin_unlock(&s->lock);
#endif
	irq_local_restore(state);
}
#endif
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIB__SPINLOCK_H__ */

View File

@@ -0,0 +1,86 @@
#ifndef __LIB_WAIT_H__
#define __LIB_WAIT_H__
#include <l4lib/lib/list.h>
#include <l4lib/lib/spinlock.h>
struct ktcb;
/* One sleeping task's entry on a waitqueue_head's task_list. */
struct waitqueue {
	struct link task_list;	/* Linkage into waitqueue_head.task_list */
	struct ktcb *task;	/* The task that is sleeping */
};
#define WAKEUP_ASYNC 0
enum wakeup_flags {
WAKEUP_INTERRUPT = (1 << 0), /* Set interrupt flag for task */
WAKEUP_SYNC = (1 << 1), /* Wake it up synchronously */
};
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
struct waitqueue wq = { \
.task_list = { &wq.task_list, &wq.task_list }, \
.task = tsk, \
};
/* Head of a wait queue; slock guards sleepers and task_list. */
struct waitqueue_head {
	int sleepers;		/* Number of queued waiters */
	struct spinlock slock;	/* Protects this structure */
	struct link task_list;	/* List of struct waitqueue entries */
};
/* Zero @head (no sleepers, lock released) and self-link its task list. */
static inline void waitqueue_head_init(struct waitqueue_head *head)
{
	memset(head, 0, sizeof(struct waitqueue_head));
	link_init(&head->task_list);
}
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
struct waitqueue *wq);
void task_unset_wqh(struct ktcb *task);
/*
 * Sleep on @wqh until @condition becomes true.
 *
 * @ret is set to 0 when the condition was met, or -EINTR when the
 * task was woken with TASK_INTERRUPTED set.  The condition is always
 * evaluated under wqh->slock with local irqs disabled; the lock is
 * dropped before schedule() runs.
 *
 * FIX: the expansion previously ended in "while(0);" -- the trailing
 * semicolon made "WAIT_EVENT(...);" expand to an extra empty
 * statement, breaking use in an unbraced if/else.  It now ends in
 * "while (0)" as a statement-like macro should.  A dead commented-out
 * printk was also dropped.
 */
#define WAIT_EVENT(wqh, condition, ret)					\
do {									\
	ret = 0;							\
	for (;;) {							\
		unsigned long irqsave;					\
		spin_lock_irq(&(wqh)->slock, &irqsave);			\
		if (condition) {					\
			spin_unlock_irq(&(wqh)->slock, irqsave);	\
			break;						\
		}							\
		CREATE_WAITQUEUE_ON_STACK(wq, current);			\
		task_set_wqh(current, wqh, &wq);			\
		(wqh)->sleepers++;					\
		list_insert_tail(&wq.task_list,				\
				 &(wqh)->task_list);			\
		sched_prepare_sleep();					\
		spin_unlock_irq(&(wqh)->slock, irqsave);		\
		schedule();						\
		/* Did we wake up normally or get interrupted */	\
		if (current->flags & TASK_INTERRUPTED) {		\
			current->flags &= ~TASK_INTERRUPTED;		\
			ret = -EINTR;					\
			break;						\
		}							\
	}								\
} while (0)
void wake_up(struct waitqueue_head *wqh, unsigned int flags);
int wake_up_task(struct ktcb *task, unsigned int flags);
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
int wait_on(struct waitqueue_head *wqh);
int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq);
int wait_on_prepared_wait(void);
#endif /* __LIB_WAIT_H__ */

View File

@@ -0,0 +1,27 @@
/*
* Userspace-specific macros.
*
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __LIBL4_MACROS_H__
#define __LIBL4_MACROS_H__
/* Linux containers get config values some other way — TODO confirm */
#if !defined (__LINUX_CONTAINER__)
#include <l4/config.h>
#endif
/*
 * These are for the userspace code to include
 * different directories based on configuration
 * values for platform, architecture and so on.
 *
 * This file is meant to be included from all
 * userspace projects by default.
 *
 * NOTE(review): __ARCH__, __SUBARCH__ and __PLATFORM__ are presumably
 * defined as preprocessor macros (e.g. -D on the compiler command line)
 * so they expand inside the include path — confirm in the build scripts.
 */
#define L4LIB_INC_ARCH(x) <l4lib/arch/__ARCH__/x>
#define L4LIB_INC_SUBARCH(x) <l4lib/arch/__ARCH__/__SUBARCH__/x>
#define L4LIB_INC_PLAT(x) <l4lib/platform/__PLATFORM__/x>
#define L4LIB_INC_GLUE(x) <l4lib/glue/__ARCH__/x>
#endif /* __LIBL4_MACROS_H__ */

View File

@@ -0,0 +1,42 @@
/*
* User space locking
*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#ifndef __L4_MUTEX_H__
#define __L4_MUTEX_H__
#if !defined(__ASSEMBLY__)
#include <l4/api/mutex.h>
/*
 * Userspace mutex: a single int lock word. int-aligned, presumably so
 * the lock word can be used with the architecture's atomic/exclusive
 * access instructions — confirm against the lock implementation.
 */
struct l4_mutex {
	int lock;
} __attribute__((aligned(sizeof(int))));
void l4_mutex_init(struct l4_mutex *m);
int l4_mutex_lock(struct l4_mutex *m);
int l4_mutex_unlock(struct l4_mutex *m);
#endif
/* Mutex return value - don't mix up with mutex state */
#define L4_MUTEX_CONTENDED -1
#define L4_MUTEX_SUCCESS 0
/*
 * Mutex states:
 * Unlocked = -1, locked = 0, anything above 0 tells
 * number of contended threads
 */
#define L4_MUTEX_LOCKED 0
#define L4_MUTEX_UNLOCKED -1
/* Static initializer: declares mutex @m in the unlocked state */
#define L4_MUTEX(m) \
struct l4_mutex m = { L4_MUTEX_UNLOCKED }
#endif /* __L4_MUTEX_H__ */

View File

@@ -0,0 +1,23 @@
#ifndef __OS_KSTAT_H__
#define __OS_KSTAT_H__
#include <l4lib/types.h>
/*
 * Internal codezero-specific stat structure.
 * This is converted to posix stat in userspace
 */
struct kstat {
	u64 vnum;	/* Vnode number — presumably maps to st_ino; confirm */
	u32 mode;	/* File type and permission bits */
	int links;	/* Hard link count */
	u16 uid;	/* Owner user id */
	u16 gid;	/* Owner group id */
	u64 size;	/* Size in bytes */
	int blksize;	/* Preferred I/O block size */
	u64 atime;	/* Last access time */
	u64 mtime;	/* Last modification time */
	u64 ctime;	/* Last status-change time */
};
#endif

View File

@@ -0,0 +1,7 @@
#ifndef __OS_READDIR_H__
#define __OS_READDIR_H__
/* Any os syscall related data that is not in posix */
/*
 * Read directory entries of open directory @fd into @buf, up to
 * @count bytes; presumably returns bytes read or a negative error.
 * NOTE(review): uses ssize_t/size_t but includes no header declaring
 * them — confirm every includer pulls those in first.
 */
ssize_t os_readdir(int fd, void *buf, size_t count);
#endif

View File

@@ -0,0 +1,13 @@
#include <l4lib/macros.h>
#include L4LIB_INC_SUBARCH(perfmon.h)

/*
 * No-op cycle-counter accessors used when userspace perfmon debugging
 * is disabled; the subarch header supplies the real implementations
 * when CONFIG_DEBUG_PERFMON_USER is set.
 *
 * Fix: declare parameter lists as (void) instead of old-style empty
 * parentheses, which leave the functions unprototyped.
 *
 * NOTE(review): this header has no include guard; the static inline
 * definitions would clash on double inclusion in one translation
 * unit — confirm, or add a guard.
 */
#if !defined (CONFIG_DEBUG_PERFMON_USER)
/* Common empty definitions for all arches */
static inline u32 perfmon_read_cyccnt(void) { return 0; }
static inline void perfmon_reset_start_cyccnt(void) { }
static inline u32 perfmon_read_reset_start_cyccnt(void) { return 0; }
#endif

View File

@@ -0,0 +1,7 @@
#ifndef __TYPES_H__
#define __TYPES_H__
/* Pull in the architecture-specific type definitions via the config-driven include path */
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(types.h)
#endif /* __TYPES_H__ */

View File

@@ -0,0 +1,13 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __UTCB_H__
#define __UTCB_H__
#include <l4lib/types.h>
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(utcb.h)
int utcb_init(void);
#endif /* __UTCB_H__ */