Changes since April

Clean up of build directories.
Simplifications to capability model.
This commit is contained in:
Bahadir Balban
2010-06-01 15:08:13 +03:00
parent aef14b55ec
commit 6fa4884a5a
450 changed files with 10449 additions and 7383 deletions

View File

@@ -0,0 +1,17 @@
/*
* Generic macros for cache operations
*
* Copyright (C) 2009 B Labs Ltd.
*/
#ifndef __CACHE_CONTROL_H__
#define __CACHE_CONTROL_H__
#include L4LIB_INC_GLUE(cache.h)
#define L4_INVALIDATE_ICACHE ARCH_INVALIDATE_ICACHE
#define L4_INVALIDATE_DCACHE ARCH_INVALIDATE_DCACHE
#define L4_CLEAN_DCACHE ARCH_CLEAN_DCACHE
#define L4_CLEAN_INVALIDATE_DCACHE ARCH_CLEAN_INVALIDATE_DCACHE
#define L4_INVALIDATE_TLB ARCH_INVALIDATE_TLB
#endif /* __CACHE_CONTROL_H__ */

View File

@@ -0,0 +1,96 @@
/*
* Syscall API for capability manipulation
*
* Copyright (C) 2009 Bahadir Balban
*/
#ifndef __API_CAPABILITY_H__
#define __API_CAPABILITY_H__
#include <l4lib/lib/list.h>
#include L4LIB_INC_ARCH(types.h)
/* Capability syscall request types */
#define CAP_CONTROL_NCAPS 0x00000000
#define CAP_CONTROL_READ 0x00000001
#define CAP_CONTROL_SHARE 0x00000002
#define CAP_CONTROL_GRANT 0x00000003
#define CAP_CONTROL_REPLICATE 0x00000004
#define CAP_CONTROL_SPLIT 0x00000005
#define CAP_CONTROL_DEDUCE 0x00000006
#define CAP_CONTROL_DESTROY 0x00000007
#define CAP_SHARE_MASK 0x0000000F
#define CAP_SHARE_SINGLE 0x00000001
#define CAP_SHARE_ALL_CONTAINER 0x00000002
#define CAP_SHARE_ALL_SPACE 0x00000003
#define CAP_GRANT_MASK 0x0000000F
#define CAP_GRANT_SINGLE 0x00000001
#define CAP_GRANT_IMMUTABLE 0x00000004
#define CAP_SPLIT_MASK 0x0000000F
#define CAP_SPLIT_SIZE 0x00000001
#define CAP_SPLIT_ACCESS 0x00000002
#define CAP_SPLIT_RANGE 0x00000003 /* Returns -EPERM */
/*
* A capability is a unique representation of security
* qualifiers on a particular resource.
*
* In this structure:
*
* The capid denotes the unique capability ID.
* The resid denotes the unique ID of targeted resource.
* The owner denotes the unique ID of the one and only capability owner. This is
* almost always a thread ID.
*
* The type field contains two types:
* - The capability type,
* - The targeted resource type.
*
* The targeted resource type denotes what type of resource the capability is
* allowed to operate on. For example a thread, a thread group, an address space
* or a memory can be of this type.
*
* The capability type defines the general set of operations allowed on a
* particular resource. For example a capability type may be thread_control,
* exchange_registers, ipc, or map operations. A resource type may be such as a
* thread, a thread group, a virtual or physical memory region.
*
* There are also quantitative capability types. While their names denote
* quantitative objects such as memory, threads, and address spaces, these
* types actually define the quantitative operations available on those
* resources such as creation and deletion of a thread, allocation and
* deallocation of a memory region etc.
*
* The access field denotes the fine-grain operations available on a particular
* resource. The meaning of each bitfield differs according to the type of the
* capability. For example, for a capability type thread_control, the bitfields
* may mean suspend, resume, create, delete etc.
*/
struct capability {
struct link list; /* Links this capability into a capability list */
/* Capability identifiers */
l4id_t capid; /* Unique capability ID */
l4id_t owner; /* Capability owner ID (almost always a thread ID) */
l4id_t resid; /* Targeted resource ID */
unsigned int type; /* Capability and target resource type */
/* Capability limits/permissions */
u32 access; /* Permitted operations (bitfield; meaning depends on type) */
/* Limits on the resource (NOTE: must never have signed type) */
unsigned long start; /* Resource start value */
unsigned long end; /* Resource end value */
unsigned long size; /* Resource size */
/* Use count of resource */
unsigned long used;
/* Device attributes, if this is a device. */
unsigned int attr;
l4id_t irq; /* Presumably the device's irq number -- confirm with users */
};
#endif /* __API_CAPABILITY_H__ */

View File

@@ -0,0 +1,148 @@
#ifndef __ERRNO_H__
#define __ERRNO_H__
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define ECANCELED 125 /* Operation Canceled */
#define ENOKEY 126 /* Required key not available */
#define EKEYEXPIRED 127 /* Key has expired */
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
/* Codezero specific error codes */
#define EACTIVE 132 /* Task active */
#define ENOIPC 133 /* General IPC error */
#define ENOCAP 134 /* None or insufficient capability */
#define ENOUTCB 135 /* Task has no utcb set up */
#define ENOMAP 136 /* The memory area has unmapped regions */
#define ENOIRQ 137 /* Irq cannot be registered */
#define EABORT 138 /* Abort cannot be handled */
#define ENOCHILD 139 /* Task is not paged by caller */
#endif /* __ERRNO_H__ */

View File

@@ -0,0 +1,50 @@
/*
* Exchange registers system call data.
*
* Copyright (C) 2008 Bahadir Balban
*/
#ifndef __EXREGS_H__
#define __EXREGS_H__
#include L4LIB_INC_GLUE(syscall.h)
#include L4LIB_INC_GLUE(context.h)
#include <l4lib/types.h>
#define EXREGS_SET_PAGER 1
#define EXREGS_SET_UTCB 2
#define EXREGS_READ 4
/*
 * Bit vector of the regular registers (r0-r12, lr) in
 * exregs_context_t that exchange_registers may read/write.
 *
 * NOTE: the previous definitions carried a stray trailing '\'
 * after the last token of each macro; line splicing then merged
 * the following #define directive into the replacement list,
 * so EXREGS_VALID_SP and EXREGS_VALID_PC were never defined.
 */
#define EXREGS_VALID_REGULAR_REGS \
	(FIELD_TO_BIT(exregs_context_t, r0) | \
	 FIELD_TO_BIT(exregs_context_t, r1) | \
	 FIELD_TO_BIT(exregs_context_t, r2) | \
	 FIELD_TO_BIT(exregs_context_t, r3) | \
	 FIELD_TO_BIT(exregs_context_t, r4) | \
	 FIELD_TO_BIT(exregs_context_t, r5) | \
	 FIELD_TO_BIT(exregs_context_t, r6) | \
	 FIELD_TO_BIT(exregs_context_t, r7) | \
	 FIELD_TO_BIT(exregs_context_t, r8) | \
	 FIELD_TO_BIT(exregs_context_t, r9) | \
	 FIELD_TO_BIT(exregs_context_t, r10) | \
	 FIELD_TO_BIT(exregs_context_t, r11) | \
	 FIELD_TO_BIT(exregs_context_t, r12) | \
	 FIELD_TO_BIT(exregs_context_t, lr))

/* Stack pointer field of exregs_context_t */
#define EXREGS_VALID_SP \
	FIELD_TO_BIT(exregs_context_t, sp)

/* Program counter field of exregs_context_t */
#define EXREGS_VALID_PC \
	FIELD_TO_BIT(exregs_context_t, pc)
/* Structure passed by userspace pagers for exchanging registers */
struct exregs_data {
exregs_context_t context; /* Register values to read or write */
u32 valid_vect; /* Bit vector of valid fields in context */
u32 flags; /* EXREGS_SET_PAGER / EXREGS_SET_UTCB / EXREGS_READ */
l4id_t pagerid; /* Pager id -- presumably used with EXREGS_SET_PAGER */
unsigned long utcb_address; /* Utcb address -- presumably used with EXREGS_SET_UTCB */
};
#endif /* __EXREGS_H__ */

View File

@@ -0,0 +1,27 @@
#ifndef __IPC_H__
#define __IPC_H__
#define L4_NILTHREAD 0xFFFFFFFF
#define L4_ANYTHREAD 0xFFFFFFFE
#define L4_IPC_TAG_MR_OFFSET 0
/* Pagefault */
#define L4_IPC_TAG_PFAULT 0
#define L4_IPC_TAG_UNDEF_FAULT 1
#define L4_IPC_FLAGS_TYPE_MASK 0x0000000F
#define L4_IPC_FLAGS_SHORT 0x00000000 /* Short IPC involves just primary message registers */
#define L4_IPC_FLAGS_FULL 0x00000001 /* Full IPC involves full UTCB copy */
#define L4_IPC_FLAGS_EXTENDED 0x00000002 /* Extended IPC can page-fault and copy up to 2KB */
/* Extended IPC extra fields */
#define L4_IPC_FLAGS_MSG_INDEX_MASK 0x00000FF0 /* Index of message register with buffer pointer */
#define L4_IPC_FLAGS_SIZE_MASK 0x0FFF0000
#define L4_IPC_FLAGS_SIZE_SHIFT 16
#define L4_IPC_FLAGS_MSG_INDEX_SHIFT 4
#define L4_IPC_EXTENDED_MAX_SIZE (SZ_1K*2)
#endif /* __IPC_H__ */

View File

@@ -0,0 +1,10 @@
#ifndef __API_IRQ_H__
#define __API_IRQ_H__
#define IRQ_CONTROL_REGISTER 0
#define IRQ_CONTROL_RELEASE 1
#define IRQ_CONTROL_WAIT 2
#endif /* __API_IRQ_H__ */

View File

@@ -0,0 +1,82 @@
/*
* Kernel Interface Page
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __KIP_H__
#define __KIP_H__
#include <l4lib/types.h>
#define __YEAR__ ((((__DATE__ [7] - '0') * 10 + (__DATE__ [8] - '0')) * 10 \
+ (__DATE__ [9] - '0')) * 10 + (__DATE__ [10] - '0'))
#define __MONTH__ (__DATE__ [2] == 'n' ? (__DATE__ [1] == 'a' ? 0 : 5) \
: __DATE__ [2] == 'b' ? 1 \
: __DATE__ [2] == 'r' ? (__DATE__ [0] == 'M' ? 2 : 3) \
: __DATE__ [2] == 'y' ? 4 \
: __DATE__ [2] == 'l' ? 6 \
: __DATE__ [2] == 'g' ? 7 \
: __DATE__ [2] == 'p' ? 8 \
: __DATE__ [2] == 't' ? 9 \
: __DATE__ [2] == 'v' ? 10 : 11)
#define __DAY__ ((__DATE__ [4] == ' ' ? 0 : __DATE__ [4] - '0') * 10 \
+ (__DATE__ [5] - '0'))
#define CODEZERO_VERSION 0
#define CODEZERO_SUBVERSION 2
#define KDESC_DATE_SIZE 12
#define KDESC_TIME_SIZE 9
struct kernel_descriptor {
u32 version;
u32 subversion;
u32 magic;
char date[KDESC_DATE_SIZE];
char time[KDESC_TIME_SIZE];
} __attribute__((__packed__));
/* Experimental KIP with non-standard offsets */
struct kip {
/* System descriptions */
u32 magic;
u16 version_rsrv;
u8 api_subversion;
u8 api_version;
u32 api_flags;
u32 container_control;
u32 time;
u32 irq_control;
u32 thread_control;
u32 ipc_control;
u32 map;
u32 ipc;
u32 capability_control;
u32 unmap;
u32 exchange_registers;
u32 thread_switch;
u32 schedule;
u32 getid;
u32 mutex_control;
u32 cache_control;
u32 arch_syscall0;
u32 arch_syscall1;
u32 arch_syscall2;
u32 utcb;
struct kernel_descriptor kdesc;
} __attribute__((__packed__));
#if defined (__KERNEL__)
extern struct kip kip;
#endif /* __KERNEL__ */
#endif /* __KIP_H__ */

View File

@@ -0,0 +1,60 @@
#ifndef __MUTEX_CONTROL_H__
#define __MUTEX_CONTROL_H__
#if !defined(__LINUX_CONTAINER__)
/* Request ids for mutex_control syscall */
#if defined (__KERNEL__)
#define MUTEX_CONTROL_LOCK L4_MUTEX_LOCK
#define MUTEX_CONTROL_UNLOCK L4_MUTEX_UNLOCK
#define MUTEX_CONTROL_OPMASK L4_MUTEX_OPMASK
#define mutex_operation(x) ((x) & MUTEX_CONTROL_OPMASK)
#define mutex_contenders(x) ((x) & ~MUTEX_CONTROL_OPMASK)
#include <l4lib/lib/wait.h>
#include <l4lib/lib/list.h>
#include <l4lib/lib/mutex.h>
/*
* Contender threshold is the total number of contenders
* who are expected to sleep on the mutex, and will be waited
* for a wakeup.
*/
struct mutex_queue {
int contenders; /* Number of contenders expected to sleep on this mutex */
unsigned long physical; /* Physical address identifying the mutex -- presumably of the user lock word */
struct link list; /* Membership in the mutex_queue_head list */
struct waitqueue_head wqh_contenders; /* Wait queue of contenders */
struct waitqueue_head wqh_holders; /* Wait queue of holders */
};
/*
* Mutex queue head keeps the list of all userspace mutexes.
*
* Here, mutex_control_mutex is a single lock for:
* (1) Mutex_queue create/deletion
* (2) List add/removal.
* (3) Wait synchronization:
* - Both waitqueue spinlocks need to be acquired for
* rendezvous inspection to occur atomically. Currently
* it's not done since we rely on this mutex for that.
*/
struct mutex_queue_head {
struct link list;
struct mutex mutex_control_mutex;
int count;
};
void init_mutex_queue_head(struct mutex_queue_head *mqhead);
#endif
#define L4_MUTEX_OPMASK 0xF0000000
#define L4_MUTEX_LOCK 0x10000000
#define L4_MUTEX_UNLOCK 0x20000000
#endif /* __LINUX_CONTAINER__ */
#endif /* __MUTEX_CONTROL_H__*/

View File

@@ -0,0 +1,5 @@
#ifndef __API_SPACE_H__
#define __API_SPACE_H__
#endif /* __API_SPACE_H__ */

View File

@@ -0,0 +1,25 @@
#ifndef __API_THREAD_H__
#define __API_THREAD_H__
/* Thread control action (top nibble of the flags word) */
#define THREAD_ACTION_MASK 0xF0000000
#define THREAD_CREATE 0x00000000
#define THREAD_RUN 0x10000000
#define THREAD_SUSPEND 0x20000000
#define THREAD_DESTROY 0x30000000
#define THREAD_RECYCLE 0x40000000
#define THREAD_WAIT 0x50000000
/* Thread creation flag fields */
#define THREAD_SHARE_MASK 0x00F00000
#define THREAD_SPACE_MASK 0x0F000000
#define THREAD_CREATE_MASK (THREAD_SHARE_MASK | THREAD_SPACE_MASK)
#define TC_SHARE_CAPS 0x00100000 /* Share all thread capabilities */
#define TC_SHARE_UTCB 0x00200000 /* Share utcb location (same space) */
#define TC_SHARE_GROUP 0x00400000 /* Share thread group id */
#define TC_SHARE_SPACE 0x01000000 /* New thread, use given space */
#define TC_COPY_SPACE 0x02000000 /* New thread, copy given space */
#define TC_NEW_SPACE 0x04000000 /* New thread, new space */
/* #define THREAD_USER_MASK 0x000F0000 Reserved for userspace */
#define THREAD_EXIT_MASK 0x0000FFFF /* Thread exit code */
#endif /* __API_THREAD_H__ */

View File

@@ -0,0 +1,73 @@
/*
* Common definitions for exceptions
* across ARM sub-architectures.
*
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __EXCEPTION_H__
#define __EXCEPTION_H__
//#include _INC_SUBARCH(exception.h)
#include L4LIB_INC_ARCH(asm.h)
/* Abort debugging conditions */
// #define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) printk(__VA_ARGS__)
#else
#define dbg_abort(...)
#endif
/* Codezero-specific abort type */
#define ABORT_TYPE_PREFETCH 1
#define ABORT_TYPE_DATA 0
/* If abort is handled and resolved in check_aborts */
#define ABORT_HANDLED 1
/*
 * Codezero makes use of bit 8 (Always Zero) of the FSR to encode
 * which type of abort occurred.
 *
 * do { } while (0) so the macro behaves as a single statement
 * (safe in unbraced if/else); arguments fully parenthesized.
 */
#define set_abort_type(fsr, x) \
	do { \
		(fsr) &= ~(1 << 8); \
		(fsr) |= (((x) & 1) << 8); \
	} while (0)
#define is_prefetch_abort(fsr) (((fsr) >> 8) & 0x1)
#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
/* Kernel's data about the fault */
typedef struct fault_kdata {
u32 faulty_pc; /* In DABT: Aborting PC, In PABT: Same as FAR */
u32 fsr; /* In DABT: DFSR, In PABT: IFSR */
u32 far; /* In DABT: DFAR, in PABT: IFAR */
pte_t pte; /* Faulty page table entry */
} __attribute__ ((__packed__)) fault_kdata_t;
/* This is filled on entry to irq handler, only if a process was interrupted.*/
extern unsigned int preempted_psr;
/* Implementing these as functions cause circular include dependency for tcb.h */
#define TASK_IN_KERNEL(tcb) (((tcb)->context.spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
#define TASK_IN_USER(tcb) (!TASK_IN_KERNEL(tcb))
/* True if the saved PSR @spsr indicates user mode */
static inline int is_user_mode(u32 spsr)
{
	return ((spsr & ARM_MODE_MASK) == ARM_MODE_USR);
}

/*
 * True if the interrupted context (preempted_psr) was in SVC mode.
 * '(void)' instead of '()': an empty list declares unspecified
 * arguments in C and defeats call-site type checking.
 */
static inline int in_kernel(void)
{
	return (((preempted_psr & ARM_MODE_MASK) == ARM_MODE_SVC)) ? 1 : 0;
}

/* True if the interrupted context was not in SVC mode */
static inline int in_user(void)
{
	return !in_kernel();
}
int pager_pagein_request(unsigned long vaddr, unsigned long size,
unsigned int flags);
int fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag);
int is_kernel_abort(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
#endif /* __EXCEPTION_H__ */

View File

@@ -0,0 +1,25 @@
#ifndef __ARM_IO_H__
#define __ARM_IO_H__
/*
* Arch-specific io functions/macros.
*
* Copyright (C) 2007 Bahadir Balban
*/
#if defined (__KERNEL__) && !defined (__LINUX_CONTAINER__)
#include INC_GLUE(memlayout.h)
/*
 * 32-bit device register accessors.
 * Expansion and arguments parenthesized so that e.g.
 * write(a, b), or expressions around read(), group correctly.
 * NOTE(review): these shadow the POSIX read()/write() names;
 * kernel-only (__KERNEL__) scope makes that acceptable here.
 */
#define read(address) (*((volatile unsigned int *) (address)))
#define write(val, address) (*((volatile unsigned int *) (address)) = (val))
#endif /* ends __KERNEL__ */
/*
* Generic uart virtual address until a file-based console access
* is available for userspace
*/
#define USERSPACE_CONSOLE_VBASE 0xF9800000
#endif /* __ARM_IO_H__ */

View File

@@ -0,0 +1,29 @@
#ifndef __ARM_IRQ_H__
#define __ARM_IRQ_H__
#include INC_SUBARCH(irq.h)
void irq_local_restore(unsigned long state);
void irq_local_disable_save(unsigned long *state);
int irqs_enabled();
/* Enable IRQs locally. '(void)' rather than '()' for a proper C prototype. */
static inline void irq_local_enable(void)
{
	enable_irqs();
}

/* Disable IRQs locally */
static inline void irq_local_disable(void)
{
	disable_irqs();
}
/*
* Destructive atomic-read.
*
* Write 0 to byte at @location as its contents are read back.
*/
char l4_atomic_dest_readb(void *location);
#endif /* __ARM_IRQ_H__ */

View File

@@ -0,0 +1,16 @@
/*
* ARM specific low-level mutex interfaces
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARCH_MUTEX_H__
#define __ARCH_MUTEX_H__
/* TODO: The return types could be improved for debug checking */
void __spin_lock(unsigned int *s);
void __spin_unlock(unsigned int *s);
unsigned int __mutex_lock(unsigned int *m);
void __mutex_unlock(unsigned int *m);
#endif /* __ARCH_MUTEX_H__ */

View File

@@ -0,0 +1,137 @@
/*
* ARM v5-specific virtual memory details
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __V5_MM_H__
#define __V5_MM_H__
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
#if !defined(__LINUX_CONTAINER__)
#define SECTION_SIZE SZ_1MB
#define SECTION_MASK (SECTION_SIZE - 1)
#endif
#define SECTION_ALIGN_MASK (~SECTION_MASK)
#define SECTION_BITS 20
#define ARM_PAGE_SIZE SZ_4K
#define ARM_PAGE_MASK 0xFFF
#define ARM_PAGE_BITS 12
/*
 * First-level table: 4 x 4KB = 16KB.
 * Parenthesized so e.g. (x / PGD_SIZE) does not silently
 * expand to (x / SZ_4K * 4).
 */
#define PGD_SIZE (SZ_4K * 4)
#define PGD_ENTRY_TOTAL SZ_4K
#if !defined(__LINUX_CONTAINER__)
#define PMD_SIZE SZ_1K
#define PMD_TYPE_MASK 0x3
#define PMD_TYPE_FAULT 0
#define PTE_TYPE_MASK 0x3
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#endif
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_PMD 1
#define PMD_TYPE_SECTION 2
#define PTE_TYPE_TINY 3
/* Permission field offsets */
#define SECTION_AP0 10
/*
* These are indices into arrays with pgd_t or pmd_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
& 0x3FFC) / sizeof(pmd_t))
/*
* Strip out the page offset in this
* megabyte from a total of 256 pages.
*/
#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
& 0x3FC) / sizeof (pte_t))
/* We need this as print-early.S is including this file */
#ifndef __ASSEMBLY__
#if !defined(__LINUX_CONTAINER__)
/* Type-checkable page table elements */
typedef u32 pmd_t;
typedef u32 pte_t;
/* Page global directory made up of pgd_t entries */
typedef struct pgd_table {
pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;
/* Page middle directory made up of pmd_t entries */
typedef struct pmd_table {
pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;
extern pgd_table_t init_pgd;
#endif
/* Applies for both small and large pages */
#define PAGE_AP0 4
#define PAGE_AP1 6
#define PAGE_AP2 8
#define PAGE_AP3 10
/* Permission values with rom and sys bits ignored */
#define SVC_RW_USR_NONE 1
#define SVC_RW_USR_RO 2
#define SVC_RW_USR_RW 3
#define PTE_PROT_MASK (0xFF << 4)
#define CACHEABILITY 3
#define BUFFERABILITY 2
#define cacheable (1 << CACHEABILITY)
#define bufferable (1 << BUFFERABILITY)
#define uncacheable 0
#define unbufferable 0
/* Helper macros for common cases */
#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
| (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
| (SVC_RW_USR_RO << PAGE_AP3))
#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
| (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
| (SVC_RW_USR_NONE << PAGE_AP3))
#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
| (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
| (SVC_RW_USR_RW << PAGE_AP3))
/* There is no execute bit in ARMv5, so we ignore it */
#define __MAP_USR_RWX __MAP_USR_RW
#define __MAP_USR_RX __MAP_USR_RO
#define __MAP_KERN_RWX __MAP_KERN_RW
#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
#define __MAP_FAULT 0
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
unsigned int size, unsigned int flags);
void remove_section_mapping(unsigned long vaddr);
void arch_update_utcb(unsigned long utcb_address);
void system_identify(void);
#endif /* __ASSEMBLY__ */
#endif /* __V5_MM_H__ */

View File

@@ -0,0 +1,42 @@
/*
* ARMv7 specific abort handling definitions
*
* Copyright (C) 2010 B Labs Ltd.
*/
#ifndef __V7_ARCH_EXCEPTION_H__
#define __V7_ARCH_EXCEPTION_H__
/* Data and Prefetch abort encodings */
#define ABORT_TTBW_SYNC_EXTERNAL_LEVEL1 0x0C
#define ABORT_TTBW_SYNC_EXTERNAL_LEVEL2 0x0E
#define ABORT_TTBW_SYNC_PARITY_LEVEL1 0x1C
#define ABORT_TTBW_SYNC_PARITY_LEVEL2 0x1E
#define ABORT_TRANSLATION_SECTION 0x05
#define ABORT_TRANSLATION_PAGE 0x07
#define ABORT_AFLAG_SECTION 0x03
#define ABORT_AFLAG_PAGE 0x06
#define ABORT_DOMAIN_SECTION 0x09
#define ABORT_DOMAIN_PAGE 0x0B
#define ABORT_PERMISSION_SECTION 0x0D
#define ABORT_PERMISSION_PAGE 0x0F
#define ABORT_DEBUG_EVENT 0x02
#define ABORT_SYNC_EXTERNAL 0x08
#define ABORT_SYNC_PARITY 0x19
#define ABORT_ASYNC_PARITY 0x18 /* Only on Data aborts */
#define ABORT_ASYNC_EXTERNAL 0x16 /* Only on Data aborts */
#define ABORT_ICACHE_MAINTENANCE 0x04 /* Only in Data aborts */
#define ABORT_ALIGNMENT 0x01 /* Only in Data aborts */
/* IFSR/DFSR register bits */
#define FSR_FS_BIT4 10 /* 4th bit of fault status */
#define DFSR_WNR_BIT 11 /* Write-not-read bit */
#define FSR_EXT_BIT 12 /* External abort type bit */
#define FSR_FS_MASK 0xF
/*
 * Assemble the 5-bit fault status from an IFSR/DFSR value:
 * the low four bits come from FS[3:0], the top bit from FS[4].
 */
static inline u32 fsr_get_status(u32 fsr)
{
	u32 fs_low = fsr & FSR_FS_MASK;
	u32 fs_bit4 = (fsr >> FSR_FS_BIT4) & 1;

	return fs_low | (fs_bit4 << 4);
}
#endif /* __V7_ARCH_EXCEPTION_H__ */

View File

@@ -0,0 +1,315 @@
/*
* v7 memory management definitions
*
* Copyright (C) 2010 B Labs Ltd.
* Written by Bahadir Balban
*/
#ifndef __V7_MM_H__
#define __V7_MM_H__
/* Generic definitions used across the kernel */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
/* Non-global first level descriptor definitions */
#define TASK_PGD_SIZE_MAP4GB SZ_16K
#define TASK_PGD_SIZE_MAP2GB SZ_8K
#define TASK_PGD_SIZE_MAP1GB SZ_4K
#define TASK_PGD_SIZE_MAP512MB (SZ_1K * 2)
#define TASK_PGD_SIZE_MAP256MB SZ_1K
#define TASK_PGD_SIZE_MAP128MB 512
#define TASK_PGD_SIZE_MAP64MB 256
#define TASK_PGD_SIZE_MAP32MB 128
/* Any virtual mapping above this value goes to the global table */
#define PGD_GLOBAL_BOUNDARY 0x80000000
/* Task-specific page table, userspace private + shared memory mappings */
#define PGD_ENTRY_TOTAL (TASK_PGD_SIZE_MAP2GB >> 2)
#define PGD_SIZE (TASK_PGD_SIZE_MAP2GB)
/* Global page table size UTCB + kernel + device mappings */
#define PGD_GLOBAL_SIZE SZ_16K
#define PGD_GLOBAL_ENTRY_TOTAL (PGD_GLOBAL_SIZE >> 2)
#if !defined(__LINUX_CONTAINER__)
#define PMD_SIZE SZ_1K
#endif
#define PMD_ENTRY_TOTAL 256
#define PMD_MAP_SIZE SZ_1MB
/* FIXME: Check these shifts/masks are correct */
#define PGD_INDEX_MASK 0x3FFC
#define PGD_INDEX_SHIFT 18
#define PMD_INDEX_MASK 0x3FC
#define PMD_INDEX_SHIFT 10
/*
* These are indices into arrays with pmd_t or pte_t sized elements,
* therefore the index must be divided by appropriate element size
*/
#define PGD_INDEX(x) (((((unsigned long)(x)) >> PGD_INDEX_SHIFT) \
& PGD_INDEX_MASK) / sizeof(pmd_t))
/* Strip out the page offset in this megabyte from a total of 256 pages. */
#define PMD_INDEX(x) (((((unsigned long)(x)) >> PMD_INDEX_SHIFT) \
& PMD_INDEX_MASK) / sizeof (pte_t))
#if !defined (__ASSEMBLY__) && !defined (__LINUX_CONTAINER__)
/* Type-checkable page table elements */
typedef u32 pmd_t;
typedef u32 pte_t;
/* Page global directory made up of pmd_t entries */
typedef struct page_table_directory {
pmd_t entry[PGD_GLOBAL_ENTRY_TOTAL];
} pgd_global_table_t;
/* Page non-global directory */
typedef struct task_page_table_directory {
pmd_t entry[PGD_ENTRY_TOTAL];
} pgd_table_t;
/* Page middle directory made up of pte_t entries */
typedef struct pmd_table {
pte_t entry[PMD_ENTRY_TOTAL];
} pmd_table_t;
extern pgd_table_t init_pgd;
extern pgd_global_table_t init_global_pgd;
#endif /* !defined(__ASSEMBLY__) */
/* PMD definitions (2nd level page tables) */
#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
#define PMD_TYPE_FAULT 0x0
#define PMD_TYPE_PMD 0x1
#define PMD_TYPE_SECTION 0x2
#define PMD_TYPE_MASK 0x3
#define PMD_DOMAIN_SHIFT 5 /* Domain field on PGD entry */
#define PMD_DOMAIN_MASK 0x000001E0 /* Domain mask on PGD entry */
#define PMD_NS_BIT 3 /* Non-secure memory */
/* First level Section definitions */
#define SECT_MAP_SIZE SZ_1MB /* Section base address alignment */
#define SECT_NS_BIT 19
#define SECT_SUPER_BIT 18
#define SECT_NG_BIT 17
#define SECT_SHAREABLE_BIT 16
#define SECT_AP2_BIT 15
#define SECT_TEX2_BIT 14
#define SECT_TEX1_BIT 13
#define SECT_TEX0_BIT 12
#define SECT_AP1_BIT 11
#define SECT_AP0_BIT 10
#define SECT_DOMAIN_SHIFT 5
#define SECT_XN_BIT 4
#define SECT_CACHE_BIT 3
#define SECT_BUFFER_BIT 2
#if !defined (__LINUX_CONTAINER__)
/* Second level entry (PTE) definitions */
#define PTE_TYPE_MASK 0x2
#define PTE_TYPE_FAULT 0
#define PTE_TYPE_LARGE 1
#define PTE_TYPE_SMALL 2
#endif
#define PTE_XN_BIT 0
#define PTE_BUFFER_BIT 2
#define PTE_CACHE_BIT 3
#define PTE_AP0_BIT 4
#define PTE_AP1_BIT 5
#define PTE_TEX0_BIT 6
#define PTE_TEX1_BIT 7
#define PTE_TEX2_BIT 8
#define PTE_AP2_BIT 9
#define PTE_AP01_SHIFT PTE_AP0_BIT
#define PTE_AP01_MASK 0x30
#define PTE_SHARE_BIT 10
#define PTE_NG_BIT 11
/* Domain access types */
#define DOMAIN_ACCESS_NONE 0
#define DOMAIN_ACCESS_CLIENT 1
#define DOMAIN_ACCESS_MANAGER 3
/* Simplified permission model definitions */
#define PTE_ACCESS_FLAG PTE_AP0_BIT
/* Bits [1:0] map as AP[2], AP[1] */
#define AP_SIMPLE_USER_NONE_KERN_RW 0
#define AP_SIMPLE_USER_RW_KERN_RW 1
#define AP_SIMPLE_USER_NONE_KERN_RO 2
#define AP_SIMPLE_USER_RO_KERN_RO 3
/*
* Generic page table flag meanings for v7:
*
* Note these are not hardware-defined bits,
* they are defined by the kernel for
* convenience.
*
* [WXCDU]
* W = write, X = Exec, C = Cached, D = Device
*
* If !D it means Normal memory.
* If !U it means kernel-only.
* If !W it means read-only.
*
* These are actually meaningful but unused
* individually, rather the combination of them
* are directly converted into HW pte.
*/
#define PTE_MAP_USER (1 << 0)
#define PTE_MAP_DEVICE (1 << 1)
#define PTE_MAP_CACHED (1 << 2)
#define PTE_MAP_EXEC (1 << 3)
#define PTE_MAP_WRITE (1 << 4)
/* 0 would mean normal, uncached, kernel mapping */
#define PTE_MAP_FAULT (1 << 5)
/*
* v7-specific conversion of map flags
*/
/* In ARMv7 normal, wbwa, shareable, user-rw/kern-rw, xn=1 */
#define __MAP_USR_RW (PTE_MAP_USER | PTE_MAP_WRITE | PTE_MAP_CACHED)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-ro/kern-ro, xn=1 */
#define __MAP_USR_RO (PTE_MAP_USER | PTE_MAP_CACHED)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-rw, xn=1 */
#define __MAP_KERN_RW (PTE_MAP_CACHED | PTE_MAP_WRITE)
/* Uncached. In ARMv7 device, uncached, shareable, user-rw/kern-rw, xn=1 */
#define __MAP_USR_IO (PTE_MAP_USER | PTE_MAP_DEVICE | PTE_MAP_WRITE)
/* Uncached. In ARMv7 device, uncached, shareable, user-none/kern-rw, xn=1 */
#define __MAP_KERN_IO (PTE_MAP_DEVICE | PTE_MAP_WRITE)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-rw/kern-rw, xn=0 */
#define __MAP_USR_RWX (PTE_MAP_USER | PTE_MAP_CACHED \
| PTE_MAP_WRITE | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-rw, xn=0 */
#define __MAP_KERN_RWX (PTE_MAP_CACHED | PTE_MAP_WRITE | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-ro/kern-ro, xn=0 */
#define __MAP_USR_RX (PTE_MAP_USER | PTE_MAP_CACHED | PTE_MAP_EXEC)
/* Writeback cached. In ARMv7 normal, wbwa, shareable, user-none/kern-ro, xn=0 */
#define __MAP_KERN_RX (PTE_MAP_CACHED | PTE_MAP_EXEC)
/* Fault/unmapped entry */
#define __MAP_FAULT PTE_MAP_FAULT
/*
* Shareability bit remapping on tex remap
*
* As an example to below, when a normal region has its
* shareability bit set to 1, PRRR_NORMAL_S1_BIT remaps
* and determines the final shareability status. E.g. if
* PRRR_NORMAL_S1_BIT is set to 0, the region becomes
* not shareable, even though the pte S bit == 1.
* On Tex Remap, PRRR is the final decision point.
*/
#define PRRR_DEVICE_S0_BIT 16 /* Meaning of all device memory when S == 0 */
#define PRRR_DEVICE_S1_BIT 17 /* Meaning of all device memory when S == 1 */
#define PRRR_NORMAL_S0_BIT 18 /* Meaning of all normal memory when S == 0 */
#define PRRR_NORMAL_S1_BIT 19 /* Meaning of all normal memory when S == 1 */
#define PRRR_NOS_START_BIT 24
#define NMRR_OUTER_START_BIT 16
#define CACHEABLE_NONE 0
#define CACHEABLE_WBWA 1
#define CACHEABLE_WT_NOWA 2
#define CACHEABLE_WB_NOWA 3
/* Memory type values for tex remap registers */
#define MEMTYPE_ST_ORDERED 0
#define MEMTYPE_DEVICE 1
#define MEMTYPE_NORMAL 2
/* User-defined tex remap slots */
#define TEX_SLOT_NORMAL_UNCACHED 0
#define TEX_SLOT_NORMAL 1
#define TEX_SLOT_DEVICE_UNCACHED 2
#define TEX_SLOT_ST_ORDERED_UNCACHED 3
#define ASID_MASK 0xFF
#define ASID_GROUP_SHIFT 8
#define PROCID_SHIFT 8
#define PROCID_MASK 0xFFFFFF
#define TASK_ASID(x) ((x)->space->spid & ASID_MASK)
#define SPACE_ASID(x) ((x)->spid & ASID_MASK)
#define TASK_PROCID(x) ((x)->tid & PROCID_MASK)
#define PGD_GLOBAL_GET() (kernel_resources.pgd_global)
/*
*
* Page table memory settings for translation table walk hardware:
*
* We assume write-back write-allocate, inner and outer
* cacheable, inner shareable, not outer-shareable,
* normal memory.
*
* ARMv7 VMSA (B3-114) says that the obscure IRGN[1:0]
* mapping ensures same bit values for SMP and v7 base architecture,
* however this is only partially true as seen by the WBWA bit
* mapping differences.
*
* RGN values:
* 00 Uncached
* 01 WBWA
* 10 WT
* 11 WB_NOWA
*
* On below definitions both inner and outer cacheability bits
* are assigned with the same cacheability values.
*/
/* I
* I R
* R R R G
* G N G G I N
* N O N N M 1
* 0 S 1|0 P S C */
#define PGD_MEMORY_NORMAL_WBWA_S_NOS 0x2B /* 00 1 0|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WBWA_S_NOS_SMP 0x6A /* 01 1 0|1 0 1 0 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_NOS 0x3B /* 00 1 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_NOS_SMP 0x7B /* 01 1 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_OS 0x1B /* 00 0 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WB_NOWA_S_OS_SMP 0x5B /* 01 0 1|1 0 1 1 */
#define PGD_MEMORY_NORMAL_UNCACHED_S_NOS 0x22 /* 00 1 0|0 0 1 0 */
#define PGD_MEMORY_NORMAL_UNCACHED_S_NOS_SMP 0x22 /* 00 1 0|0 0 1 0 */
#define PGD_MEMORY_NORMAL_WBWA_S_OS 0x0B /* 00 0 0|1 0 1 1 */
#define PGD_MEMORY_NORMAL_WBWA_S_OS_SMP 0x4A /* 01 0 0|1 0 1 0 */
/* Returns page table memory settings for ttb walk fetches */
unsigned int ttb_walk_mem_settings(void);
#if !defined (__ASSEMBLY__)
void v7_flags_prepare_pte(pte_t *pte, unsigned long phys,
unsigned long virt, unsigned int v7_pte_flags);
void section_set_access_simple(pmd_t *pmd, unsigned int perms);
void section_set_tex_remap_slot(pmd_t *pmd, int slot);
void v7_write_section(unsigned long paddr, unsigned long vaddr,
unsigned int section_flags, unsigned int asid);
int pte_get_access_simple(pte_t pte);
void tex_remap_setup_all_slots(void);
struct ktcb;
void arch_update_utcb(unsigned long utcb_address);
void arch_space_switch(struct ktcb *to);
void system_identify(void);
#endif /* !defined(__ASSEMBLY__) */
#endif /* __V7_MM_H__ */

View File

@@ -0,0 +1,148 @@
/*
* Types of capabilities and their operations
*
* Copyright (C) 2009 Bahadir Balban
*/
#ifndef __CAP_TYPES_H__
#define __CAP_TYPES_H__
/*
* Capability types
*/
#define CAP_TYPE_MASK 0x0000FFFF
#define CAP_TYPE_TCTRL (1 << 0)
#define CAP_TYPE_EXREGS (1 << 1)
#define CAP_TYPE_MAP_PHYSMEM (1 << 2)
#define CAP_TYPE_MAP_VIRTMEM (1 << 3)
#define CAP_TYPE_IPC (1 << 4)
#define CAP_TYPE_IRQCTRL (1 << 5)
#define CAP_TYPE_UMUTEX (1 << 6)
#define CAP_TYPE_QUANTITY (1 << 7)
#define CAP_TYPE_CAP (1 << 8)
#define cap_type(c) ((c)->type & CAP_TYPE_MASK)
/*
* Resource types
*/
#define CAP_RTYPE_MASK 0xFFFF0000
#define CAP_RTYPE_THREAD (1 << 16)
#define CAP_RTYPE_SPACE (1 << 17)
#define CAP_RTYPE_CONTAINER (1 << 18)
#define CAP_RTYPE_CPUPOOL (1 << 19)
#define CAP_RTYPE_THREADPOOL (1 << 20)
#define CAP_RTYPE_SPACEPOOL (1 << 21)
#define CAP_RTYPE_MUTEXPOOL (1 << 22)
#define CAP_RTYPE_MAPPOOL (1 << 23) /* For pmd spending */
#define CAP_RTYPE_CAPPOOL (1 << 24) /* For new cap generation */
#define cap_rtype(c) ((c)->type & CAP_RTYPE_MASK)
#define cap_set_rtype(c, rtype) \
{(c)->type &= ~CAP_RTYPE_MASK; \
(c)->type |= CAP_RTYPE_MASK & rtype;}
/*
* User-defined device-types
* (Kept in the user field)
*/
#define CAP_DEVTYPE_TIMER 1
#define CAP_DEVTYPE_UART 2
#define CAP_DEVTYPE_KEYBOARD 3
#define CAP_DEVTYPE_MOUSE 4
#define CAP_DEVTYPE_CLCD 5
#define CAP_DEVTYPE_OTHER 0xF
#define CAP_DEVTYPE_MASK 0xFFFF
#define CAP_DEVNUM_MASK 0xFFFF0000
#define CAP_DEVNUM_SHIFT 16
#define cap_is_devmem(c) ((c)->attr)
#define cap_set_devtype(c, devtype) \
{(c)->attr &= ~CAP_DEVTYPE_MASK; \
(c)->attr |= CAP_DEVTYPE_MASK & devtype;}
#define cap_set_devnum(c, devnum) \
{(c)->attr &= ~CAP_DEVNUM_MASK; \
(c)->attr |= CAP_DEVNUM_MASK & (devnum << CAP_DEVNUM_SHIFT);}
#define cap_devnum(c) \
(((c)->attr & CAP_DEVNUM_MASK) >> CAP_DEVNUM_SHIFT)
#define cap_devtype(c) ((c)->attr & CAP_DEVTYPE_MASK)
/*
* Access permissions
*/
/* Generic permissions */
#define CAP_CHANGEABLE (1 << 28) /* Can modify contents */
#define CAP_TRANSFERABLE (1 << 29) /* Can grant or share it */
#define CAP_REPLICABLE (1 << 30) /* Can create copies */
#define CAP_GENERIC_MASK 0xF0000000
#define CAP_IMMUTABLE 0
#define cap_generic_perms(c) \
((c)->access & CAP_GENERIC_MASK)
/* Thread control capability */
#define CAP_TCTRL_CREATE (1 << 0)
#define CAP_TCTRL_DESTROY (1 << 1)
#define CAP_TCTRL_RUN (1 << 2)
#define CAP_TCTRL_SUSPEND (1 << 3)
#define CAP_TCTRL_RECYCLE (1 << 4)
#define CAP_TCTRL_WAIT (1 << 5)
/* Exchange registers capability */
#define CAP_EXREGS_RW_PAGER (1 << 0)
#define CAP_EXREGS_RW_UTCB (1 << 1)
#define CAP_EXREGS_RW_SP (1 << 2)
#define CAP_EXREGS_RW_PC (1 << 3)
#define CAP_EXREGS_RW_REGS (1 << 4) /* Other regular regs */
#define CAP_EXREGS_RW_CPU (1 << 5)
#define CAP_EXREGS_RW_CPUTIME (1 << 6)
/* Map capability */
#define CAP_MAP_READ (1 << 0)
#define CAP_MAP_WRITE (1 << 1)
#define CAP_MAP_EXEC (1 << 2)
#define CAP_MAP_CACHED (1 << 3)
#define CAP_MAP_UNCACHED (1 << 4)
#define CAP_MAP_UNMAP (1 << 5)
#define CAP_MAP_UTCB (1 << 6)
/* Cache operations, applicable to (virtual) memory regions */
#define CAP_CACHE_INVALIDATE (1 << 7)
#define CAP_CACHE_CLEAN (1 << 8)
/*
* IRQ Control capability
*/
#define CAP_IRQCTRL_WAIT (1 << 8)
/*
* This is a common one and it applies to both
* CAP_TYPE_IRQCTRL and CAP_TYPE_MAP_PHYSMEM
*/
#define CAP_IRQCTRL_REGISTER (1 << 7)
/* Ipc capability */
#define CAP_IPC_SEND (1 << 0)
#define CAP_IPC_RECV (1 << 1)
#define CAP_IPC_SHORT (1 << 2)
#define CAP_IPC_FULL (1 << 3)
#define CAP_IPC_EXTENDED (1 << 4)
#define CAP_IPC_ASYNC (1 << 5)
/* Userspace mutex capability */
#define CAP_UMUTEX_LOCK (1 << 0)
#define CAP_UMUTEX_UNLOCK (1 << 1)
/* Capability control capability */
#define CAP_CAP_GRANT (1 << 0)
#define CAP_CAP_READ (1 << 1)
#define CAP_CAP_SHARE (1 << 2)
#define CAP_CAP_REPLICATE (1 << 3)
#define CAP_CAP_SPLIT (1 << 4)
#define CAP_CAP_DEDUCE (1 << 5)
#define CAP_CAP_DESTROY (1 << 6)
#define CAP_CAP_MODIFY (CAP_CAP_DEDUCE | CAP_CAP_SPLIT \
| CAP_CAP_DESTROY)
#endif /* __CAP_TYPES_H__ */

View File

@@ -0,0 +1,19 @@
/*
* Kernel preemption functions.
*/
#ifndef __PREEMPT_H__
#define __PREEMPT_H__
#if !defined(__LINUX_CONTAINER__)
void preempt_enable(void);
void preempt_disable(void);
int preemptive(void);
int preempt_count(void);
int in_nested_irq_context(void);
int in_irq_context(void);
int in_task_context(void);
#endif /* __LINUX_CONTAINER__ */
#endif /* __PREEMPT_H__ */

View File

@@ -0,0 +1,30 @@
/*
* Generic address space related information.
*
* Copyright (C) 2007-2010 Bahadir Balban
*/
#ifndef __SPACE_H__
#define __SPACE_H__
/*
* Generic mapping flags.
*/
#define MAP_FAULT 0
#define MAP_USR_RW 1
#define MAP_USR_RO 2
#define MAP_KERN_RW 3
#define MAP_USR_IO 4
#define MAP_KERN_IO 5
#define MAP_USR_RWX 6
#define MAP_KERN_RWX 7
#define MAP_USR_RX 8
#define MAP_KERN_RX 9
#define MAP_UNMAP 10 /* For unmap syscall */
#define MAP_INVALID_FLAGS (1 << 31)
/* Some default aliases */
#define MAP_USR_DEFAULT MAP_USR_RW
#define MAP_KERN_DEFAULT MAP_KERN_RW
#define MAP_IO_DEFAULT MAP_KERN_IO
#endif /* __SPACE_H__ */

View File

@@ -0,0 +1,43 @@
/*
* Thread Control Block, kernel portion.
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __TCB_H__
#define __TCB_H__
/*
* These are a mixture of flags that indicate the task is
* in a transitional state that could include one or more
* scheduling states.
*/
#define TASK_INTERRUPTED (1 << 0)
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)
#define TASK_PENDING_SIGNAL (TASK_SUSPENDING)
#define TASK_REALTIME (1 << 5)
/*
* This is to indicate a task (either current or one of
* its children) exit has occured and cleanup needs to be
* called
*/
#define TASK_EXITED (1 << 3)
/* Task states */
enum task_state {
TASK_INACTIVE = 0,
TASK_SLEEPING = 1,
TASK_RUNNABLE = 2,
};
#define TASK_CID_MASK 0xFF000000
#define TASK_ID_MASK 0x00FFFFFF
#define TASK_CID_SHIFT 24
/* Values that rather have special meaning instead of an id value */
#define TASK_ID_INVALID 0xFFFFFFFF
#endif /* __TCB_H__ */

View File

@@ -0,0 +1,26 @@
/*
* Generic cache api calls
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#ifndef __GLUE_CACHE_H__
#define __GLUE_CACHE_H__
//#include INC_SUBARCH(mmu_ops.h)
/* Lowest byte is reserved for and used by capability permissions */
#define ARCH_INVALIDATE_ICACHE 0x10
#define ARCH_INVALIDATE_DCACHE 0x20
#define ARCH_CLEAN_DCACHE 0x30
#define ARCH_CLEAN_INVALIDATE_DCACHE 0x40
#define ARCH_INVALIDATE_TLB 0x50
void arch_invalidate_dcache(unsigned long start, unsigned long end);
void arch_clean_invalidate_dcache(unsigned long start, unsigned long end);
void arch_invalidate_icache(unsigned long start, unsigned long end);
void arch_invalidate_tlb(unsigned long start, unsigned long end);
void arch_clean_dcache(unsigned long start, unsigned long end);
#endif /* __GLUE_CACHE_H__ */

View File

@@ -0,0 +1,53 @@
#ifndef __ARM_CONTEXT_H__
#define __ARM_CONTEXT_H__
#include <l4lib/types.h>
/*
* This describes the register context of each task. Simply set
* them and they'll be copied onto real registers upon a context
* switch to that task. exchange_registers() system call is
* designed for this, whose input structure is defined further
* below.
*/
typedef struct arm_context {
u32 spsr; /* 0x0 */
u32 r0; /* 0x4 */
u32 r1; /* 0x8 */
u32 r2; /* 0xC */
u32 r3; /* 0x10 */
u32 r4; /* 0x14 */
u32 r5; /* 0x18 */
u32 r6; /* 0x1C */
u32 r7; /* 0x20 */
u32 r8; /* 0x24 */
u32 r9; /* 0x28 */
u32 r10; /* 0x2C */
u32 r11; /* 0x30 */
u32 r12; /* 0x34 */
u32 sp; /* 0x38 */
u32 lr; /* 0x3C */
u32 pc; /* 0x40 */
} __attribute__((__packed__)) task_context_t;
typedef struct arm_exregs_context {
u32 r0; /* 0x4 */
u32 r1; /* 0x8 */
u32 r2; /* 0xC */
u32 r3; /* 0x10 */
u32 r4; /* 0x14 */
u32 r5; /* 0x18 */
u32 r6; /* 0x1C */
u32 r7; /* 0x20 */
u32 r8; /* 0x24 */
u32 r9; /* 0x28 */
u32 r10; /* 0x2C */
u32 r11; /* 0x30 */
u32 r12; /* 0x34 */
u32 sp; /* 0x38 */
u32 lr; /* 0x3C */
u32 pc; /* 0x40 */
} __attribute__((__packed__)) exregs_context_t;
#endif /* __ARM_CONTEXT_H__ */

View File

@@ -0,0 +1,61 @@
/*
* Virtual memory layout of ARM systems.
*/
#ifndef __MEMLAYOUT_H__
#define __MEMLAYOUT_H__
#ifndef __ASSEMBLY__
#include L4LIB_INC_GLUE(memory.h)
#endif
#define KERNEL_AREA_START 0xF0000000
#define KERNEL_AREA_END 0xF8000000 /* 128 MB */
#define KERNEL_AREA_SIZE (KERNEL_AREA_END - KERNEL_AREA_START)
#define KERNEL_AREA_SECTIONS (KERNEL_AREA_SIZE / ARM_SECTION_SIZE)
#define UTCB_SIZE (sizeof(int) * 64)
#define IO_AREA_START 0xF9000000
#define IO_AREA_END 0xFF000000
#define IO_AREA_SIZE (IO_AREA_END - IO_AREA_START)
#define IO_AREA_SECTIONS (IO_AREA_SIZE / ARM_SECTION_SIZE)
#define USER_KIP_PAGE 0xFF000000
/* ARM-specific offset in KIP that tells the address of UTCB page */
#define UTCB_KIP_OFFSET 0x50
#define IO_AREA0_VADDR IO_AREA_START
#define IO_AREA1_VADDR (IO_AREA_START + (SZ_1MB*1))
#define IO_AREA2_VADDR (IO_AREA_START + (SZ_1MB*2))
#define IO_AREA3_VADDR (IO_AREA_START + (SZ_1MB*3))
#define IO_AREA4_VADDR (IO_AREA_START + (SZ_1MB*4))
#define IO_AREA5_VADDR (IO_AREA_START + (SZ_1MB*5))
#define IO_AREA6_VADDR (IO_AREA_START + (SZ_1MB*6))
#define IO_AREA7_VADDR (IO_AREA_START + (SZ_1MB*7))
/*
* IO_AREA8_VADDR
* The beginning page in this slot is used for userspace uart mapping
*/
#define ARM_HIGH_VECTOR 0xFFFF0000
#define ARM_SYSCALL_VECTOR 0xFFFFFF00
#if !defined(__LINUX_CONTAINER__)
#define KERNEL_OFFSET (KERNEL_AREA_START - PLATFORM_PHYS_MEM_START)
#endif
/* User tasks define them differently */
#if defined (__KERNEL__) && !defined(__LINUX_CONTAINER__)
#define phys_to_virt(addr) ((unsigned int)(addr) + KERNEL_OFFSET)
#define virt_to_phys(addr) ((unsigned int)(addr) - KERNEL_OFFSET)
#endif
#define KERN_ADDR(x) ((x >= KERNEL_AREA_START) && (x < KERNEL_AREA_END))
#define UTCB_ADDR(x) ((x >= UTCB_AREA_START) && (x < UTCB_AREA_END))
#define is_kernel_address(x) (KERN_ADDR(x) || (x >= ARM_HIGH_VECTOR) || \
(x >= IO_AREA_START && x < IO_AREA_END))
#endif /* __MEMLAYOUT_H__ */

View File

@@ -0,0 +1,85 @@
/*
* Includes memory-related architecture specific definitions and their
* corresponding generic wrappers.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __GLUE_ARM_MEMORY_H__
#define __GLUE_ARM_MEMORY_H__
#include L4LIB_INC_GLUE(memlayout.h) /* Important generic definitions */
#include L4LIB_INC_SUBARCH(mm.h)
/* Generic definitions */
#define PFN_SHIFT 12
#define PAGE_BITS PFN_SHIFT
#if !defined(__LINUX_CONTAINER__)
#define PAGE_SIZE SZ_4K
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
/* Aligns to the upper page (ceiling) FIXME: Must add a wraparound checker. */
#define page_align_up(addr) ((((unsigned long)(addr)) + PAGE_MASK) & \
(~PAGE_MASK))
/* Aligns to the lower page (floor) */
#define page_align(addr) (((unsigned long)(addr)) & \
(~PAGE_MASK))
#define is_aligned(val, size) (!(((unsigned long)(val)) & (((unsigned long)size) - 1)))
#define is_page_aligned(val) (!(((unsigned long)(val)) & PAGE_MASK))
#define page_boundary(x) is_page_aligned(x)
/*
* Align to given size.
*
* Note it must be an alignable size i.e. one that is a power of two.
* E.g. 0x1000 would work but 0x1010 would not.
*/
#define align(addr, size) (((unsigned int)(addr)) & (~((unsigned long)size-1)))
#define align_up(addr, size) ((((unsigned long)(addr)) + \
((size) - 1)) & (~(((unsigned long)size) - 1)))
/* The bytes left until the end of the page that x is in */
#define TILL_PAGE_ENDS(x) (PAGE_SIZE - ((unsigned long)(x) & PAGE_MASK))
/* Extract page frame number from address and vice versa. */
#define __pfn(x) (((unsigned long)(x)) >> PAGE_BITS)
#define __pfn_to_addr(x) (((unsigned long)(x)) << PAGE_BITS)
/* Extract physical address from page table entry (pte) */
#define __pte_to_addr(x) (((unsigned long)(x)) & ~PAGE_MASK)
/* Minimum excess needed for word alignment */
#define SZ_WORD sizeof(unsigned int)
#define WORD_BITS 32
#define WORD_BITS_LOG2 5
#define BITWISE_GETWORD(x) ((x) >> WORD_BITS_LOG2) /* Divide by 32 */
#define BITWISE_GETBIT(x) (1 << ((x) % WORD_BITS))
/* Minimum stack alignment restriction across functions, exceptions */
#define STACK_ALIGNMENT 8
#if !defined(__LINUX_CONTAINER__)
/* Endianness conversion */
/*
 * Convert a 32-bit big-endian value to cpu byte order.
 *
 * Bug fix: the original took @x by value, byte-swapped the local
 * copy and returned void, so the converted value never reached the
 * caller.  It now returns the swapped word.  Old call sites that
 * ignored a void result still compile (the return value is simply
 * discarded), so this is backward compatible.
 *
 * NOTE(review): assumes a little-endian CPU is the target, as the
 * surrounding ARM glue implies -- on a big-endian build this should
 * be an identity; confirm against the port's endianness config.
 */
static inline unsigned int be32_to_cpu(unsigned int x)
{
	/* Swap bytes: 0xAABBCCDD -> 0xDDCCBBAA */
	return ((x & 0x000000FFU) << 24) |
	       ((x & 0x0000FF00U) << 8)  |
	       ((x & 0x00FF0000U) >> 8)  |
	       ((x & 0xFF000000U) >> 24);
}
struct ktcb;
void task_init_registers(struct ktcb *task, unsigned long pc);
#endif /* !_LINUX_CONTAINER__ */
#endif /* __GLUE_ARM_MEMORY_H__ */

View File

@@ -0,0 +1,95 @@
/*
* Userspace thread control block
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __GLUE_ARM_MESSAGE_H__
#define __GLUE_ARM_MESSAGE_H__
/*
* Here's a summary of how ARM registers are used during IPC:
*
* System registers:
* r0 - r2: Passed as arguments to ipc() call. They are the registers
* the microkernel will read and they have system-wide meaning.
*
* Primary message registers:
* r3 - r8: These 6 registers are the primary message registers MR0-MR6.
* Their format is application-specific, i.e. the microkernel imposes no
* format restrictions on them.
*
* TODO: The only exception is that, for ANYTHREAD receivers the predefined
* MR_SENDER is touched by the kernel to indicate the sender. This register
* is among the primary MRs and it may be better fit to put it into one of
* the system registers.
*
* l4lib registers: (MR_TAG, MR_SENDER, MR_RETURN)
* Some of the primary message registers are used by the l4lib convenience
* library for operations necessary on most or all common ipcs. For example
* every ipc has a tag that specifies the ipc reason. Also send/receive
* operations require a return value. Threads that are open to receive from
* all threads require the sender id. These values are passed in predefined
* primary message registers, but the microkernel has no knowledge about them.
*
* System call registers: L4SYS_ARG0 to ARG4.(See syslib.h for definitions)
* Finally the rest of the primary message registers are available for
* implementing system call arguments. For example the POSIX services use
* these arguments to pass posix system call information.
*
* Secondary Message Registers:
* These are non-real registers and are present in the UTCB memory region.
* Both real and non-real message registers have a location in the UTCB, but
* non-real ones are copied only if the FULL IPC flag is set.
*
* The big picture:
*
* r0 System register
* r1 System register
* r2 System register
* r3 Primary MR0 MR_RETURN, MR_TAG Present in UTCB, Short IPC
* r4 Primary MR1 MR_SENDER Present in UTCB, Short IPC
* r5 Primary MR2 L4SYS_ARG0 Present in UTCB, Short IPC
* r6 Primary MR3 L4SYS_ARG1 Present in UTCB, Short IPC
* r7 Primary MR4 L4SYS_ARG2 Present in UTCB, Short IPC
* r8 Primary MR5 L4SYS_ARG3 Present in UTCB, Short IPC
* x Secondary MR6 Present in UTCB, Full IPC only
* x Secondary MR64 Present in UTCB, Full IPC only
*
* Complicated for you? Suggest a simpler design and it shall be implemented!
*/
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 4) /* -4 is for fields on utcb */
#define MR_TOTAL 6
#define MR_TAG 0 /* Contains the purpose of message */
#define MR_SENDER 1 /* For anythread receivers to discover sender */
#define MR_RETURN 0 /* Contains the posix return value. */
/* These define the mr start - end range that isn't used by userspace syslib */
#define MR_UNUSED_START 2 /* The first mr that's not used by syslib.h */
#define MR_UNUSED_TOTAL (MR_TOTAL - MR_UNUSED_START)
#define MR_USABLE_TOTAL MR_UNUSED_TOTAL
/* These are defined so that we don't hard-code register names */
#define MR0_REGISTER r3
#define MR_RETURN_REGISTER r3
#define TASK_NOTIFY_SLOTS 8
#define TASK_NOTIFY_MAXVALUE 255
/* Primaries aren't used for memcopy. Those ops use this as a parameter */
#define L4_UTCB_FULL_BUFFER_SIZE (MR_REST * sizeof(int))
#include L4LIB_INC_GLUE(memlayout.h)
#if !defined (__ASSEMBLY__)
struct utcb {
u32 mr[MR_TOTAL]; /* MRs that are mapped to real registers */
u32 saved_tag; /* Saved tag field for stacked ipcs */
u32 saved_sender; /* Saved sender field for stacked ipcs */
u8 notify[TASK_NOTIFY_SLOTS]; /* Irq notification slots */
u32 mr_rest[MR_REST]; /* Complete the utcb for up to 64 words */
};
#endif
#endif /* __GLUE_ARM_MESSAGE_H__ */

View File

@@ -0,0 +1,78 @@
/*
* ARM-specific system call details.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARM_GLUE_SYSCALL_H__
#define __ARM_GLUE_SYSCALL_H__
#include <l4lib/types.h>
#include L4LIB_INC_GLUE(message.h)
/* Only specific call is the trap that gives back the kip address
* from which other system calls can be discovered. */
#define L4_TRAP_KIP 0xB4
/* Used in the kernel to refer to virtual address of this page.
* User space discovers it from the KIP */
#define ARM_SYSCALL_PAGE 0xFFFFF000
extern unsigned int __syscall_page_start;
/*
* This structure is saved on the kernel stack
* just after entering a system call exception.
*/
typedef struct syscall_context {
u32 spsr;
u32 r0;
u32 r1;
u32 r2;
u32 r3; /* MR0 */
u32 r4; /* MR1 */
u32 r5; /* MR2 */
u32 r6; /* MR3 */
u32 r7; /* MR4 */
u32 r8; /* MR5 */
u32 r9;
u32 r10;
u32 r11;
u32 r12;
u32 sp_usr;
u32 lr_usr;
} __attribute__((__packed__)) syscall_context_t;
typedef struct msg_regs {
u32 mr0;
u32 mr1;
u32 mr2;
u32 mr3;
u32 mr4;
u32 mr5;
} msg_regs_t;
/* NOTE:
* These references are valid only when they have been explicitly set
* by a kernel entry point, e.g. a system call, a data abort handler
* that imitates a page fault ipc etc.
*
* Second note:
* _If_ these refer to real utcb's in the future, make sure to have
* utcb_map_lazily() check so that they're safe accesses.
*/
#define KTCB_REF_ARG0(ktcb) (&(ktcb)->syscall_regs->r0)
#define KTCB_REF_MR0(ktcb) (&(ktcb)->syscall_regs->MR0_REGISTER)
/* Represents each syscall. We get argument registers
* from stack for now. This is slower but the simplest. */
typedef int (*syscall_fn_t)(struct syscall_context *regs);
/* Entry point for syscall dispatching. Called from asm */
int syscall(struct syscall_context *regs, unsigned long);
/* Syscall-related initialiser called during system init. */
void syscall_init(void);
void kip_init_syscalls(void);
#endif /* __ARM_GLUE_SYSCALL_H__ */

View File

@@ -0,0 +1,131 @@
#ifndef __LIST_H__
#define __LIST_H__
#define L4_DEADWORD 0xDEADCCCC
/* Doubly-linked circular list node, embedded into client structs. */
struct link {
	struct link *next;
	struct link *prev;
};

/*
 * Make a link an empty list head: a one-element circular list
 * whose both pointers refer back to the link itself.
 */
static inline void link_init(struct link *l)
{
	l->prev = l;
	l->next = l;
}
#define LINK_INIT(link) { &(link), &(link) }
#define LINK_DECLARE(l) \
struct link l = LINK_INIT(l)
#if !defined(__LINUX_CONTAINER__)
/*
 * Insert @new right after @list, i.e. at the head of the list:
 * list -> new -> (former list->next).
 */
static inline void list_insert(struct link *new, struct link *list)
{
	new->prev = list;
	new->next = list->next;
	list->next->prev = new;
	list->next = new;
}
/*
 * Insert @new right before @list, i.e. at the tail of the list:
 * (former list->prev) -> new -> list.
 */
static inline void list_insert_tail(struct link *new, struct link *list)
{
	new->next = list;
	new->prev = list->prev;
	list->prev->next = new;
	list->prev = new;
}
/*
 * Unlink @link from its list and poison its pointers with
 * L4_DEADWORD so a stale reuse faults loudly instead of silently
 * corrupting a list.
 */
static inline void list_remove(struct link *link)
{
	link->prev->next = link->next;
	link->next->prev = link->prev;
	link->next = (struct link *)L4_DEADWORD;
	link->prev = (struct link *)L4_DEADWORD;
}
/*
 * Unlink @link from its list and reinitialize it as an empty,
 * self-referencing head so it can be reused immediately.
 */
static inline void list_remove_init(struct link *link)
{
	link->prev->next = link->next;
	link->next->prev = link->prev;
	link->next = link;
	link->prev = link;
}
/* Cuts the whole list from head and returns it */
/*
 * Cut the whole list away from @head and return the detached chain
 * (the former head->next).  @head itself is left reinitialized as an
 * empty list.
 */
static inline struct link *list_detach(struct link *head)
{
	struct link *rest = head->next;

	list_remove_init(head);

	return rest;
}
/* append new_list to list given by head/end pair */
/*
 * Append the circular list @new_list to the list segment delimited by
 * @head/@end, producing: end -> new_list ... tail -> head.
 *
 * Fix: the original ended with "end = new_list;", an assignment to a
 * by-value parameter that could never reach the caller -- the dead
 * statement is removed.  A local cursor is used for the walk so the
 * @new_list argument is no longer mutated either.
 */
static inline void list_attach(struct link *new_list, struct link *head, struct link *end)
{
	struct link *tail = new_list;

	/* Splice the new list in right after the current end */
	end->next = new_list;
	new_list->prev = end;

	/* Walk the (previously circular) new list to its last node */
	while (tail->next != end->next)
		tail = tail->next;

	/* Close the list: the new tail points back at head */
	tail->next = head;
	head->prev = tail;
}
/* True when the list head links only to itself in both directions. */
static inline int list_empty(struct link *list)
{
	return (list->next == list) && (list->prev == list);
}
/* Convert an embedded link pointer back to its containing struct. */
#define link_to_struct(link, struct_type, link_field) \
	container_of(link, struct_type, link_field)

/*
 * Iterate over every container struct on the circular list anchored
 * at link_start.  NOT safe against removing the current entry --
 * use list_foreach_removable_struct() for that.
 */
#define list_foreach_struct(struct_ptr, link_start, link_field) \
	for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field); \
	     &struct_ptr->link_field != (link_start); \
	     struct_ptr = link_to_struct(struct_ptr->link_field.next, typeof(*struct_ptr), link_field))

/*
 * As above, but temp_ptr always caches the next entry first, so the
 * current entry may be removed (or freed) inside the loop body.
 */
#define list_foreach_removable_struct(struct_ptr, temp_ptr, link_start, link_field) \
	for (struct_ptr = link_to_struct((link_start)->next, typeof(*struct_ptr), link_field), \
	     temp_ptr = link_to_struct((struct_ptr)->link_field.next, typeof(*struct_ptr), link_field);\
	     &struct_ptr->link_field != (link_start); \
	     struct_ptr = temp_ptr, temp_ptr = link_to_struct(temp_ptr->link_field.next, typeof(*temp_ptr), link_field))
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIST_H__ */

View File

@@ -0,0 +1,44 @@
#ifndef __LIB_MATH_H__
#define __LIB_MATH_H__
#if !defined (__LINUX_CONTAINER__)
#if !defined pow
/*
 * Integer exponentiation by repeated multiplication.
 * Returns 1 for exp <= 0.  No overflow checking.
 */
static inline int pow(int val, int exp)
{
	int result = 1;

	while (exp-- > 0)
		result *= val;

	return result;
}
#endif
#if !defined min
/* Smaller of two ints. */
static inline int min(int x, int y)
{
	if (x < y)
		return x;
	return y;
}

/* Larger of two ints. */
static inline int max(int x, int y)
{
	if (x > y)
		return x;
	return y;
}
#endif
#endif /* !__LINUX_CONTAINER__ */
/* Tests if ranges a-b intersect with range c-d */
/*
 * Half-open range overlap test: returns 1 when [a, b) and [c, d)
 * intersect, 0 otherwise.  Touching endpoints (b == c or a == d)
 * do not count as an intersection.
 */
static inline int set_intersection(unsigned long a, unsigned long b,
				   unsigned long c, unsigned long d)
{
	/*
	 * The ranges are disjoint exactly when one ends before the
	 * other begins; negate that complement.
	 */
	return !(b <= c || a >= d);
}
#endif /* __LIB_MATH_H__ */

View File

@@ -0,0 +1,45 @@
/*
* The elementary concurrency constructs.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __LIB_MUTEX_H__
#define __LIB_MUTEX_H__
#include <l4lib/lib/string.h>
#include <l4lib/lib/spinlock.h>
#include <l4lib/lib/list.h>
#include <l4lib/lib/printk.h>
#include <l4lib/lib/wait.h>
#include INC_ARCH(mutex.h)
/* A mutex is a binary semaphore that can sleep. */
struct mutex {
struct waitqueue_head wqh;
unsigned int lock;
};
/*
 * Reset a mutex to the unlocked state: zero the whole struct
 * (lock word included) and set up the sleepers' wait queue.
 */
static inline void mutex_init(struct mutex *mutex)
{
	memset(mutex, 0, sizeof(struct mutex));
	waitqueue_head_init(&mutex->wqh);
}
int mutex_trylock(struct mutex *mutex);
int mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);
void mutex_unlock_async(struct mutex *mutex);
/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
/* Increment the counter in place and return its new value. */
static inline int mutex_inc(unsigned int *cnt)
{
	*cnt += 1;
	return *cnt;
}
/* Decrement the counter in place and return its new value. */
static inline int mutex_dec(unsigned int *cnt)
{
	*cnt -= 1;
	return *cnt;
}
#endif /* __LIB_MUTEX_H__ */

View File

@@ -0,0 +1,90 @@
#ifndef __LIB_SPINLOCK_H__
#define __LIB_SPINLOCK_H__
#include <l4lib/lib/string.h>
#include <l4lib/generic/preempt.h>
#include L4LIB_INC_ARCH(irq.h)
#include L4LIB_INC_ARCH(mutex.h)
#if !defined(__LINUX_CONTAINER__)
struct spinlock {
unsigned int lock;
};
#if !defined(__LINUX_CONTAINER__)
#define DECLARE_SPINLOCK(lockname) \
struct spinlock lockname = { \
.lock = 0, \
}
void spin_lock_record_check(void *lock_addr);
void spin_unlock_delete_check(void *lock_addr);
/* Reset a spinlock to the unlocked (all-zero) state. */
static inline void spin_lock_init(struct spinlock *s)
{
	memset(s, 0, sizeof(struct spinlock));
}
/*
* - Guards from deadlock against local processes, but not local irqs.
* - To be used for synchronising against processes on *other* cpus.
*/
/*
 * Acquire the spinlock.  Preemption is disabled first so the holder
 * cannot be scheduled out while spinning or holding the lock; on
 * !CONFIG_SMP builds the preemption toggle is the entire lock.
 * Does not mask local irqs -- use spin_lock_irq() when the lock is
 * also taken from irq context.
 */
static inline void spin_lock(struct spinlock *s)
{
	preempt_disable(); /* This must disable local preempt */
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_lock_record_check(s); /* Debug-build lock bookkeeping */
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release the spinlock and re-enable preemption; exact reverse
 * order of spin_lock().
 */
static inline void spin_unlock(struct spinlock *s)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_unlock_delete_check(s); /* Undo debug-build bookkeeping */
#endif
	__spin_unlock(&s->lock);
#endif
	preempt_enable();
}
/*
* - Guards from deadlock against local processes *and* local irqs.
* - To be used for synchronising against processes and irqs
* on other cpus.
*/
/*
 * Acquire the spinlock with local irqs disabled.  The previous irq
 * state is saved into *state and must be handed back to the matching
 * spin_unlock_irq() call.
 */
static inline void spin_lock_irq(struct spinlock *s,
				 unsigned long *state)
{
	irq_local_disable_save(state);
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_lock_record_check(s); /* Debug-build lock bookkeeping */
#endif
	__spin_lock(&s->lock);
#endif
}
/*
 * Release the spinlock and restore the irq state previously saved by
 * spin_lock_irq().
 */
static inline void spin_unlock_irq(struct spinlock *s,
				   unsigned long state)
{
#if defined(CONFIG_SMP)
#if defined (CONFIG_DEBUG_SPINLOCKS)
	spin_unlock_delete_check(s); /* Undo debug-build bookkeeping */
#endif
	__spin_unlock(&s->lock);
#endif
	irq_local_restore(state);
}
#endif
#endif /* __LINUX_CONTAINER__ */
#endif /* __LIB__SPINLOCK_H__ */

View File

@@ -0,0 +1,86 @@
#ifndef __LIB_WAIT_H__
#define __LIB_WAIT_H__
#include <l4lib/lib/list.h>
#include <l4lib/lib/spinlock.h>
struct ktcb;
struct waitqueue {
struct link task_list;
struct ktcb *task;
};
#define WAKEUP_ASYNC 0
enum wakeup_flags {
WAKEUP_INTERRUPT = (1 << 0), /* Set interrupt flag for task */
WAKEUP_SYNC = (1 << 1), /* Wake it up synchronously */
};
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
struct waitqueue wq = { \
.task_list = { &wq.task_list, &wq.task_list }, \
.task = tsk, \
};
struct waitqueue_head {
int sleepers;
struct spinlock slock;
struct link task_list;
};
/*
 * Zero the head (sleeper count and lock) and make its task list a
 * valid empty circular list.
 */
static inline void waitqueue_head_init(struct waitqueue_head *head)
{
	memset(head, 0, sizeof(struct waitqueue_head));
	link_init(&head->task_list);
}
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
struct waitqueue *wq);
void task_unset_wqh(struct ktcb *task);
/*
* Sleep if the given condition isn't true.
* ret will tell whether condition was met
* or we got interrupted.
*/
/*
 * Loop: take the wqh lock, re-test 'condition'; if false, enqueue a
 * stack-allocated waitqueue for 'current', bump the sleeper count,
 * mark the task sleeping and schedule() away.  The condition is
 * re-checked after every wakeup, so spurious wakeups are safe.
 * 'ret' is set to 0 when the condition held, or -EINTR when the task
 * was woken with TASK_INTERRUPTED set (flag is cleared here).
 *
 * NOTE(review): the body ends in "while(0);" -- the trailing
 * semicolon adds an empty statement at every use site, which would
 * break a braceless if/else around this macro; confirm call sites
 * before changing it.
 */
#define WAIT_EVENT(wqh, condition, ret)				\
do {								\
	ret = 0;						\
	for (;;) {						\
		unsigned long irqsave;				\
		spin_lock_irq(&(wqh)->slock, &irqsave);		\
		if (condition) {				\
			spin_unlock_irq(&(wqh)->slock, irqsave);\
			break;					\
		}						\
		CREATE_WAITQUEUE_ON_STACK(wq, current);		\
		task_set_wqh(current, wqh, &wq);		\
		(wqh)->sleepers++;				\
		list_insert_tail(&wq.task_list,			\
				 &(wqh)->task_list);		\
		/* printk("(%d) waiting...\n", current->tid); */\
		sched_prepare_sleep();				\
		spin_unlock_irq(&(wqh)->slock, irqsave);	\
		schedule();					\
		/* Did we wake up normally or get interrupted */\
		if (current->flags & TASK_INTERRUPTED) {	\
			current->flags &= ~TASK_INTERRUPTED;	\
			ret = -EINTR;				\
			break;					\
		}						\
	}							\
} while(0);
void wake_up(struct waitqueue_head *wqh, unsigned int flags);
int wake_up_task(struct ktcb *task, unsigned int flags);
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
int wait_on(struct waitqueue_head *wqh);
int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq);
int wait_on_prepared_wait(void);
#endif /* __LIB_WAIT_H__ */