Kernel updates since December 2009

This commit is contained in:
Bahadir Balban
2010-03-25 01:12:40 +02:00
parent 16818191b3
commit 74b5963fcb
487 changed files with 22477 additions and 3857 deletions

View File

@@ -0,0 +1,15 @@
#ifndef __ARM_ASM_H__
#define __ARM_ASM_H__

/*
 * Assembler helpers for marking procedure boundaries in .S files.
 *
 * BEGIN_PROC exports @name, marks it as a function symbol and aligns
 * it (.align with no operand uses the assembler's default alignment).
 */
#define BEGIN_PROC(name) \
.global name; \
.type name,function; \
.align; \
name:

/*
 * END_PROC drops a local end label and records the symbol size
 * (end - start) so tools such as objdump and profilers see correct
 * function extents.
 */
#define END_PROC(name) \
.fend_##name: \
.size name,.fend_##name - name;

#endif /* __ARM_ASM_H__ */

View File

@@ -0,0 +1,11 @@
#ifndef __L4LIB_ARCH_IRQ_H__
#define __L4LIB_ARCH_IRQ_H__

/*
 * Destructive atomic-read.
 *
 * Write 0 to byte at @location as its contents are read back.
 * Returns the byte's previous contents.
 */
char l4_atomic_dest_readb(void *location);

#endif /* __L4LIB_ARCH_IRQ_H__ */

View File

@@ -0,0 +1,95 @@
/*
* System call prototypes.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __ARM_SYSCALLS_H__
#define __ARM_SYSCALLS_H__

#include L4LIB_INC_ARCH(types.h)
#include L4LIB_INC_ARCH(utcb.h)
#include <l4/generic/space.h>
#include <l4/api/space.h>
#include <l4/api/kip.h>
#include <l4/api/ipc.h>
#include <l4/api/thread.h>

/* Identifiers describing a task (names inferred; see kernel api/thread.h) */
struct task_ids {
	l4id_t tid;	/* Thread id */
	l4id_t spid;	/* Presumably the address space id */
	l4id_t tgid;	/* Presumably the thread group id */
};

/*
 * Returns the address of the Kernel Interface Page.
 * The api_version/api_flags/kernel_id out-parameters are
 * currently ignored by this implementation.
 */
static inline void *
l4_kernel_interface(unsigned int *api_version, unsigned int *api_flags,
		    unsigned int *kernel_id)
{
	return (void *)L4_KIP_ADDRESS;
}

/*
 * For each system call below there is a function-pointer type, an
 * extern __l4_* pointer and an l4_* prototype.
 * NOTE(review): the __l4_* pointers are presumably initialized at
 * startup from syscall entry addresses published in the KIP —
 * confirm in the library init code.
 */

/* Voluntarily switch the CPU to @dest */
typedef unsigned int (*__l4_thread_switch_t)(u32);
extern __l4_thread_switch_t __l4_thread_switch;
unsigned int l4_thread_switch (u32 dest);

/* Retrieve the caller's task identifiers */
typedef int (*__l4_getid_t)(struct task_ids *ids);
extern __l4_getid_t __l4_getid;
int l4_getid(struct task_ids *ids);

/* Raw IPC between threads @to and @from */
typedef int (*__l4_ipc_t)(l4id_t to, l4id_t from, u32 flags);
extern __l4_ipc_t __l4_ipc;
int l4_ipc(l4id_t to, l4id_t from, u32 flags);

/* Capability manipulation/query */
typedef int (*__l4_capability_control_t)(unsigned int req, unsigned int flags, void *buf);
extern __l4_capability_control_t __l4_capability_control;
int l4_capability_control(unsigned int req, unsigned int flags, void *buf);

/* Map @npages physical pages at @virt in the space of @tid */
typedef int (*__l4_map_t)(void *phys, void *virt,
			  u32 npages, u32 flags, l4id_t tid);
extern __l4_map_t __l4_map;
int l4_map(void *p, void *v, u32 npages, u32 flags, l4id_t tid);

/* Unmap pages from the space of @tid */
/* NOTE(review): `virtual` is a valid C identifier but a C++ keyword */
typedef int (*__l4_unmap_t)(void *virt, unsigned long npages, l4id_t tid);
extern __l4_unmap_t __l4_unmap;
int l4_unmap(void *virtual, unsigned long numpages, l4id_t tid);

/* Thread lifecycle (create/destroy/etc. via @action) */
typedef int (*__l4_thread_control_t)(unsigned int action, struct task_ids *ids);
extern __l4_thread_control_t __l4_thread_control;
int l4_thread_control(unsigned int action, struct task_ids *ids);

/* IRQ registration and control */
typedef int (*__l4_irq_control_t)(unsigned int req, unsigned int flags, l4id_t id);
extern __l4_irq_control_t __l4_irq_control;
int l4_irq_control(unsigned int req, unsigned int flags, l4id_t id);

/* Manipulate pending/blocked IPC state */
typedef int (*__l4_ipc_control_t)(unsigned int action, l4id_t blocked_sender,
				  u32 blocked_tag);
extern __l4_ipc_control_t __l4_ipc_control;
int l4_ipc_control(unsigned int, l4id_t blocked_sender, u32 blocked_tag);

/* Read/modify another thread's register context */
typedef int (*__l4_exchange_registers_t)(void *exregs_struct, l4id_t tid);
extern __l4_exchange_registers_t __l4_exchange_registers;
int l4_exchange_registers(void *exregs_struct, l4id_t tid);

/* Container management */
typedef int (*__l4_container_control_t)(unsigned int req, unsigned int flags, void *buf);
extern __l4_container_control_t __l4_container_control;
int l4_container_control(unsigned int req, unsigned int flags, void *buf);

/* Get (@set == 0) or set (@set != 0) the system time */
typedef int (*__l4_time_t)(void *timeval, int set);
extern __l4_time_t __l4_time;
int l4_time(void *timeval, int set);

/* Kernel-assisted userspace mutex operations */
typedef int (*__l4_mutex_control_t)(void *mutex_word, int op);
extern __l4_mutex_control_t __l4_mutex_control;
int l4_mutex_control(void *mutex_word, int op);

/* Cache maintenance over [start, end) */
typedef int (*__l4_cache_control_t)(void *start, void *end, unsigned int flags);
extern __l4_cache_control_t __l4_cache_control;
int l4_cache_control(void *start, void *end, unsigned int flags);

/* To be supplied by server tasks. */
void *virt_to_phys(void *);
void *phys_to_virt(void *);

#endif /* __ARM_SYSCALLS_H__ */

View File

@@ -0,0 +1,366 @@
/*
* Helper functions that wrap raw l4 syscalls.
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#ifndef __L4LIB_SYSLIB_H__
#define __L4LIB_SYSLIB_H__

#include <stdio.h>
#include <l4/macros.h>
#include L4LIB_INC_ARCH(syscalls.h)

/*
 * NOTE:
 * Its best to use these wrappers because they generalise the way
 * common ipc data like sender id, error, ipc tag are passed
 * between ipc parties.
 *
 * The arguments to l4_ipc() are used by the microkernel to initiate
 * the ipc. Any data passed in message registers may or may not be
 * a duplicate of this data, but the distinction is that anything
 * that is passed via the mrs are meant to be used by the other party
 * participating in the ipc.
 */

/*
 * For system call arguments: message register slots available for
 * payload, starting at the first MR not reserved by the library.
 */
#define L4SYS_ARG0	(MR_UNUSED_START)
#define L4SYS_ARG1	(MR_UNUSED_START + 1)
#define L4SYS_ARG2	(MR_UNUSED_START + 2)
#define L4SYS_ARG3	(MR_UNUSED_START + 3)

/* Low 12 bits of MR_TAG hold the tag; the rest carry flags */
#define L4_IPC_TAG_MASK		0x00000FFF
/*
 * Servers get sender.
 * Reads the sender id that the microkernel stamped into MR_SENDER.
 */
static inline l4id_t l4_get_sender(void)
{
	return (l4id_t)read_mr(MR_SENDER);
}

/*
 * When doing an ipc the sender never has to be explicitly set in
 * the utcb via this function since this information is found out
 * by the microkernel by checking the system caller's id. This is
 * only used for restoring the sender on the utcb in order to
 * complete an earlier ipc.
 */
static inline void l4_set_sender(l4id_t sender)
{
	write_mr(MR_SENDER, sender);
}
/*
 * Field accessors for the IPC flags word.
 * Each setter clears its field then merges the new value in a single
 * masked expression; each getter masks and shifts the field back out.
 */

/* Install extended-IPC transfer size into @word */
static inline unsigned int l4_set_ipc_size(unsigned int word, unsigned int size)
{
	return (word & ~L4_IPC_FLAGS_SIZE_MASK) |
	       ((size << L4_IPC_FLAGS_SIZE_SHIFT) & L4_IPC_FLAGS_SIZE_MASK);
}

/* Extract extended-IPC transfer size from @word */
static inline unsigned int l4_get_ipc_size(unsigned int word)
{
	return (word & L4_IPC_FLAGS_SIZE_MASK) >> L4_IPC_FLAGS_SIZE_SHIFT;
}

/* Install the MR slot index carrying the payload pointer */
static inline unsigned int l4_set_ipc_msg_index(unsigned int word, unsigned int index)
{
	/* FIXME: Define MR_PRIMARY_TOTAL, MR_TOTAL etc. and use MR_TOTAL HERE! */
	BUG_ON(index > UTCB_SIZE);
	return (word & ~L4_IPC_FLAGS_MSG_INDEX_MASK) |
	       ((index << L4_IPC_FLAGS_MSG_INDEX_SHIFT) &
		L4_IPC_FLAGS_MSG_INDEX_MASK);
}

/* Extract the MR slot index carrying the payload pointer */
static inline unsigned int l4_get_ipc_msg_index(unsigned int word)
{
	return (word & L4_IPC_FLAGS_MSG_INDEX_MASK)
	       >> L4_IPC_FLAGS_MSG_INDEX_SHIFT;
}

/* Install the IPC type flags (e.g. full/extended) into @word */
static inline unsigned int l4_set_ipc_flags(unsigned int word, unsigned int flags)
{
	return (word & ~L4_IPC_FLAGS_TYPE_MASK) |
	       (flags & L4_IPC_FLAGS_TYPE_MASK);
}

/* Extract the IPC type flags from @word */
static inline unsigned int l4_get_ipc_flags(unsigned int word)
{
	return word & L4_IPC_FLAGS_TYPE_MASK;
}
/* Read the tag bits of MR_TAG (flag bits stripped) */
static inline unsigned int l4_get_tag(void)
{
	return read_mr(MR_TAG) & L4_IPC_TAG_MASK;
}

/* Write the tag bits of MR_TAG, preserving any flag bits already set */
static inline void l4_set_tag(unsigned int tag)
{
	unsigned int tag_flags = read_mr(MR_TAG);

	tag_flags &= ~L4_IPC_TAG_MASK;
	tag_flags |= tag & L4_IPC_TAG_MASK;

	write_mr(MR_TAG, tag_flags);
}

/* Servers:
 * Sets the message register for returning errors back to client task.
 * These are usually posix error codes.
 */
static inline void l4_set_retval(int retval)
{
	write_mr(MR_RETURN, retval);
}

/* Clients:
 * Learn result of request.
 */
static inline int l4_get_retval(void)
{
	return read_mr(MR_RETURN);
}
/*
 * This is useful for stacked IPC. A stacked IPC happens
 * when a new IPC is initiated before concluding the current
 * one.
 *
 * This saves the last ipc's parameters such as the sender
 * and tag information. Any previously saved data in save
 * slots are destroyed. This is fine as IPC stacking is only
 * useful if done once.
 */
static inline void l4_save_ipcregs(void)
{
	l4_get_utcb()->saved_sender = l4_get_sender();
	l4_get_utcb()->saved_tag = l4_get_tag();
}

/* Restore the sender/tag saved by l4_save_ipcregs() into the MRs */
static inline void l4_restore_ipcregs(void)
{
	l4_set_tag(l4_get_utcb()->saved_tag);
	l4_set_sender(l4_get_utcb()->saved_sender);
}
/*
 * A global thread id encodes a container id in its top byte and
 * the container-local id in the remaining 24 bits.
 */
#define TASK_CID_MASK		0xFF000000
#define TASK_ID_MASK		0x00FFFFFF
#define TASK_CID_SHIFT		24

/* Strip the container bits, leaving the container-local thread id */
static inline l4id_t __raw_tid(l4id_t tid)
{
	return (l4id_t)(tid & TASK_ID_MASK);
}

/* Extract the container id from a global thread id */
static inline l4id_t __cid(l4id_t tid)
{
	return (l4id_t)((tid & TASK_CID_MASK) >> TASK_CID_SHIFT);
}
/* Ask the kernel for the caller's own (global) thread id */
static inline l4id_t self_tid(void)
{
	struct task_ids ids;

	l4_getid(&ids);
	return ids.tid;
}

/* Caller's thread id with the container bits stripped */
static inline l4id_t __raw_self_tid(void)
{
	return __raw_tid(self_tid());
}
/* Full-utcb send: tag the message, then transfer all MRs to @to */
static inline int l4_send_full(l4id_t to, unsigned int tag)
{
	l4_set_tag(tag);
	return l4_ipc(to, L4_NILTHREAD, L4_IPC_FLAGS_FULL);
}

/* Full-utcb receive from @from */
static inline int l4_receive_full(l4id_t from)
{
	return l4_ipc(L4_NILTHREAD, from, L4_IPC_FLAGS_FULL);
}

/* Full-utcb combined send-then-receive; both parties must be real threads */
static inline int l4_sendrecv_full(l4id_t to, l4id_t from, unsigned int tag)
{
	int err;

	BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);
	l4_set_tag(tag);

	err = l4_ipc(to, from, L4_IPC_FLAGS_FULL);

	return err;
}
/*
 * Extended send: transfers @size bytes from user buffer @buf.
 * The buffer pointer travels in MR slot L4SYS_ARG0 and the flags
 * word tells the kernel the size and which MR holds the pointer.
 */
static inline int l4_send_extended(l4id_t to, unsigned int tag,
				   unsigned int size, void *buf)
{
	unsigned int flags = 0;

	l4_set_tag(tag);

	/* Set up flags word for extended ipc */
	flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);
	flags = l4_set_ipc_size(flags, size);
	flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);

	/* Write buffer pointer to MR index that we specified */
	write_mr(L4SYS_ARG0, (unsigned long)buf);

	return l4_ipc(to, L4_NILTHREAD, flags);
}

/* Extended receive: accept up to @size bytes into user buffer @buf */
static inline int l4_receive_extended(l4id_t from, unsigned int size, void *buf)
{
	unsigned int flags = 0;

	/* Indicate extended receive */
	flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);

	/* How much data is accepted */
	flags = l4_set_ipc_size(flags, size);

	/* Indicate which MR index buffer pointer is stored */
	flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);

	/* Set MR with buffer to receive data */
	write_mr(L4SYS_ARG0, (unsigned long)buf);

	return l4_ipc(L4_NILTHREAD, from, flags);
}
/*
 * Send the result of an extended IPC back to the requester.
 *
 * Extended IPC copies up to 2KB of user address space buffer.
 * Along with such an ipc, a return value is sent using a primary
 * mr that is used as the return register.
 *
 * It may not be desirable to return a payload on certain conditions
 * (such as an error return value), so when @nopayload is set the
 * copy size is forced to zero and only the return value is sent.
 */
static inline int l4_return_extended(int retval, unsigned int size,
				     void *buf, int nopayload)
{
	l4id_t sender = l4_get_sender();
	unsigned int flags = 0;

	l4_set_retval(retval);

	/* Flags word: extended type, MR slot of buffer, payload size */
	flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);
	flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);
	flags = l4_set_ipc_size(flags, nopayload ? 0 : size);

	/* Publish the buffer pointer in the agreed MR slot */
	write_mr(L4SYS_ARG0, (unsigned long)buf);

	return l4_ipc(sender, L4_NILTHREAD, flags);
}
/*
 * TODO: unimplemented stub — always returns 0 and ignores its
 * arguments. Needs to imitate l4_sendrecv() but with extended
 * send/recv flags.
 */
static inline int l4_sendrecv_extended(l4id_t to, l4id_t from,
				       unsigned int tag, void *buf)
{
	/* Need to imitate sendrecv but with extended send/recv flags */
	return 0;
}
/* Primary-MR send: tag the message and fire a plain ipc to @to */
static inline int l4_send(l4id_t to, unsigned int tag)
{
	l4_set_tag(tag);
	return l4_ipc(to, L4_NILTHREAD, 0);
}

/* Combined send-then-receive; both parties must be real threads */
static inline int l4_sendrecv(l4id_t to, l4id_t from, unsigned int tag)
{
	BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);

	l4_set_tag(tag);
	return l4_ipc(to, from, 0);
}

/* Primary-MR receive from @from */
static inline int l4_receive(l4id_t from)
{
	return l4_ipc(L4_NILTHREAD, from, 0);
}
/*
 * Debug helper: dump the six primary message registers.
 * Fix: declare as (void) — an empty parameter list is an
 * unprototyped (K&R-style) declaration in C and disables
 * argument checking at call sites.
 */
static inline void l4_print_mrs(void)
{
	printf("Message registers: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
	       read_mr(0), read_mr(1), read_mr(2), read_mr(3),
	       read_mr(4), read_mr(5));
}
/* Servers:
 * Return the ipc result back to requesting task.
 */
static inline int l4_ipc_return(int retval)
{
	l4id_t sender = l4_get_sender();

	l4_set_retval(retval);

	/* Setting the tag would overwrite retval so we l4_send without tagging */
	return l4_ipc(sender, L4_NILTHREAD, 0);
}
/* Virtual address-range allocator, supplied elsewhere by the library */
void *l4_new_virtual(int npages);
void *l4_del_virtual(void *virt, int npages);

/* A helper that translates and maps a physical address to virtual */
static inline void *l4_map_helper(void *phys, int npages)
{
	struct task_ids ids;
	int err;
	void *virt = l4_new_virtual(npages);

	l4_getid(&ids);

	/*
	 * NOTE(review): Linux convention would be ERR_PTR(err) to encode
	 * an int error as a pointer; PTR_ERR normally goes the other way.
	 * Confirm how this codebase defines PTR_ERR.
	 */
	if ((err = l4_map(phys, virt, npages,
			  MAP_USR_DEFAULT, ids.tid)) < 0)
		return PTR_ERR(err);

	return virt;
}

/* Unmaps @npages at @virt and releases the virtual range; returns 0 */
static inline void *l4_unmap_helper(void *virt, int npages)
{
	struct task_ids ids;

	l4_getid(&ids);
	l4_unmap(virt, npages, ids.tid);
	l4_del_virtual(virt, npages);

	return 0;
}
/* Exit code travels in the low 16 bits of the thread-control word */
#define L4_EXIT_MASK		0xFFFF

/* Destroy the calling thread, handing @exit_code to the kernel */
static inline void l4_exit(unsigned int exit_code)
{
	struct task_ids ids;

	l4_getid(&ids);
	l4_thread_control(THREAD_DESTROY |
			  (exit_code & L4_EXIT_MASK),
			  &ids);
}

#endif /* __L4LIB_SYSLIB_H__ */

View File

@@ -0,0 +1,8 @@
/*
 * Fix: the guard tested __L4LIB_ARM_TYPES_H___ (three trailing
 * underscores) but defined __L4LIB_ARM_TYPES_H__, so the include
 * guard never took effect. Use one consistent name.
 */
#ifndef __L4LIB_ARM_TYPES_H__
#define __L4LIB_ARM_TYPES_H__

/* Sentinel value for an invalid/unassigned task id */
#define TASK_ID_INVALID		0xFFFFFFFF

#include <l4/arch/arm/types.h>

#endif /* __L4LIB_ARM_TYPES_H__ */

View File

@@ -0,0 +1,78 @@
/*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#ifndef __ARM_UTCB_H__
#define __ARM_UTCB_H__

/*
 * The UTCB pointer lives at a fixed offset inside the KIP page:
 * USER_UTCB_REF = L4_KIP_ADDRESS + UTCB_KIP_OFFSET.
 */
#define USER_UTCB_REF		0xFF000050
#define L4_KIP_ADDRESS		0xFF000000
#define UTCB_KIP_OFFSET		0x50

#ifndef __ASSEMBLY__

#include <l4lib/types.h>
#include <l4/macros.h>
#include <l4/lib/math.h>
#include INC_GLUE(message.h)
#include INC_GLUE(memory.h)
#include <string.h>
#include <stdio.h>
#include L4LIB_INC_SUBARCH(utcb.h)

/*
 * See kernel glue/arch/message.h for utcb details
 */
extern struct kip *kip;
/* Functions to read/write utcb registers */

/*
 * Read MR @offset; indices below MR_TOTAL address the primary
 * registers, higher indices fall through to the mr_rest area.
 * NOTE(review): offset is int here but unsigned in write_mr —
 * a negative offset would index mr[] out of bounds; confirm callers.
 */
static inline unsigned int read_mr(int offset)
{
	if (offset < MR_TOTAL)
		return l4_get_utcb()->mr[offset];
	else
		return l4_get_utcb()->mr_rest[offset - MR_TOTAL];
}

/* Write MR @offset, with the same primary/rest split as read_mr() */
static inline void write_mr(unsigned int offset, unsigned int val)
{
	if (offset < MR_TOTAL)
		l4_get_utcb()->mr[offset] = val;
	else
		l4_get_utcb()->mr_rest[offset - MR_TOTAL] = val;
}
/* Start of the full-IPC payload area (mr_rest) in the caller's utcb */
static inline void *utcb_full_buffer()
{
	return &l4_get_utcb()->mr_rest[0];
}

/*
 * Copy a C string from @src into the full-IPC buffer.
 * NOTE(review): strncpy does not NUL-terminate if @src fills the
 * buffer exactly — confirm readers tolerate an unterminated string.
 */
static inline char *utcb_full_strcpy_from(const char *src)
{
	return strncpy((char *)&l4_get_utcb()->mr_rest[0], src,
		       L4_UTCB_FULL_BUFFER_SIZE);
}

/* Copy at most the buffer size worth of raw bytes from @src */
static inline void *utcb_full_memcpy_from(const char *src, int size)
{
	return memcpy(&l4_get_utcb()->mr_rest[0], src,
		      min(size, L4_UTCB_FULL_BUFFER_SIZE));
}

/* Copy a C string out of the full-IPC buffer into @dst */
static inline char *utcb_full_strcpy_to(char *dst)
{
	return strncpy(dst, (char *)&l4_get_utcb()->mr_rest[0],
		       L4_UTCB_FULL_BUFFER_SIZE);
}

/* Copy at most the buffer size worth of raw bytes out into @dst */
static inline void *utcb_full_memcpy_to(char *dst, int size)
{
	return memcpy(dst, &l4_get_utcb()->mr_rest[0],
		      min(size, L4_UTCB_FULL_BUFFER_SIZE));
}

#endif /* !__ASSEMBLY__ */
#endif /* __ARM_UTCB_H__ */

View File

@@ -0,0 +1,3 @@
/*
 * Placeholder perfmon header (no perfmon support here).
 * Fix: the guard had no matching #define, so the macro was
 * never set and the guard was ineffective.
 */
#ifndef __PERFMON_H__
#define __PERFMON_H__
#endif /* __PERFMON_H__ */

View File

@@ -0,0 +1,21 @@
#ifndef __ARM_V5_UTCB_H__
#define __ARM_V5_UTCB_H__

/*
 * Pointer to Kernel Interface Page's UTCB pointer offset.
 */
extern struct utcb **kip_utcb_ref;

/* ARMv5: fetch the current thread's UTCB via the KIP */
static inline struct utcb *l4_get_utcb()
{
	/*
	 * By double dereferencing, we get the private TLS
	 * (aka UTCB). First reference is to the KIP's utcb
	 * offset, second is to the utcb itself, to which
	 * the KIP's utcb reference had been updated during
	 * context switch.
	 */
	return *kip_utcb_ref;
}

#endif /* __ARM_V5_UTCB_H__ */

View File

@@ -0,0 +1,405 @@
/*
 * ARMv7 Performance Monitor operations
 *
 * Copyright (C) 2010 B Labs Ltd.
 *
 * Author: Bahadir Balban
 */
#ifndef __PERFMON_H__
#define __PERFMON_H__

#include <l4lib/types.h>

/* Perfmon control register (PMCR) bit positions */
#define PMCR_DP_BIT		5	/* Disable prohibited */
#define PMCR_X_BIT		4	/* Export event enable */
#define PMCR_D_BIT		3	/* 64-cycle granularity */
#define PMCR_C_BIT		2	/* PMCCNTR reset */
#define PMCR_P_BIT		1	/* Events all reset */
#define PMCR_E_BIT		0	/* Enable all */

/* Obtain number of event counters: (PMCR >> shift) & mask */
#define PMCR_N_SHIFT		11
#define PMCR_N_MASK		0x1F

/* Special bit for cycle counter in enable/overflow registers */
#define PMCCNTR_BIT		31

/*
 * Performance Events
 */

/* Generic v7 events (architecturally defined event numbers) */
#define PERFMON_EVENT_SOFTINC			0
#define PERFMON_EVENT_IFETCH_L1CREFILL		1
#define PERFMON_EVENT_IFETCH_TLBREFILL		2
#define PERFMON_EVENT_DFETCH_L1CREFILL		3
#define PERFMON_EVENT_DFETCH_L1CACCESS		4
#define PERFMON_EVENT_DFETCH_TLBREFILL		5
#define PERFMON_EVENT_MEMREAD_INSTR		6
#define PERFMON_EVENT_MEMWRITE_INSTR		7
#define PERFMON_EVENT_ALL_INSTR			8
#define PERFMON_EVENT_EXCEPTION			9
#define PERFMON_EVENT_EXCEPTION_RETURN		10
#define PERFMON_EVENT_CONTEXTIDR_CHANGE		11
#define PERFMON_EVENT_PC_CHANGE			12
#define PERFMON_EVENT_IMM_BRANCH		13
#define PERFMON_EVENT_FUNCTION_RETURN		14
#define PERFMON_EVENT_UNALIGNED_ACCESS		15
#define PERFMON_EVENT_BRANCH_MISS		16
#define PERFMON_EVENT_RAW_CYCLE_COUNT		17
#define PERFMON_EVENT_BRANCH_MAYBEHIT		18

/*
 * Cortex-A9 events (only relevant ones)
 * 0x40-2, 0x6E, 0x70, 0x71-4, 0x80-0x81, 0x8A-8B
 * 0xA0-5 omitted
 */

/*
 * Linefill not satisfied from other cpu caches but
 * has to go to external memory
 */
#define PERFMON_EVENT_SMP_LINEFILL_MISS		0x50

/* Linefill satisfied from other cpu caches */
#define PERFMON_EVENT_SMP_LINEFILL_HIT		0x51

/* Icache refill stall cycles on cpu pipeline */
#define PERFMON_EVENT_ICACHE_CPU_STALL		0x60

/* Dcache refill stall cycles on cpu pipeline */
#define PERFMON_EVENT_DCACHE_CPU_STALL		0x61

/* TLB miss stall cycles on cpu pipeline */
#define PERFMON_EVENT_TLBMISS_CPU_STALL		0x62

#define PERFMON_EVENT_STREX_SUCCESS		0x63
#define PERFMON_EVENT_STREX_FAIL		0x64
#define PERFMON_EVENT_DCACHE_EVICTION		0x65

/* Issue stage can't proceed to dispatch any instruction */
#define PERFMON_EVENT_PIPELINE_CANT_ISSUE	0x66

/* Issue stage empty */
#define PERFMON_EVENT_PIPELINE_ISSUE_EMPTY	0x67

/* Register renamed instructions */
#define PERFMON_EVENT_REGRENAMED_INSTR		0x68

#define PERFMON_EVENT_CPUSTALL_ITLB_MISS	0x82
#define PERFMON_EVENT_CPUSTALL_DTLB_MISS	0x83
#define PERFMON_EVENT_CPUSTALL_IUTLB_MISS	0x84
#define PERFMON_EVENT_CPUSTALL_DUTLB_MISS	0x85
#define PERFMON_EVENT_CPUSTALL_DMB		0x86

#define PERFMON_EVENT_ISB_COUNT			0x90
#define PERFMON_EVENT_DSB_COUNT			0x91
#define PERFMON_EVENT_DMB_COUNT			0x92
#define PERFMON_EVENT_EXTIRQ_COUNT		0x93
/*
 * CP15 performance monitor register accessors.
 * Encodings are c9,c12/c13/c14 with varying opcode2; reads that feed
 * decisions are followed by an isb so the result is ordered before
 * subsequent instructions.
 */

/* Read PMCR (performance monitor control, c9,c12,0) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_ctrl(void)
{
	volatile u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c12, 0\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCR (c9,c12,0) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_ctrl(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 0"
		:
		: "r" (word)
	);
}

/* Read PMCNTENSET (counter enable set, c9,c12,1) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cntenset(void)
{
	volatile u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c12, 1\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCNTENSET (c9,c12,1) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cntenset(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 1"
		:
		: "r" (word)
	);
}
/* Read PMCNTENCLR (counter enable clear, c9,c12,2) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cntenclr(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c12, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCNTENCLR (c9,c12,2): writing 1 bits disables counters */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cntenclr(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 2"
		:
		: "r" (word)
	);
}

/* Read PMOVSR (overflow flag status, c9,c12,3) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_overflow(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c12, 3"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMOVSR (c9,c12,3): writing 1 bits clears overflow flags */
static inline void __attribute__((always_inline))
cp15_write_perfmon_overflow(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 3"
		:
		: "r" (word)
	);
}

/* Write PMSWINC (software increment, c9,c12,4) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_softinc(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 4"
		:
		: "r" (word)
	);
}
/* Read PMSELR (event counter select, c9,c12,5) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evcntsel(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c12, 5"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMSELR (c9,c12,5): selects which event counter c9,c13 ops hit */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evcntsel(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c12, 5"
		:
		: "r" (word)
	);
}

/* Read PMCCNTR (cycle counter, c9,c13,0); isb orders the read */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_cyccnt(void)
{
	volatile u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c13, 0\n"
		"isb\n"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMCCNTR (c9,c13,0) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_cyccnt(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c13, 0"
		:
		: "r" (word)
	);
}

/* Read PMXEVTYPER (selected counter's event type, c9,c13,1) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evtypesel(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c13, 1"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMXEVTYPER (c9,c13,1) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evtypesel(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c13, 1"
		:
		: "r" (word)
	);
}
/* Read PMXEVCNTR (selected counter's value, c9,c13,2) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_evcnt(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c13, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMXEVCNTR (c9,c13,2) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_evcnt(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c13, 2"
		:
		: "r" (word)
	);
}

/* Read PMUSERENR (user enable, c9,c14,0) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_useren(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c14, 0"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMUSERENR (c9,c14,0): grants user-mode access to counters */
static inline void __attribute__((always_inline))
cp15_write_perfmon_useren(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c14, 0"
		:
		: "r" (word)
	);
}

/* Read PMINTENSET (overflow interrupt enable set, c9,c14,1) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_intenset(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c14, 1"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMINTENSET (c9,c14,1) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_intenset(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c14, 1"
		:
		: "r" (word)
	);
}

/* Read PMINTENCLR (overflow interrupt enable clear, c9,c14,2) */
static inline u32 __attribute__((always_inline))
cp15_read_perfmon_intenclr(void)
{
	u32 val = 0;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c9, c14, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write PMINTENCLR (c9,c14,2) */
static inline void __attribute__((always_inline))
cp15_write_perfmon_intenclr(volatile u32 word)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c9, c14, 2"
		:
		: "r" (word)
	);
}
#include <stdio.h>

#if defined (CONFIG_DEBUG_PERFMON_USER)
/*
 * Read the cycle counter, warning on stderr-less printf if the
 * counter has overflowed since last cleared.
 * Fix: declare all empty parameter lists as (void) — `()` is an
 * unprototyped K&R declaration in C and disables call-site checks.
 */
static inline
u32 perfmon_read_cyccnt(void)
{
	u32 cnt = cp15_read_perfmon_cyccnt();
	u32 ovfl = cp15_read_perfmon_overflow();

	/* Detect overflow and signal something was wrong */
	if (ovfl & (1 << PMCCNTR_BIT))
		printf("%s: Overflow.\n", __FUNCTION__);

	return cnt;
}

/* Clear and (re)start the cycle counter */
void perfmon_reset_start_cyccnt(void);

/* Read the current count, then clear and restart the counter */
u32 perfmon_read_reset_start_cyccnt(void);
#endif

/* One-time performance monitor initialization */
void perfmon_init(void);

#endif /* __PERFMON_H__ */

View File

@@ -0,0 +1,59 @@
/*
 * Fix: this is the ARMv7 variant (see the thread-register comment
 * below), but its include guard was copy-pasted from the v5 header
 * as __ARM_V5_UTCB_H__ — colliding with that header, so whichever
 * is included second would silently be skipped. Rename the guard.
 */
#ifndef __ARM_V7_UTCB_H__
#define __ARM_V7_UTCB_H__

/*
 * NOTE: Any changes you make here, you *MUST* change
 * utcb_address() macro in syscall.S assembler.
 */

/* Read Thread ID User RW register (TPIDRURW, c13,c0,2) */
static inline u32 l4_cp15_read_tid_usr_rw(void)
{
	volatile u32 val;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c13, c0, 2"
		: "=r" (val)
		:
	);
	return val;
}

/* Write Thread ID User RW register (TPIDRURW, c13,c0,2) */
static inline void l4_cp15_write_tid_usr_rw(volatile u32 val)
{
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c13, c0, 2"
		:
		: "r" (val)
	);
}

/* Read Thread ID User RO register (TPIDRURO, c13,c0,3) */
static inline u32 l4_cp15_read_tid_usr_ro(void)
{
	volatile u32 val;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c13, c0, 3"
		: "=r" (val)
		:
	);
	return val;
}

/*
 * In ARMv7, utcb resides in the userspace read-only
 * thread register. This adds the benefit of avoiding
 * dirtying the cache and extra management for smp since
 * it is per-cpu.
 */
static inline struct utcb *l4_get_utcb()
{
	return (struct utcb *)l4_cp15_read_tid_usr_ro();
}

#endif /* __ARM_V7_UTCB_H__ */