Kernel updates since December 2009

This commit is contained in:
Bahadir Balban
2010-03-25 01:12:40 +02:00
parent 16818191b3
commit 74b5963fcb
487 changed files with 22477 additions and 3857 deletions

View File

@@ -0,0 +1,99 @@
/*
* Generic to arch-specific interface for
* exchange_registers()
*
* Copyright (C) 2008 Bahadir Balban
*/
#include <l4/macros.h>
#include <l4lib/exregs.h>
#include L4LIB_INC_ARCH(syslib.h)
#include INC_GLUE(message.h)
/* Mark an exregs request as a register read: the kernel copies registers out. */
void exregs_set_read(struct exregs_data *exregs)
{
	exregs->flags |= EXREGS_READ;
}
/*
 * Read and dump the current thread's full register state, its pager
 * id and utcb address via l4_exchange_registers().
 *
 * NOTE(review): EXREGS_SET_UTCB/EXREGS_SET_PAGER are set together with
 * EXREGS_READ -- presumably these select the utcb/pager fields for
 * reading rather than writing them; confirm against the exregs API.
 */
void exregs_print_registers(void)
{
	struct exregs_data exregs;

	/* Read registers */
	memset(&exregs, 0, sizeof(exregs));
	exregs.valid_vect = ~0;		/* Set all flags: request every register */
	exregs.flags |= EXREGS_READ;
	exregs.flags |= EXREGS_SET_UTCB;
	exregs.flags |= EXREGS_SET_PAGER;
	BUG_ON(l4_exchange_registers(&exregs, self_tid()) < 0);

	/* Print out registers */
	printf("Task (%x) register state upon fault:\n", self_tid());
	printf("R0: 0x%x\n", exregs.context.r0);
	printf("R1: 0x%x\n", exregs.context.r1);
	printf("R2: 0x%x\n", exregs.context.r2);
	printf("R3: 0x%x\n", exregs.context.r3);
	printf("R4: 0x%x\n", exregs.context.r4);
	printf("R5: 0x%x\n", exregs.context.r5);
	printf("R6: 0x%x\n", exregs.context.r6);
	printf("R7: 0x%x\n", exregs.context.r7);
	printf("R8: 0x%x\n", exregs.context.r8);
	printf("R9: 0x%x\n", exregs.context.r9);
	printf("R10: 0x%x\n", exregs.context.r10);
	printf("R11: 0x%x\n", exregs.context.r11);
	printf("R12: 0x%x\n", exregs.context.r12);
	/* R13-R15 carry the ARM-conventional names sp/lr/pc in the context */
	printf("R13: 0x%x\n", exregs.context.sp);
	printf("R14: 0x%x\n", exregs.context.lr);
	printf("R15: 0x%x\n", exregs.context.pc);
	printf("Pager: 0x%x\n", exregs.pagerid);
	printf("Utcb @ 0x%lx\n", exregs.utcb_address);
}
/*
 * Set message register @offset in the exregs context to @val and
 * mark it valid so the kernel applies it.
 */
void exregs_set_mr(struct exregs_data *s, int offset, unsigned long val)
{
	/* Get MR0, the base of the contiguous MR array in the context */
	u32 *mr = &s->context.MR0_REGISTER;

	/*
	 * Sanity check. Valid indices are 0 .. MR_TOTAL - 1; the
	 * original "offset > MR_TOTAL" check let offset == MR_TOTAL
	 * write one word past the MR array.
	 */
	BUG_ON(offset >= MR_TOTAL || offset < 0);

	/* Set MR */
	mr[offset] = val;

	/* Set valid bit for mr register */
	s->valid_vect |= FIELD_TO_BIT(exregs_context_t, MR0_REGISTER) << offset;
}
/* Request that the target thread's pager be changed to @pagerid. */
void exregs_set_pager(struct exregs_data *s, l4id_t pagerid)
{
	s->flags |= EXREGS_SET_PAGER;
	s->pagerid = pagerid;
}
/* Return the utcb address field from an exregs result. */
unsigned long exregs_get_utcb(struct exregs_data *s)
{
	return s->utcb_address;
}
/* Return the stack pointer (sp) from an exregs result. */
unsigned long exregs_get_stack(struct exregs_data *s)
{
	return s->context.sp;
}
/* Request that the target thread's utcb be placed at virtual address @virt. */
void exregs_set_utcb(struct exregs_data *s, unsigned long virt)
{
	s->flags |= EXREGS_SET_UTCB;
	s->utcb_address = virt;
}
/* Set the target thread's stack pointer and mark the sp field valid. */
void exregs_set_stack(struct exregs_data *s, unsigned long sp)
{
	s->valid_vect |= FIELD_TO_BIT(exregs_context_t, sp);
	s->context.sp = sp;
}
/* Set the target thread's program counter and mark the pc field valid. */
void exregs_set_pc(struct exregs_data *s, unsigned long pc)
{
	s->valid_vect |= FIELD_TO_BIT(exregs_context_t, pc);
	s->context.pc = pc;
}

View File

@@ -0,0 +1,21 @@
/*
* Set up new thread's argument and call its function.
* Return would be made to thread_exit with the return code.
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(asm.h)
BEGIN_PROC(setup_new_thread)
	ldr	r0, [sp, #-4]!	@ Pop the thread's single argument into r0 (first C arg)
	mov	lr, pc		@ lr <- address of the "b thread_exit" below (pc reads 2 insns ahead)
	ldr	pc, [sp, #-4]!	@ Pop the thread function pointer from the stack and jump to it
	b	thread_exit	@ Function returned; its r0 return value is thread_exit's exit code
1:
	b	1b		@ Never reaches here: thread_exit does not return
END_PROC(setup_new_thread)

View File

@@ -0,0 +1,235 @@
/*
* Userspace system call interface.
*
* Copyright (C) 2007 - 2009 Bahadir Balban
*/
#include L4LIB_INC_ARCH(asm.h)
#include L4LIB_INC_ARCH(utcb.h)
#include <l4/generic/space.h>
#include <l4/macros.h>
#include INC_GLUE(message.h)
#if defined (CONFIG_ARCH_ARM) && defined (CONFIG_SUBARCH_V7)
/* ARMv7 uses a special per-cpu register to keep thread-local utcb pointer */
.macro utcb_address rx
	mrc	p15, 0, \rx, c13, c0, 3	@ Read user-RO thread register TPIDRURO
.endm
#else /* End of ARMv7 */
/* Pre-v7: get the utcb address from the KIP page by double dereference */
.macro utcb_address rx
	ldr	\rx, =kip_utcb_ref	@ First get pointer to utcb pointer in KIP
	ldr	\rx, [\rx]		@ Get pointer to UTCB address from UTCB pointer in KIP
	ldr	\rx, [\rx]		@ Get the utcb address
.endm
#endif
/* Voluntarily yield the cpu (thread switch system call). */
BEGIN_PROC(l4_thread_switch)
	ldr	r12, =__l4_thread_switch
	ldr	pc, [r12]	@ Jump into the SWI. Kernel returns to LR_USR, which is the caller.
END_PROC(l4_thread_switch)
/*
 * The syscall returns process ids. This function saves the returned values in the
 * arguments passed by reference. @r0 = struct task_ids *
 *
 * No register save/restore is needed: the kernel returns straight to
 * our caller via LR_USR.
 */
BEGIN_PROC(l4_getid)
	ldr	r12, =__l4_getid	@ See l4_kdata_read for why its so simple.
	ldr	pc, [r12]		@ Return.
END_PROC(l4_getid)
/*
 * For clone() we need special assembler handling
 * Same signature as ipc(): @r0 = to, @r1 = from @r2 = flags
 *
 * NOTE: Note that this breaks l4 system call interface,
 * this should be moved elsewhere and modified using existing l4 mechanisms.
 */
BEGIN_PROC(arch_clone)
	stmfd	sp!, {r4-r8,lr}		@ Save context (callee-saved regs + return address)
	utcb_address r12		@ Get utcb address.
	ldmia	r12!, {r3-r8}		@ Load 6 Message registers from utcb. MR0-MR5
	ldr	r12, =__l4_ipc
	mov	lr, pc			@ Return address = next instruction after the jump
	ldr	pc, [r12]		@ Perform the ipc()
/*
 * At this moment:
 * - MR_RETURN tells us whether we are parent or child (or have failed).
 * - Child has new SP set, with |func_ptr|arg1|{End of stack}SP<-| on stack.
 * - Child needs exit logic when its function is finished.
 */
	cmp	r0, #0			@ Check ipc success
	blt	ipc_failed
	cmp	MR_RETURN_REGISTER, #0	@ Check ipc return register MR_RETURN.
	blt	clone_failed		@ Ipc was ok but clone() failed.
	bgt	parent_return		@ It has child pid, goto parent return.
child:
	@ Child runs on its own fresh stack; nothing saved above applies to it.
	ldr	r0, [sp, #-4]!		@ Load child's first argument.
	mov	lr, pc			@ Save return address (the loop below)
	ldr	pc, [sp, #-4]!		@ Load function pointer from stack
child_exit:
	b	child_exit		@ We infinitely loop for now.

	@ Return with normal ipc return sequence (parent, or either failure)
parent_return:
clone_failed:
ipc_failed:
	utcb_address r12		@ Get utcb
	stmia	r12, {r3-r8}		@ Store mrs back to the utcb.
	ldmfd	sp!, {r4-r8,pc}		@ Return restoring pc and context.
END_PROC(arch_clone)
/*
 * Inter-process communication. Loads message registers as arguments before the call,
 * and stores them as results after the call. @r0 = to, @r1 = from.
 */
BEGIN_PROC(l4_ipc)
	stmfd	sp!, {r4-r8,lr}	@ Save context.
	utcb_address r12	@ Get utcb address.
	ldmia	r12!, {r3-r8}	@ Load 6 Message registers from utcb. MR0-MR5
	ldr	r12, =__l4_ipc
	mov	lr, pc		@ Return address = instruction after the jump
	ldr	pc, [r12]
	utcb_address r12	@ Get utcb address (r12 was clobbered by the call).
	stmia	r12, {r3-r8}	@ Store 6 Message registers to utcb. MR0-MR5
	ldmfd	sp!, {r4-r8,pc}	@ Return restoring pc, and context.
END_PROC(l4_ipc)
/*
 * System call that maps an area of memory into the given address space.
 * @r0 = physical address, @r1 = virtual address, @r2 = map size in pages,
 * @r3 = map flags, @r4 = The tgid of the address space to map.
 */
BEGIN_PROC(l4_map)
	stmfd	sp!, {r4, lr}	@ Preserve callee-saved r4 and lr (8 bytes pushed)
	ldr	r4, [sp, #8]	@ 5th C arg was at [old sp]; after the 8-byte push it is at [sp, #8]
	ldr	r12, =__l4_map
	mov	lr, pc		@ We must return here to restore r4.
	ldr	pc, [r12]
	ldmfd	sp!, {r4, pc}	@ Restore r4 and return
END_PROC(l4_map)
/*
 * Reads/manipulates capabilities of a thread, particularly a pager.
 * @r0 = request type, @r1 = request flags, @r2 = Capability buffer pointer
 */
BEGIN_PROC(l4_capability_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_capability_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_capability_control)
/*
 * System call that unmaps an area of memory into the given address space.
 * @r0 = virtual, @r1 = pages, @r2 = tid of address space to unmap
 */
BEGIN_PROC(l4_unmap)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_unmap
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_unmap)
/*
 * System call that controls containers and their parameters.
 * @r0 = request type, @r1 = request flags, @r2 = io buffer ptr
 */
BEGIN_PROC(l4_container_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_container_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_container_control)
/*
 * System call that gets or sets the time info structure.
 * @r0 = ptr to time structure @r1 = set or get. set = 1, get = 0.
 */
BEGIN_PROC(l4_time)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_time
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_time)
/*
 * System call that controls thread creation, destruction and modification.
 * @r0 = thread action, @r1 = &ids, @r2 = utcb address
 */
BEGIN_PROC(l4_thread_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_thread_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_thread_control)
/*
 * System call that modifies ipc blocked sender lists of receivers.
 * @r0 = Action (e.g. block/unblock), @r1 = sender id, @r2 = sender tag
 */
BEGIN_PROC(l4_ipc_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_ipc_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_ipc_control)
/*
 * Controls irq registration, waiting and release.
 * (Original header described address-space manipulation -- a
 * copy-paste error; the parameters below match irq control.)
 * @r0 = operation code, @r1 = operation flags, @r2 = An id (irqnum, or capid)
 */
BEGIN_PROC(l4_irq_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_irq_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_irq_control)
/*
 * Locks/unlocks a userspace mutex.
 * @r0 = mutex virtual address, @r1 = mutex operation code
 */
BEGIN_PROC(l4_mutex_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_mutex_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_mutex_control)
/*
 * Sets registers of a thread and its pager.
 * @r0 = ptr to exregs_data structure, @r1 = tid of thread.
 */
BEGIN_PROC(l4_exchange_registers)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_exchange_registers
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_exchange_registers)
/*
 * System call that manipulates caches and tlbs.
 *
 * @r0 = starting virtual address (inclusive),
 * @r1 = ending virtual address (exclusive),
 * @r3 = cache operation
 *   NOTE(review): listing r3 (skipping r2) for the 3rd argument looks
 *   like a typo for r2 -- confirm against the kernel-side handler.
 */
BEGIN_PROC(l4_cache_control)
	stmfd	sp!, {lr}	@ Preserve caller's return address
	ldr	r12, =__l4_cache_control
	mov	lr, pc		@ Return to the next instruction after the call
	ldr	pc, [r12]	@ Enter the kernel via the KIP-provided entry
	ldmfd	sp!, {pc}	@ Restore original lr and return.
END_PROC(l4_cache_control)

View File

@@ -0,0 +1,103 @@
/*
* Copyright (C) 2009-2010 B Labs Ltd.
* Author: Bahadir Balban
*/
#include L4LIB_INC_ARCH(asm.h)
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include INC_SUBARCH(irq.h)
#include L4LIB_INC_ARCH(syslib.h) /* for BUG/BUG_ON, */
/*
* NOTES:
*
* Recap on swp:
*
* swp rx, ry, [rz]
*
* In one instruction:
*
* 1) Stores the value in ry into location pointed by rz.
* 2) Loads the value in the location of rz into rx.
* By doing so, in one instruction one can attempt to lock
* a word, and discover whether it was already locked.
*
* Why use tid of thread to lock mutex instead of
* a single lock value?
*
* Because in one atomic instruction, not only the locking attempt
* should be able to indicate whether it is locked, but also
* the contentions. A unified lock value would not be sufficient.
* The only way to indicate a contended lock is to store the
* unique TID of the locker.
*/
/*
 * Any non-negative value that is a potential TID
 * (including 0) means mutex is locked.
 */

/*
 * Try to take the mutex word at @m by atomically swapping in @tid.
 *
 * Returns L4_MUTEX_SUCCESS if the word held L4_MUTEX_UNLOCKED,
 * L4_MUTEX_CONTENDED otherwise (our tid is now stored, recording
 * the contention for the kernel).
 */
int __l4_mutex_lock(void *m, l4id_t tid)
{
	unsigned int tmp;

	/* swp: atomically store tid to *m and fetch the old value */
	__asm__ __volatile__(
		"swp %0, %1, [%2]"
		: "=r" (tmp)
		: "r"(tid), "r" (m)
		: "memory"
	);

	if (tmp == L4_MUTEX_UNLOCKED)
		return L4_MUTEX_SUCCESS;

	return L4_MUTEX_CONTENDED;
}
/*
 * Release the mutex word at @m by atomically swapping in
 * L4_MUTEX_UNLOCKED, returning the previous holder's state.
 *
 * Returns L4_MUTEX_SUCCESS if we held it (old value == our @tid),
 * L4_MUTEX_CONTENDED if another tid was recorded there.
 *
 * NOTE(review): the word is unconditionally set to UNLOCKED even when
 * the old value was not our tid -- presumably the CONTENDED return is
 * then handed to the kernel to wake waiters; confirm callers rely on this.
 */
int __l4_mutex_unlock(void *m, l4id_t tid)
{
	unsigned int tmp, tmp2 = L4_MUTEX_UNLOCKED;

	__asm__ __volatile__(
		"swp %0, %1, [%2]"
		: "=r" (tmp)
		: "r" (tmp2), "r"(m)
		: "memory"
	);

	/* Unlocking an already-unlocked mutex is a caller bug */
	BUG_ON(tmp == L4_MUTEX_UNLOCKED);

	if (tmp == tid)
		return L4_MUTEX_SUCCESS;

	return L4_MUTEX_CONTENDED;
}
/*
 * Destructively read a byte: fetch *location and clear it.
 *
 * NOTE(review): despite the name this is currently NOT atomic -- the
 * swpb implementation is disabled (#if 0) and the irq disable/restore
 * around the plain read-then-clear is commented out, so an interrupt
 * between the two statements can lose an update. TODO: confirm why the
 * swpb path was disabled before re-enabling either protection.
 */
u8 l4_atomic_dest_readb(unsigned long *location)
{
#if 0
	unsigned int tmp;
	__asm__ __volatile__ (
		"swpb r0, r2, [r1] \n"
		: "=r"(tmp)
		: "r"(location), "r"(0)
		: "memory"
	);
	return (u8)tmp;
#endif
	unsigned int tmp;
	// unsigned long state;

	// irq_local_disable_save(&state);
	tmp = *location;
	*location = 0;
	//irq_local_restore(state);
	return (u8)tmp;
}

View File

@@ -0,0 +1,97 @@
/*
* Copyright (C) 2010 B Labs Ltd.
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
*/
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include L4LIB_INC_ARCH(syslib.h) /* for BUG/BUG_ON, */
#include L4LIB_INC_ARCH(asm.h)
#include INC_SUBARCH(mmu_ops.h)
/*
 * Try to take the mutex word at @m with an ldrex/strex pair,
 * storing @tid into it.
 *
 * Returns L4_MUTEX_SUCCESS if the word was unlocked when we claimed
 * it, L4_MUTEX_CONTENDED if another tid was present (our tid is still
 * stored, recording the contention).
 *
 * FIX: both asm statements now carry a "memory" clobber so the
 * compiler cannot cache *m in a register across the exclusive
 * load/store pair.
 */
int __l4_mutex_lock(void *m, l4id_t tid)
{
	int tmp, ret;

loop:
	/* Exclusively load the current lock value */
	__asm__ __volatile__(
		"ldrex %0, [%1]\n"
		: "=r"(tmp)
		: "r"(m)
		: "memory"
	);

	if (tmp != L4_MUTEX_UNLOCKED)
		ret = L4_MUTEX_CONTENDED;
	else
		ret = L4_MUTEX_SUCCESS;

	/* Store our 'tid' */
	__asm__ __volatile__(
		"strex %0, %1, [%2]\n"
		: "=&r"(tmp)
		: "r"(tid), "r"(m)
		: "memory"
	);

	if (tmp != 0) {
		/* We couldn't succeed the store, we retry */
#ifdef CONFIG_SMP
		/* don't hog the CPU, sleep till an event */
		__asm__ __volatile__("wfe\n");
#endif
		goto loop;
	}
	dsb();
	return ret;
}
/*
 * Release the mutex word at @m with an ldrex/strex pair, storing
 * L4_MUTEX_UNLOCKED.
 *
 * Returns L4_MUTEX_SUCCESS if we held it (old value == @tid),
 * L4_MUTEX_CONTENDED if another tid was recorded there; the word is
 * set to UNLOCKED in either case.
 *
 * FIX: both asm statements now carry a "memory" clobber so the
 * compiler cannot cache *m in a register across the exclusive
 * load/store pair.
 */
int __l4_mutex_unlock(void *m, l4id_t tid)
{
	int tmp, ret;

loop:
	/* Load and see if the lock had our tid */
	__asm__ __volatile__(
		"ldrex %0, [%1]\n"
		: "=r"(tmp)
		: "r"(m)
		: "memory"
	);

	if (tmp != tid)
		ret = L4_MUTEX_CONTENDED;
	else
		ret = L4_MUTEX_SUCCESS;

	/* We store unlock value '0' */
	__asm__ __volatile__(
		"strex %0, %1, [%2]\n"
		: "=&r"(tmp)
		: "rI"(L4_MUTEX_UNLOCKED), "r"(m)
		: "memory"
	);

	if (tmp != 0) {
		/* The store wasn't successful, retry */
		goto loop;
	}
	dsb();
#ifdef CONFIG_SMP
	/* Wake any waiters sleeping in wfe on the lock path */
	__asm__ __volatile__("sev\n");
#endif
	return ret;
}
/*
 * Atomically read a byte-wide flag and clear it, using an
 * ldrex/strex retry loop (ARMv7 deprecates swpb).
 * Returns the value that was stored at @location.
 *
 * NOTE(review): the exclusive access is word-sized on a u8 * --
 * presumably @location is word-aligned and padded; confirm callers.
 */
u8 l4_atomic_dest_readb(u8 *location)
{
	unsigned int tmp, res;

	__asm__ __volatile__ (
		"1:			\n"
		"ldrex %0, [%2]		\n"	/* Fetch current value */
		"strex %1, %3, [%2]	\n"	/* Try to store 0 */
		"teq %1, #0		\n"	/* Lost exclusivity? */
		"bne 1b			\n"	/* Then retry */
		: "=&r"(tmp), "=&r"(res)
		: "r"(location), "r"(0)
		: "cc", "memory"
	);

	return (u8)tmp;
}

View File

@@ -0,0 +1,48 @@
/*
* Copyright (C) 2009 Bahadir Balban
*/
#include <l4lib/arch/arm/asm.h>
#include <l4lib/mutex.h>
/*
 * Try-lock a mutex word with ldrex/strex.
 * @r0 = address of mutex word
 * @r1 = unique tid of current thread
 * Returns (in r0) L4_MUTEX_SUCCESS if the word was unlocked,
 * L4_MUTEX_CONTENDED otherwise; the tid is stored in either case.
 */
BEGIN_PROC(__l4_mutex_lock)
1:
	ldrex	r2, [r0]		@ Load value
	cmp	r2, #L4_MUTEX_UNLOCKED	@ Decide what state lock will be if we succeed in a store
	movne	r2, #L4_MUTEX_CONTENDED
	moveq	r2, #L4_MUTEX_SUCCESS
	strex	r3, r1, [r0]		@ Store prospective lock state
	cmp	r3, #0			@ If not successful
	@ No WFE here: what if this were between 2 threads running on the same cpu
	bne	1b			@ Retry and decide again on the prospective lock state.
	dsb				@ Order the lock acquisition before critical-section accesses
	mov	r0, r2			@ Return the SUCCESS/CONTENDED verdict
	mov	pc, lr
END_PROC(__l4_mutex_lock)
/*
 * Unlock a mutex word with ldrex/strex.
 * @r0 = address of mutex word
 * @r1 = unique tid of current thread
 * Returns (in r0) L4_MUTEX_SUCCESS if we were the recorded holder,
 * L4_MUTEX_CONTENDED otherwise; the word is set to UNLOCKED either way.
 */
BEGIN_PROC(__l4_mutex_unlock)
	dsb				@ Drain critical-section accesses before releasing
	push	{r4}			@ Need a scratch register beyond r0-r3
	mov	r4, #L4_MUTEX_UNLOCKED
1:
	ldrex	r2, [r0]		@ Load current holder
	cmp	r2, r1			@ Were we the holder?
	moveq	r3, #L4_MUTEX_SUCCESS
	movne	r3, #L4_MUTEX_CONTENDED
	strex	r2, r4, [r0]		@ Attempt to store UNLOCKED
	cmp	r2, #0			@ Lost exclusivity?
	bne	1b			@ Then retry
	mov	r0, r3			@ Return the verdict
	pop	{r4}
	mov	pc, lr
END_PROC(__l4_mutex_unlock)

View File

@@ -0,0 +1,45 @@
/*
* Performance monitoring
*
* Copyright (C) 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include <l4lib/perfmon.h>
#if defined (CONFIG_DEBUG_PERFMON_USER)
/*
 * Resets/restarts cycle counter.
 *
 * Sequence: disable the cycle counter, clear it through the control
 * register, clear its overflow flag, then re-enable it.
 * (Prototype tightened from "()" to "(void)" -- in C, empty parens
 * declare an unspecified parameter list.)
 */
void perfmon_reset_start_cyccnt(void)
{
	volatile u32 pmcctrl;

	/* Disable the cycle counter register */
	cp15_write_perfmon_cntenclr(1 << PMCCNTR_BIT);

	/* Clear the cycle counter on ctrl register */
	pmcctrl = cp15_read_perfmon_ctrl();
	pmcctrl |= (1 << PMCR_C_BIT);
	cp15_write_perfmon_ctrl(pmcctrl);

	/* Clear overflow register */
	cp15_write_perfmon_overflow(1 << PMCCNTR_BIT);

	/* Enable the cycle count */
	cp15_write_perfmon_cntenset(1 << PMCCNTR_BIT);
}
/*
 * Reads current cycle counter, then clears and restarts it.
 * Returns the value sampled before the reset.
 * (Prototype tightened from "()" to "(void)".)
 */
u32 perfmon_read_reset_start_cyccnt(void)
{
	/* Sample before resetting so no cycles are attributed twice */
	volatile u32 cyccnt = cp15_read_perfmon_cyccnt();

	perfmon_reset_start_cyccnt();
	return cyccnt;
}
#endif /* End of !CONFIG_DEBUG_PERFMON_USER */

View File

@@ -4,8 +4,8 @@
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/
#include <l4lib/kip.h>
#include <l4lib/arch/syslib.h>
#include <l4lib/arch/utcb.h>
#include L4LIB_INC_ARCH(syslib.h)
#include L4LIB_INC_ARCH(utcb.h)
#include <l4lib/ipcdefs.h>
#include <l4/macros.h>
#include INC_GLUE(memlayout.h)
@@ -24,6 +24,7 @@ __l4_container_control_t __l4_container_control = 0;
__l4_capability_control_t __l4_capability_control = 0;
__l4_time_t __l4_time = 0;
__l4_mutex_control_t __l4_mutex_control = 0;
__l4_cache_control_t __l4_cache_control = 0;
struct kip *kip;
@@ -60,5 +61,6 @@ void __l4_init(void)
(__l4_container_control_t)kip->container_control;
__l4_time = (__l4_time_t)kip->time;
__l4_mutex_control = (__l4_mutex_control_t)kip->mutex_control;
__l4_cache_control = (__l4_cache_control_t)kip->cache_control;
}

View File

@@ -3,8 +3,8 @@
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <l4lib/arch/irq.h>
#include <l4lib/arch/syscalls.h>
#include L4LIB_INC_ARCH(irq.h)
#include L4LIB_INC_ARCH(syscalls.h)
#include <l4/api/irq.h>
/*
@@ -13,7 +13,7 @@
*/
int l4_irq_wait(int slot, int irqnum)
{
int irqval = l4_atomic_dest_readb(&l4_get_utcb()->notify[slot]);
int irqval = l4_atomic_dest_readb(&(l4_get_utcb()->notify[slot]));
if (!irqval)
return l4_irq_control(IRQ_CONTROL_WAIT, 0, irqnum);

View File

@@ -0,0 +1,62 @@
/*
* This module allocates an unused address range from
* a given memory region defined as the pool range.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4lib/lib/addr.h>
#include <stdio.h>
/*
 * Set up an address pool over [start, end) using an id pool the
 * caller has already allocated. One id tracks one page of the range.
 * Always returns 0.
 */
int address_pool_init(struct address_pool *pool,
		      struct id_pool *idpool,
		      unsigned long start, unsigned long end)
{
	pool->start = start;
	pool->end = end;
	pool->idpool = idpool;

	/* Size the bitmap by the number of pages covered */
	id_pool_init(idpool, __pfn(end - start));
	return 0;
}
/*
 * Allocates an id pool for [start, end) and initializes the
 * address pool with it.
 *
 * Returns 0 on success, a negative error code otherwise. @size is
 * currently unused; kept for interface compatibility.
 *
 * FIX: id_pool_new_init() encodes errors as a small negative value
 * cast to a pointer; the original ordered comparison "pointer < 0"
 * is not valid C and never matched on platforms where pointers are
 * unsigned. Compare through a signed integer instead.
 */
int address_pool_alloc_init(struct address_pool *pool,
			    unsigned long start, unsigned long end,
			    unsigned int size)
{
	pool->idpool = id_pool_new_init(__pfn(end - start));
	if ((long)pool->idpool < 0)
		return (int)(long)pool->idpool;

	pool->start = start;
	pool->end = end;
	return 0;
}
/*
 * Allocate @nitems contiguous items of @size bytes from the pool.
 * Returns the starting address, or 0 if the pool is exhausted.
 *
 * FIX: the original computed "(void *)(idx * size) + pool->start",
 * relying on the GNU void-pointer-arithmetic extension; compute the
 * address in integer space instead, which is standard C.
 */
void *address_new(struct address_pool *pool, int nitems, int size)
{
	unsigned int idx;

	/* Grab nitems consecutive ids; negative means none available */
	if ((int)(idx = ids_new_contiguous(pool->idpool, nitems)) < 0)
		return 0;

	return (void *)(pool->start + (unsigned long)idx * size);
}
/*
 * Return @nitems items of @size bytes starting at @addr to the pool.
 * Returns 0 on success, -1 if the range was not allocated from here.
 */
int address_del(struct address_pool *pool, void *addr, int nitems, int size)
{
	unsigned long idx = ((unsigned long)addr - pool->start) / size;

	if (ids_del_contiguous(pool->idpool, idx, nitems) < 0) {
		printf("%s: Invalid address range returned to "
		       "virtual address pool.\n", __FUNCTION__);
		return -1;
	}
	return 0;
}

109
conts/libl4/src/lib/bit.c Normal file
View File

@@ -0,0 +1,109 @@
/*
* Bit manipulation functions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4lib/lib/bit.h>
#include <stdio.h>
#include <l4/macros.h>
#include INC_GLUE(memory.h)
/*
 * Software emulation of ARM's CLZ (count leading zeroes) instruction.
 * Returns the number of leading zero bits; 32 for a zero input.
 */
unsigned int __clz(unsigned int bitvector)
{
	unsigned int zeroes = 0;

	/* Shift left until the MSB comes up set, or the word is exhausted */
	while (zeroes < 32 && !(bitvector & 0x80000000u)) {
		bitvector <<= 1;
		zeroes++;
	}
	return zeroes;
}
/*
 * Find the first clear bit among indices 0..limit-1 of the bitmap
 * at @word, set it, and return its index; -1 if all are set.
 */
int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		u32 *w = &word[BITWISE_GETWORD(i)];
		u32 mask = BITWISE_GETBIT(i);

		if (*w & mask)
			continue;	/* Already taken */

		*w |= mask;		/* Claim it */
		return i;
	}
	return -1;
}
/*
 * Find the first run of @nbits consecutive clear bits within
 * indices 0..limit-1 of the bitmap at @word, set them all, and
 * return the index of the first; -1 if no such run exists.
 */
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
					int nbits)
{
	int i = 0, first = 0, last = 0, found = 0;

	/* Can't allocate more than the limit */
	if (nbits > limit)
		return -1;

	/* This is a state machine that checks n contiguous free bits. */
	while (i + nbits <= limit) {
		/* Candidate run starts at i */
		first = i;
		last = i;
		/* Extend the run while bits stay clear */
		while (!(word[BITWISE_GETWORD(last)] & BITWISE_GETBIT(last))) {
			last++;
			i++;
			if (last == first + nbits) {
				found = 1;
				break;
			}
		}
		if (found)
			break;
		/* Run broken by a set bit; restart just past it */
		i++;
	}

	/* If found, set the bits */
	if (found) {
		for (int x = first; x < first + nbits; x++)
			word[BITWISE_GETWORD(x)] |= BITWISE_GETBIT(x);
		return first;
	} else
		return -1;
}
/*
 * Clear @bit in the bitmap at @word. Returns 0 if it was set,
 * -1 (with a diagnostic) if it was already clear.
 */
int check_and_clear_bit(u32 *word, int bit)
{
	u32 *w = &word[BITWISE_GETWORD(bit)];
	u32 mask = BITWISE_GETBIT(bit);

	if (!(*w & mask)) {
		printf("Trying to clear already clear bit\n");
		return -1;
	}

	*w &= ~mask;
	return 0;
}
/*
 * Set @bit in the bitmap at @word. Returns 0 if it was clear,
 * -1 if it was already set.
 */
int check_and_set_bit(u32 *word, int bit)
{
	u32 *w = &word[BITWISE_GETWORD(bit)];
	u32 mask = BITWISE_GETBIT(bit);

	if (*w & mask)
		return -1;	/* Already set */

	*w |= mask;
	return 0;
}
/*
 * Clear @nbits consecutive bits starting at @first. Returns 0 on
 * success; -1 as soon as a bit is found already clear (bits cleared
 * before the failure stay cleared).
 */
int check_and_clear_contig_bits(u32 *word, int first, int nbits)
{
	int end = first + nbits;

	for (int bit = first; bit < end; bit++)
		if (check_and_clear_bit(word, bit) < 0)
			return -1;
	return 0;
}

View File

@@ -0,0 +1,179 @@
/*
* Capability-related userspace helpers
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(syscalls.h)
#include <l4lib/lib/cap.h>
#include <stdio.h>
/* A static limit to total capabilities held by the library */
#define CAPS_TOTAL 64
static struct capability cap_array[CAPS_TOTAL];
static int total_caps = 0;
/*
 * Return the first cached capability whose type equals @cap_type,
 * or 0 if none is known.
 *
 * NOTE(review): the parameter shares its name with the cap_type()
 * accessor used in the loop; this compiles only because cap_type()
 * is presumably a function-like macro (a function of that name would
 * be shadowed). Consider renaming the parameter.
 */
struct capability *cap_get_by_type(unsigned int cap_type)
{
	for (int i = 0; i < total_caps; i++)
		if (cap_type(&cap_array[i]) == cap_type)
			return &cap_array[i];
	return 0;
}
/*
 * Return the first cached physical-memory capability that is not
 * device memory, or 0 if none.
 *
 * NOTE(review): the @cap_type parameter is never used -- the type is
 * hard-coded to CAP_TYPE_MAP_PHYSMEM -- and it shadows the cap_type()
 * accessor name. Candidate for removal once callers are audited.
 */
struct capability *cap_get_physmem(unsigned int cap_type)
{
	for (int i = 0; i < total_caps; i++)
		if ((cap_type(&cap_array[i]) == CAP_TYPE_MAP_PHYSMEM) &&
		    !cap_is_devmem(&cap_array[i])) {
			return &cap_array[i];
		}
	return 0;
}
/*
 * Read all capabilities of this container from the kernel into the
 * static cap_array[], setting total_caps.
 * Returns 0 on success or the negative syscall error.
 */
int caps_read_all(void)
{
	int err;

	/* Read number of capabilities */
	if ((err = l4_capability_control(CAP_CONTROL_NCAPS,
					 0, &total_caps)) < 0) {
		printf("l4_capability_control() reading # of"
		       " capabilities failed.\n Could not "
		       "complete CAP_CONTROL_NCAPS request.\n");
		return err;
	}

	/* The static array cannot hold more than CAPS_TOTAL entries */
	if (total_caps > CAPS_TOTAL) {
		printf("FATAL: More capabilities defined for the "
		       "container than the libl4 static limit. libl4 "
		       "limit=%d, actual = %d\n", CAPS_TOTAL, total_caps);
		BUG();
	}

	/* Read all capabilities */
	if ((err = l4_capability_control(CAP_CONTROL_READ,
					 0, cap_array)) < 0) {
		printf("l4_capability resource_control() reading of "
		       "capabilities failed.\n Could not "
		       "complete CAP_CONTROL_READ request.\n");
		return err;
	}

	//cap_array_print(ncaps, caparray);
	return 0;
}
/* Library init hook: populate the local capability cache from the kernel. */
void __l4_capability_init(void)
{
	caps_read_all();
}
/*
 * Print device type, number and irq of a device-memory capability.
 * Prints nothing for unrecognized device types.
 */
void cap_dev_print(struct capability *cap)
{
	const char *name;

	switch (cap_devtype(cap)) {
	case CAP_DEVTYPE_UART:
		name = "UART";
		break;
	case CAP_DEVTYPE_TIMER:
		name = "Timer";
		break;
	default:
		return;		/* Unknown device: stay silent */
	}

	printf("Device type:\t\t\t%s%d\n", name, cap_devnum(cap));
	printf("Device Irq:\t\t%d\n", cap->irq);
}
/*
 * Pretty-print one capability: ids, type and resource type.
 * Device-memory capabilities additionally get their device details.
 */
void cap_print(struct capability *cap)
{
	const char *type_str;
	const char *rtype_str;
	int is_dev = 0;

	printf("Capability id:\t\t\t%d\n", cap->capid);
	printf("Capability resource id:\t\t%d\n", cap->resid);
	printf("Capability owner id:\t\t%d\n", cap->owner);

	/* Map the capability type to its display name */
	switch (cap_type(cap)) {
	case CAP_TYPE_TCTRL:
		type_str = "Thread Control";
		break;
	case CAP_TYPE_EXREGS:
		type_str = "Exchange Registers";
		break;
	case CAP_TYPE_MAP_PHYSMEM:
		if (cap_is_devmem(cap)) {
			type_str = "Map/Physmem/Device";
			is_dev = 1;
		} else {
			type_str = "Map/Physmem";
		}
		break;
	case CAP_TYPE_MAP_VIRTMEM:
		type_str = "Map/Virtmem";
		break;
	case CAP_TYPE_IPC:
		type_str = "Ipc";
		break;
	case CAP_TYPE_UMUTEX:
		type_str = "Mutex";
		break;
	case CAP_TYPE_IRQCTRL:
		type_str = "IRQ Control";
		break;
	case CAP_TYPE_QUANTITY:
		type_str = "Quantitative";
		break;
	default:
		type_str = "Unknown";
		break;
	}
	printf("Capability type:\t\t%s\n", type_str);
	if (is_dev)
		cap_dev_print(cap);

	/* Map the resource type to its display name */
	switch (cap_rtype(cap)) {
	case CAP_RTYPE_THREAD:
		rtype_str = "Thread";
		break;
	case CAP_RTYPE_SPACE:
		rtype_str = "Space";
		break;
	case CAP_RTYPE_CONTAINER:
		rtype_str = "Container";
		break;
	case CAP_RTYPE_THREADPOOL:
		rtype_str = "Thread Pool";
		break;
	case CAP_RTYPE_SPACEPOOL:
		rtype_str = "Space Pool";
		break;
	case CAP_RTYPE_MUTEXPOOL:
		rtype_str = "Mutex Pool";
		break;
	case CAP_RTYPE_MAPPOOL:
		rtype_str = "Map Pool (PMDS)";
		break;
	case CAP_RTYPE_CPUPOOL:
		rtype_str = "Cpu Pool";
		break;
	case CAP_RTYPE_CAPPOOL:
		rtype_str = "Capability Pool";
		break;
	default:
		rtype_str = "Unknown";
		break;
	}
	printf("Capability resource type:\t%s\n", rtype_str);
	printf("\n");
}
/* Print a header followed by every capability in @caparray. */
void cap_array_print(int total_caps, struct capability *caparray)
{
	int i;

	printf("Capabilities\n"
	       "~~~~~~~~~~~~\n");

	for (i = 0; i < total_caps; i++)
		cap_print(&caparray[i]);

	printf("\n");
}

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,93 @@
/*
* Used for thread and space ids, and also for
* utcb tracking in page-sized-chunks.
*
* Copyright (C) 2009 B Labs Ltd.
*/
#include <stdio.h>
#include <l4lib/lib/idpool.h>
#include <l4/api/errno.h>
#include <malloc/malloc.h>
/*
 * Initialize an already-allocated id pool to track @totalbits ids.
 * NOTE(review): "+ 1" over-allocates a word when totalbits is an
 * exact multiple of the word size -- harmless overshoot, matches
 * id_pool_new_init().
 */
void id_pool_init(struct id_pool *pool, int totalbits)
{
	pool->nwords = BITWISE_GETWORD(totalbits) + 1;
	pool->bitlimit = totalbits;
}
/*
 * Allocate (zeroed) and initialize an id pool for @totalbits ids,
 * with the bitmap stored inline after the struct.
 * Returns the pool, or a PTR_ERR-encoded -ENOMEM on allocation failure.
 */
struct id_pool *id_pool_new_init(int totalbits)
{
	int nwords = BITWISE_GETWORD(totalbits) + 1;
	struct id_pool *new = kzalloc((nwords * SZ_WORD)
				      + sizeof(struct id_pool));
	if (!new)
		return PTR_ERR(-ENOMEM);

	new->nwords = nwords;
	new->bitlimit = totalbits;

	return new;
}
/* Search for a free slot up to the limit given.
 * Returns the newly allocated id, or -1 if the pool is full. */
int id_new(struct id_pool *pool)
{
	return find_and_set_first_free_bit(pool->bitmap, pool->bitlimit);
}
/*
 * Allocate @numids contiguous ids and return the first one,
 * or -1 (with a warning) if no contiguous run is free.
 */
int ids_new_contiguous(struct id_pool *pool, int numids)
{
	int first = find_and_set_first_free_contig_bits(pool->bitmap,
							pool->bitlimit,
							numids);

	if (first < 0)
		printf("%s: Warning! New id alloc failed\n", __FUNCTION__);

	return first;
}
/*
 * Release @numids contiguous ids starting at @first.
 * Returns 0 on success, negative/non-zero on a bad range.
 */
int ids_del_contiguous(struct id_pool *pool, int first, int numids)
{
	int ret;

	/* Range must fall inside the bitmap */
	if (first + numids > pool->nwords * WORD_BITS)
		return -1;

	ret = check_and_clear_contig_bits(pool->bitmap, first, numids);
	if (ret)
		printf("%s: Error: Invalid argument range.\n", __FUNCTION__);

	return ret;
}
/*
 * Release a single id back to the pool.
 * Returns 0 on success, negative on error (out-of-range id or the
 * id was not allocated).
 *
 * FIX: the original condition read
 *     if ((ret = check_and_clear_bit(...) < 0))
 * which, due to precedence, assigned the *comparison result* (0/1)
 * to ret -- so errors were returned as +1 instead of the negative
 * error code (and success paths were unaffected only by luck).
 */
int id_del(struct id_pool *pool, int id)
{
	int ret;

	if (pool->nwords * WORD_BITS < id)
		return -1;

	if ((ret = check_and_clear_bit(pool->bitmap, id)) < 0)
		printf("%s: Error: Could not delete id.\n", __FUNCTION__);

	return ret;
}
/* Claim a specific @id from the pool, if it is still free.
 * Returns @id on success, negative if it was already taken. */
int id_get(struct id_pool *pool, int id)
{
	int ret = check_and_set_bit(pool->bitmap, id);

	return (ret < 0) ? ret : id;
}
/* Return 1 if no id is currently allocated in the pool, 0 otherwise. */
int id_is_empty(struct id_pool *pool)
{
	int i;

	for (i = 0; i < pool->nwords; i++) {
		if (pool->bitmap[i] != 0)
			return 0;
	}
	return 1;
}

View File

@@ -0,0 +1,71 @@
#include <l4lib/mutex.h>
#include <l4lib/lib/thread.h>
#include <memcache/memcache.h>
/*
* Static stack and utcb for same-space threads.
* +1 is a good approximation for allocating for bitmap
* structures in the memcache.
*/
static char stack[THREADS_TOTAL * (STACK_SIZE + 1)] ALIGN(STACK_SIZE);
static char utcb[THREADS_TOTAL * (UTCB_SIZE + 1)] ALIGN(UTCB_SIZE);
struct mem_cache *utcb_cache;
struct mem_cache *stack_cache;
struct l4_thread_list l4_thread_list;
/* Number of thread structs + allowance for memcache internal data */
#define L4_THREAD_LIST_BUFFER_SIZE (THREADS_TOTAL * \
(sizeof(struct l4_thread_list)) + 256)
static char l4_thread_list_buf[L4_THREAD_LIST_BUFFER_SIZE];
/*
 * Initialize the global thread list head and the cache that backs
 * per-thread l4_thread structs.
 *
 * NOTE(review): the backing buffer macro is sized with
 * sizeof(struct l4_thread_list) while the cache element size here is
 * sizeof(struct l4_thread) -- presumably the structs are close in
 * size, but confirm the buffer is large enough for THREADS_TOTAL
 * l4_thread elements.
 */
void l4_thread_list_init(void)
{
	struct l4_thread_list *tlist = &l4_thread_list;

	/* Initialize the head struct */
	memset(tlist, 0, sizeof (*tlist));
	link_init(&tlist->thread_list);
	l4_mutex_init(&tlist->lock);

	/* Initialize a cache of l4_thread_list structs */
	if (!(tlist->thread_cache =
	      mem_cache_init(&l4_thread_list_buf,
			     L4_THREAD_LIST_BUFFER_SIZE,
			     sizeof(struct l4_thread), 0))) {
		printf("FATAL: Could not initialize internal "
		       "thread struct cache.\n");
		BUG();
	}
}
/*
 * Initialize the stack allocator over the static stack[] area,
 * STACK_SIZE-aligned with STACK_SIZE-sized elements.
 *
 * FIX: the cache was told it owned STACK_SIZE * (THREADS_TOTAL + 1)
 * bytes while the backing array is THREADS_TOTAL * (STACK_SIZE + 1)
 * bytes; whenever STACK_SIZE > THREADS_TOTAL the cache would run past
 * the array. Hand it the array's real size instead.
 */
void l4_stack_alloc_init(void)
{
	BUG_ON(!(stack_cache =
		 mem_cache_init((void *)stack, sizeof(stack),
				STACK_SIZE, STACK_SIZE)));
}
/*
 * Initialize a memcache that is aligned to utcb size.
 *
 * FIX: as with the stack cache, the declared size
 * UTCB_SIZE * (THREADS_TOTAL + 1) could exceed the backing array of
 * THREADS_TOTAL * (UTCB_SIZE + 1) bytes; use the array's real size.
 */
void l4_utcb_alloc_init(void)
{
	BUG_ON(!(utcb_cache =
		 mem_cache_init((void *)utcb, sizeof(utcb),
				UTCB_SIZE, UTCB_SIZE)));
}
/*
 * Library init hook: bring up the utcb/stack allocators and thread
 * list, then register the already-running thread with the library.
 */
void __l4_threadlib_init(void)
{
	l4_utcb_alloc_init();
	l4_stack_alloc_init();
	l4_thread_list_init();
	l4_parent_thread_init();
}

View File

@@ -0,0 +1,286 @@
/*
* Thread creation userspace helpers
*
* Copyright (C) 2009 - 2010 B Labs Ltd.
*
* Author: Bahadir Balban
*/
#include <l4lib/lib/thread.h>
#include <l4lib/exregs.h>
#include <l4lib/mutex.h>
#include <l4/api/errno.h>
#include <l4/api/thread.h>
#include <memcache/memcache.h>
/* Allocate one UTCB_SIZE slot from the utcb cache; 0 when exhausted. */
void *l4_utcb_alloc(void)
{
	return mem_cache_alloc(utcb_cache);
}
/* Return a utcb slot to the cache; BUGs if it was not from this cache. */
void l4_utcb_free(void *utcb)
{
	BUG_ON(mem_cache_free(utcb_cache, utcb) < 0);
}
/*
 * Allocate a stack for a same-space thread and return a pointer to
 * its TOP, since ARM stacks grow downwards. Returns 0 when the cache
 * is exhausted.
 *
 * FIX: the original added STACK_SIZE to the cache result before
 * checking it, so a failed allocation came back as the non-NULL
 * value (void *)STACK_SIZE and defeated callers' null checks.
 */
void *l4_stack_alloc(void)
{
	void *stack = mem_cache_alloc(stack_cache);

	if (!stack)
		return 0;

	/* Bias to the top, since it will grow downwards */
	return (char *)stack + STACK_SIZE;
}
/*
 * Return a stack to the cache. @stack is the stack-top pointer as
 * handed out by l4_stack_alloc(); convert it back to the base
 * address the cache tracks.
 *
 * NOTE: may be unaligned
 */
void l4_stack_free(void *stack)
{
	void *base = (char *)stack - STACK_SIZE;

	BUG_ON(mem_cache_free(stack_cache, base) < 0);
}
/*
 * Allocate and init a thread struct for same space.
 *
 * Fills in @thread's utcb and stack; returns @thread, or a
 * PTR_ERR-encoded -ENOMEM (with any partial allocation undone).
 */
struct l4_thread *l4_thread_init(struct l4_thread *thread)
{
	/*
	 * Allocate stack and utcb
	 */
	if (!(thread->utcb = l4_utcb_alloc()))
		return PTR_ERR(-ENOMEM);

	if (!(thread->stack = l4_stack_alloc())) {
		/* Roll back the utcb so nothing leaks */
		l4_utcb_free(thread->utcb);
		return PTR_ERR(-ENOMEM);
	}

	return thread;
}
/*
 * Unlink @thread from the global list and release its stack, utcb
 * and struct back to their caches.
 *
 * NOTE(review): thread->lock is taken but never released before the
 * struct is returned to the cache -- presumably safe because the
 * memory is recycled, but confirm l4_mutex state survives reuse.
 */
void l4_thread_free(struct l4_thread *thread)
{
	struct l4_thread_list *tlist = &l4_thread_list;

	/* Lock the list */
	l4_mutex_lock(&tlist->lock);

	/* Lock the thread */
	l4_mutex_lock(&thread->lock);

	/* Remove the thread from its list */
	list_remove(&thread->list);
	tlist->total--;

	/* Unlock list */
	l4_mutex_unlock(&tlist->lock);

	/* Free thread's stack and utcb if they exist */
	if (thread->stack)
		l4_stack_free(thread->stack);
	if (thread->utcb)
		l4_utcb_free(thread->utcb);

	/* Free the thread itself */
	BUG_ON(mem_cache_free(tlist->thread_cache, thread) < 0);
}
/*
 * No locking version: release @thread's stack, utcb and struct
 * without touching the global list or any locks. Caller must have
 * already unlinked the thread.
 */
void l4_thread_free_nolock(struct l4_thread *thread)
{
	struct l4_thread_list *tlist = &l4_thread_list;

	/* Release whichever per-thread resources exist */
	if (thread->utcb)
		l4_utcb_free(thread->utcb);
	if (thread->stack)
		l4_stack_free(thread->stack);

	/* Free the thread itself */
	BUG_ON(mem_cache_free(tlist->thread_cache, thread) < 0);
}
/*
 * Destroys a child thread and reclaims its
 * stack and utcb.
 *
 * NOTE: This function is to be called with caution:
 * The destroyed child must be in a state that will
 * not compromise the system integrity, i.e. not holding
 * any locks, not in the middle of an operation.
 *
 * We usually don't know whether a synchronous destruction
 * would cause the thread to leave structures prematurely
 * (e.g. need to figure out a way of knowing if the thread
 * is holding any locks, busy, has children ...)
 *
 * NOTE(review): if l4_thread_control() fails, the thread has already
 * been unlinked from the list and its struct/stack/utcb leak; the
 * thread lock is also never released on that path. TODO: audit.
 */
int thread_destroy(struct l4_thread *thread)
{
	struct l4_thread_list *tlist = &l4_thread_list;
	int err;

	/* Lock the list */
	l4_mutex_lock(&tlist->lock);

	/* Lock the thread */
	l4_mutex_lock(&thread->lock);

	/* Remove the thread from its list */
	list_remove(&thread->list);
	tlist->total--;

	/* Unlock list */
	l4_mutex_unlock(&tlist->lock);

	/* Destroy the thread */
	if ((err = l4_thread_control(THREAD_DESTROY, &thread->ids)) < 0)
		return err;

	/* Reclaim l4_thread structure */
	l4_thread_free_nolock(thread);

	return 0;
}
/*
 * Allocate an l4_thread struct from the cache, set up its stack and
 * utcb, and insert it on the global thread list.
 * Returns the thread, or a PTR_ERR-encoded -ENOMEM.
 *
 * FIX: the original overwrote 'thread' with l4_thread_init()'s error
 * cookie and then freed THAT back to the cache -- freeing a bogus
 * pointer and leaking the real struct. Keep the original pointer for
 * the cleanup path.
 */
struct l4_thread *l4_thread_alloc_init(void)
{
	struct l4_thread_list *tlist = &l4_thread_list;
	struct l4_thread *thread, *ret;

	if (!(thread = mem_cache_zalloc(tlist->thread_cache)))
		return PTR_ERR(-ENOMEM);

	link_init(&thread->list);
	l4_mutex_init(&thread->lock);

	/* Allocate the thread's stack and utcb */
	if (IS_ERR(ret = l4_thread_init(thread))) {
		mem_cache_free(tlist->thread_cache, thread);
		return ret;
	}

	list_insert(&tlist->thread_list, &thread->list);
	tlist->total++;

	return thread;
}
/*
 * Called during initialization for setting up the
 * existing runnable thread.
 *
 * FIXES: (1) check the allocation result before dereferencing it;
 * (2) clear thread->stack after handing the unneeded stack back, so
 * a later l4_thread_free() cannot free the same stack twice (it
 * frees thread->stack whenever non-null).
 */
void l4_parent_thread_init(void)
{
	struct l4_thread *thread;
	struct exregs_data exregs;
	int err;

	/* Allocate structures for the first thread */
	thread = l4_thread_alloc_init();
	if (IS_ERR(thread)) {
		printf("FATAL: Could not allocate structures for "
		       "currently runnable thread.\n");
		return;
	}

	/*
	 * The current thread already runs on a stack; free the
	 * allocated one and clear the dangling pointer.
	 */
	l4_stack_free(thread->stack);
	thread->stack = 0;

	/* Read thread ids */
	l4_getid(&thread->ids);

	/* Set up utcb via exregs */
	memset(&exregs, 0, sizeof(exregs));
	exregs_set_utcb(&exregs, (unsigned long)thread->utcb);

	if ((err = l4_exchange_registers(&exregs,
					 thread->ids.tid)) < 0) {
		printf("FATAL: Initialization of structures for "
		       "currently runnable thread has failed.\n"
		       "exregs err=%d\n", err);
		l4_thread_free(thread);
	}
}
/* For threads to exit on their own without any library maintenance.
 * The low bits of the thread-control request carry @exit_code. */
void thread_exit(int exit_code)
{
	struct task_ids ids;

	/* FIXME: Find this from utcb */
	l4_getid(&ids);

	l4_thread_control(THREAD_DESTROY | exit_code, &ids);
}
/*
 * Block until @thread exits, reclaim its library structures, and
 * return its (zero or positive) exit code; negative on wait failure
 * (in which case the structures are NOT reclaimed).
 */
int thread_wait(struct l4_thread *thread)
{
	int ret;

	/* Wait for the thread to exit */
	if ((ret = l4_thread_control(THREAD_WAIT, &thread->ids)) < 0)
		return ret;

	/* Claim its library structures */
	l4_thread_free(thread);

	/* Return zero or positive thread exit code */
	return ret;
}
/*
 * Create a new thread in the same address space as caller.
 *
 * @func: thread entry point; its int return becomes the exit code.
 * @args: single argument passed to @func.
 * @flags: must include TC_SHARE_SPACE; TC_NOSTART defers THREAD_RUN.
 * @tptr: out-parameter receiving the new thread struct.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths after THREAD_CREATE succeeded,
 * only the library structures are freed -- the kernel thread itself
 * is not destroyed. TODO: confirm whether a THREAD_DESTROY belongs
 * in out_err.
 */
int thread_create(int (*func)(void *), void *args, unsigned int flags,
		  struct l4_thread **tptr)
{
	struct exregs_data exregs;
	struct l4_thread *thread;
	int err;

	/* Shared space only */
	if (!(TC_SHARE_SPACE & flags)) {
		printf("%s: Warning - This function allows only "
		       "shared space thread creation.\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	/* Allocate a thread struct */
	if (IS_ERR(thread = l4_thread_alloc_init()))
		return (int)thread;

	/* Assign own space id since TC_SHARE_SPACE requires it */
	l4_getid(&thread->ids);

	/* Create thread in kernel */
	if ((err = l4_thread_control(THREAD_CREATE |
				     flags, &thread->ids)) < 0)
		goto out_err;

	/*
	 * Seed the new stack for setup_new_thread, which pops the
	 * argument first, then the function pointer (stack top is at
	 * thread->stack, growing down).
	 */
	/* First word of new stack is arg */
	thread->stack[-1] = (unsigned long)args;

	/* Second word of new stack is function address */
	thread->stack[-2] = (unsigned long)func;

	/* Setup new thread pc, sp, utcb */
	memset(&exregs, 0, sizeof(exregs));
	exregs_set_stack(&exregs, (unsigned long)thread->stack);
	exregs_set_utcb(&exregs, (unsigned long)thread->utcb);
	exregs_set_pc(&exregs, (unsigned long)setup_new_thread);

	if ((err = l4_exchange_registers(&exregs, thread->ids.tid)) < 0)
		goto out_err;

	/* Start the new thread, unless specified otherwise */
	if (!(flags & TC_NOSTART))
		if ((err = l4_thread_control(THREAD_RUN,
					     &thread->ids)) < 0)
			goto out_err;

	/* Set pointer to thread structure */
	*tptr = thread;

	return 0;

out_err:
	l4_thread_free(thread);
	return err;
}

View File

@@ -5,8 +5,8 @@
*/
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include L4LIB_INC_ARCH(syscalls.h)
#include L4LIB_INC_ARCH(syslib.h)
/*
* NOTES: