Added mutex_control syscall for userspace mutexes.
- Compiles, and Codezero runs as normal without touching the mutex implementation.
- Mutex implementation needs testing.

The mutex_control syscall allows userspace programs to declare any virtual address as a mutex lock and ask the kernel for help in resolving locking contention.
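As a usage sketch (not part of this commit): a task declares a mutex with the L4_MUTEX() macro or l4_mutex_init() and wraps its critical section with l4_mutex_lock()/l4_mutex_unlock(). The counter_lock, shared_counter and bump_counter names below are made up for illustration.

#include <l4lib/mutex.h>

/* Expands to: static struct l4_mutex counter_lock = { L4_MUTEX_UNLOCKED }; */
static L4_MUTEX(counter_lock);
static int shared_counter;

void bump_counter(void)
{
	l4_mutex_lock(&counter_lock);	/* falls back to the mutex_control syscall on contention */
	shared_counter++;
	l4_mutex_unlock(&counter_lock);	/* wakes kernel-side waiters if contention was detected */
}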
@@ -78,6 +78,10 @@ typedef int (*__l4_time_t)(void *timeval, int set);
extern __l4_time_t __l4_time;
int l4_time(void *timeval, int set);

typedef int (*__l4_mutex_control_t)(void *mutex_word, int op);
extern __l4_mutex_control_t __l4_mutex_control;
int l4_mutex_control(void *mutex_word, int op);

/* To be supplied by server tasks. */
void *virt_to_phys(void *);
38	tasks/libl4/include/l4lib/mutex.h	Normal file
@@ -0,0 +1,38 @@
/*
 * User space locking
 *
 * Copyright (C) 2009 Bahadir Bilgehan Balban
 */

#ifndef __L4_MUTEX_H__
#define __L4_MUTEX_H__

#if !defined(__ASSEMBLY__)

#include <l4/api/mutex.h>

struct l4_mutex {
	unsigned int lock;
} __attribute__((aligned(sizeof(int))));

void l4_mutex_init(struct l4_mutex *m);
int l4_mutex_lock(struct l4_mutex *m);
int l4_mutex_unlock(struct l4_mutex *m);

#endif /* !__ASSEMBLY__ */

/* Mutex return values - don't mix these up with mutex states */
#define L4_MUTEX_CONTENDED	-1
#define L4_MUTEX_SUCCESS	0

/* Mutex states - any valid tid value is a locked state */
#define L4_MUTEX_UNLOCKED	-1

#define L4_MUTEX(m) \
	struct l4_mutex m = { L4_MUTEX_UNLOCKED }

#endif /* __L4_MUTEX_H__ */
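To illustrate the encoding above: the lock word holds L4_MUTEX_UNLOCKED (-1) when free and the owner's tid (any value >= 0) when held. The following is a rough C-level equivalent of the atomic swap the library performs in ARM assembly, using GCC's __atomic_exchange_n builtin purely for illustration; l4_mutex_try_lock is a made-up name, and <l4lib/mutex.h> plus <l4lib/types.h> (for l4id_t) are assumed to be included.

/* Illustration only - the library itself uses the ARM swp instruction
 * in tasks/libl4/src/arm/mutex.S rather than a compiler builtin. */
static inline int l4_mutex_try_lock(struct l4_mutex *m, l4id_t tid)
{
	/* Atomically store our tid and fetch whatever the lock word held before. */
	int old = (int)__atomic_exchange_n(&m->lock, (unsigned int)tid, __ATOMIC_ACQUIRE);

	return (old == L4_MUTEX_UNLOCKED) ? L4_MUTEX_SUCCESS : L4_MUTEX_CONTENDED;
}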
66	tasks/libl4/src/arm/mutex.S	Normal file
@@ -0,0 +1,66 @@
/*
 * Copyright (C) 2009 Bahadir Balban
 */

#include <l4lib/arch/asm.h>
#include <l4lib/mutex.h>

/*
 * NOTES:
 *
 * Recap on swp:
 *
 * swp rx, ry, [rz]
 *
 * In one instruction:
 *
 * 1) Stores the value in ry into the location pointed to by rz.
 * 2) Loads the value at the location of rz into rx.
 *
 * By doing so, in one instruction one can attempt to lock
 * a word and discover whether it was already locked.
 *
 * Why use the tid of the thread to lock the mutex instead of
 * a single lock value?
 *
 * Because in one atomic instruction, the locking attempt must
 * indicate not only whether the lock is taken, but also whether
 * it is contended. A unified lock value would not be sufficient;
 * the only way to indicate a contended lock is to store the
 * unique TID of the locker.
 */

/*
 * Any non-negative value that is a potential TID
 * (including 0) means the mutex is locked.
 */

/*
 * @r0 = address of mutex word
 * @r1 = unique tid of current thread
 */
BEGIN_PROC(__l4_mutex_lock)
	swp	r2, r1, [r0]			@ Atomically write our tid, read old value
	cmp	r2, #L4_MUTEX_UNLOCKED		@ Was the lock available?
	movne	r0, #L4_MUTEX_CONTENDED		@ Indicate failure
	moveq	r0, #L4_MUTEX_SUCCESS		@ Indicate success
	mov	pc, lr
END_PROC(__l4_mutex_lock)

/*
 * @r0 = address of mutex word
 * @r1 = unique tid of current thread
 */
BEGIN_PROC(__l4_mutex_unlock)
	mov	r3, #L4_MUTEX_UNLOCKED
	swp	r2, r3, [r0]			@ Atomically release, read old value
	cmp	r2, r1				@ Check lock had our original tid value
	movne	r0, #L4_MUTEX_CONTENDED		@ Indicate contention
	moveq	r0, #L4_MUTEX_SUCCESS		@ Indicate no contention
	cmp	r2, #L4_MUTEX_UNLOCKED		@ Or - was it already unlocked?
1:
	beq	1b				@ If so, busy-spin to indicate a bug
	mov	pc, lr
END_PROC(__l4_mutex_unlock)
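A worked contention trace under this scheme (tids 5 and 7 are made-up values):

/*
 * word = -1 (L4_MUTEX_UNLOCKED)
 * Thread A (tid 5): swp -> reads -1, writes 5   => SUCCESS, A owns the lock
 * Thread B (tid 7): swp -> reads  5, writes 7   => CONTENDED, B calls
 *                   l4_mutex_control(word, L4_MUTEX_LOCK) and sleeps
 * Thread A unlock:  swp -> reads  7, writes -1  => 7 != 5, contention seen, A calls
 *                   l4_mutex_control(word, L4_MUTEX_UNLOCK), kernel wakes B
 * Thread B retries: swp -> reads -1, writes 7   => SUCCESS, B owns the lock
 */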
@@ -23,6 +23,7 @@ __l4_space_control_t __l4_space_control = 0;
__l4_exchange_registers_t __l4_exchange_registers = 0;
__l4_kmem_control_t __l4_kmem_control = 0;
__l4_time_t __l4_time = 0;
__l4_mutex_control_t __l4_mutex_control = 0;

struct kip *kip;

@@ -54,5 +55,6 @@ void __l4_init(void)
		(__l4_exchange_registers_t)kip->exchange_registers;
	__l4_kmem_control = (__l4_kmem_control_t)kip->kmem_control;
	__l4_time = (__l4_time_t)kip->time;
	__l4_mutex_control = (__l4_mutex_control_t)kip->mutex_control;
}
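The l4_mutex_control() C entry point itself is not shown in this diff; by analogy with the other KIP-based stubs it is presumably a thin wrapper around the function pointer filled in above, roughly:

/* Hypothetical sketch - the real wrapper is not part of this commit. */
int l4_mutex_control(void *mutex_word, int op)
{
	return __l4_mutex_control(mutex_word, op);
}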
66	tasks/libl4/src/mutex.c	Normal file
@@ -0,0 +1,66 @@
/*
 * Userspace mutex implementation
 *
 * Copyright (C) 2009 Bahadir Bilgehan Balban
 */
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include <l4lib/arch/syscalls.h>

/*
 * NOTES:
 * l4_mutex_lock() locks an initialized mutex.
 * If the lock is contended, it calls the mutex syscall.
 * l4_mutex_unlock() unlocks an acquired mutex.
 * If there was contention, the mutex syscall is called
 * so that the kernel can resolve it.
 *
 * Internals:
 *
 * (1) The kernel creates a waitqueue for every unique
 *     mutex in the system, i.e. every unique physical
 *     address that is contended as a mutex. In that respect,
 *     virtual mutex addresses are translated to physical
 *     addresses and checked for a match.
 *
 * (2) If a mutex is contended, the kernel is called by the
 *     locker. The syscall simply wakes up any waiters on
 *     the mutex in FIFO order and returns.
 *
 * Issues:
 * - The kernel action is merely to wake up sleepers. If
 *   a new thread acquires the lock meanwhile, all those woken
 *   up threads would have to sleep again.
 * - All sleepers are woken up (aka the thundering herd). This
 *   must be done because if a single task were woken up, there
 *   is no guarantee that it would in turn wake up others.
 *   It might even quit attempting to take the lock.
 * - Whether this is the best design - time will tell.
 */

extern int __l4_mutex_lock(void *word, l4id_t tid);
extern int __l4_mutex_unlock(void *word, l4id_t tid);

void l4_mutex_init(struct l4_mutex *m)
{
	m->lock = L4_MUTEX_UNLOCKED;
}

int l4_mutex_lock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();

	while (__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED)
		l4_mutex_control(&m->lock, L4_MUTEX_LOCK);
	return 0;
}

int l4_mutex_unlock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();

	if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED)
		l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK);
	return 0;
}