Added mutex_control syscall for userspace mutexes.

- Compiles, and Codezero runs as normal without exercising the mutex implementation.
- The mutex implementation itself still needs testing.

The mutex_control syscall allows userspace programs to declare any virtual
address as a mutex lock and to ask the kernel for help in resolving
lock contention.
Bahadir Balban
2009-05-29 15:34:04 +03:00
parent ab9e036cb7
commit b11d4c4607
16 changed files with 433 additions and 14 deletions
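
From the userspace side (tasks/libl4 below), usage should look roughly like
this minimal sketch; the counter and bump() are hypothetical, but L4_MUTEX,
l4_mutex_lock() and l4_mutex_unlock() are the API added by this commit:

#include <l4lib/mutex.h>

static L4_MUTEX(counter_lock);  /* statically initialized to L4_MUTEX_UNLOCKED */
static int counter;             /* hypothetical shared data */

void bump(void)
{
	l4_mutex_lock(&counter_lock);   /* swp fast path; syscall only on contention */
	counter++;
	l4_mutex_unlock(&counter_lock); /* syscall only if a contender showed up */
}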


@@ -62,11 +62,11 @@ struct kip {
u32 thread_switch;
u32 schedule;
u32 getid;
u32 mutex_control;
u32 arch_syscall0;
u32 arch_syscall1;
u32 arch_syscall2;
u32 arch_syscall3;
u32 utcb;

include/l4/api/mutex.h (new file, 14 lines)

@@ -0,0 +1,14 @@
#ifndef __MUTEX_CONTROL_H__
#define __MUTEX_CONTROL_H__
/* Request ids for mutex_control syscall */
#if defined (__KERNEL__)
#define MUTEX_CONTROL_LOCK L4_MUTEX_LOCK
#define MUTEX_CONTROL_UNLOCK L4_MUTEX_UNLOCK
#endif
#define L4_MUTEX_LOCK 0
#define L4_MUTEX_UNLOCK 1
#endif /* __MUTEX_CONTROL_H__*/


@@ -23,7 +23,8 @@
#define sys_kread_offset 0x28
#define sys_kmem_control_offset 0x2C
#define sys_time_offset 0x30
#define syscalls_end_offset sys_time_offset
#define sys_mutex_control_offset 0x34
#define syscalls_end_offset sys_mutex_control_offset
#define SYSCALLS_TOTAL ((syscalls_end_offset >> 2) + 1)
void print_syscall_context(struct ktcb *t);
@@ -41,5 +42,6 @@ int sys_getid(struct syscall_context *);
int sys_kread(struct syscall_context *);
int sys_kmem_control(struct syscall_context *);
int sys_time(struct syscall_context *);
int sys_mutex_control(struct syscall_context *);
#endif /* __SYSCALL_H__ */


@@ -65,9 +65,12 @@
#define virt_to_phys(addr) ((unsigned int)(addr) - KERNEL_OFFSET)
#endif
#define PAGER_ADDR(x) ((x >= INITTASK_AREA_START) && (x < INITTASK_AREA_END))
#define KERN_ADDR(x) ((x >= KERNEL_AREA_START) && (x < KERNEL_AREA_END))
#define USER_ADDR(x) ((x >= USER_AREA_START) && (x < USER_AREA_END))
#define UTCB_ADDR(x) ((x >= UTCB_AREA_START) && (x < UTCB_AREA_END))
#define SHM_ADDR(x) ((x >= SHM_AREA_START) && (x < SHM_AREA_END))
#define USER_ADDR(x) (((x >= USER_AREA_START) && (x < USER_AREA_END)) || \
UTCB_ADDR(x) || SHM_ADDR(x) || PAGER_ADDR(x))
#define PRIVILEGED_ADDR(x) (KERN_ADDR(x) || (x >= ARM_HIGH_VECTOR) || \
(x >= IO_AREA_START && x < IO_AREA_END))


@@ -10,6 +10,8 @@ struct waitqueue {
struct ktcb *task;
};
#define WAKEUP_ASYNC 0
enum wakeup_flags {
WAKEUP_INTERRUPT = (1 << 0),
WAKEUP_SYNC = (1 << 1)
@@ -74,5 +76,7 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags);
int wake_up_task(struct ktcb *task, unsigned int flags);
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);
int wait_on(struct waitqueue_head *wqh);
#endif /* __LIB_WAIT_H__ */


@@ -3,7 +3,7 @@ Import('env')
Import('config_symbols')
# The set of source files associated with this SConscript file.
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'space.c']
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'space.c', 'mutex.c']
obj = env.Object(src_local)

src/api/mutex.c (new file, 213 lines)

@@ -0,0 +1,213 @@
/*
* Userspace mutex implementation
*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#include <l4/lib/wait.h>
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/tcb.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/api/mutex.h>
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
#include INC_GLUE(memory.h)
struct mutex_queue {
unsigned long physical;
struct list_head list;
struct waitqueue_head wqh;
};
struct mutex_queue_head {
struct list_head list;
int count;
} mutex_queue_head;
/*
* Lock for mutex_queue create/deletion and also list add/removal.
* Both operations are done jointly so a single lock is enough.
*/
struct mutex mutex_control_mutex;
void mutex_queue_head_lock()
{
mutex_lock(&mutex_control_mutex);
}
void mutex_queue_head_unlock()
{
mutex_unlock(&mutex_control_mutex);
}
void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
{
/* This is the unique key that describes this mutex */
mq->physical = physical;
INIT_LIST_HEAD(&mq->list);
waitqueue_head_init(&mq->wqh);
}
void mutex_control_add(struct mutex_queue *mq)
{
BUG_ON(!list_empty(&mq->list));
list_add(&mq->list, &mutex_queue_head.list);
mutex_queue_head.count++;
}
void mutex_control_remove(struct mutex_queue *mq)
{
list_del_init(&mq->list);
mutex_queue_head.count--;
}
/* Note: returns the queue pointer on success, 0 if not found. */
struct mutex_queue *mutex_control_find(unsigned long mutex_physical)
{
struct mutex_queue *mutex_queue;
/* Find the mutex queue with this key */
list_for_each_entry(mutex_queue, &mutex_queue_head.list, list)
if (mutex_queue->physical == mutex_physical)
return mutex_queue;
return 0;
}
struct mutex_queue *mutex_control_create(unsigned long mutex_physical)
{
struct mutex_queue *mutex_queue;
/* Allocate the mutex queue structure */
if (!(mutex_queue = kzalloc(sizeof(struct mutex_queue))))
return 0;
/* Init and return */
mutex_queue_init(mutex_queue, mutex_physical);
return mutex_queue;
}
void mutex_control_delete(struct mutex_queue *mq)
{
BUG_ON(!list_empty(&mq->list));
/* Test internals of waitqueue */
BUG_ON(mq->wqh.sleepers);
BUG_ON(!list_empty(&mq->wqh.task_list));
kfree(mq);
}
/*
* A contended thread is expected to show up with the
* contended mutex address here.
*
* (1) The mutex is converted into its physical form and
* searched for in the existing mutex list. If it does not
* appear there, it gets added.
* (2) The thread is put to sleep in the mutex wait queue
* until a wake up event occurs.
*/
int mutex_control_lock(unsigned long mutex_address)
{
struct mutex_queue *mutex_queue;
mutex_queue_head_lock();
/* Search for the mutex queue */
if (!(mutex_queue = mutex_control_find(mutex_address))) {
/* Create a new one */
if (!(mutex_queue = mutex_control_create(mutex_address))) {
mutex_queue_head_unlock();
return -ENOMEM;
}
/* Add the queue to mutex queue list */
mutex_control_add(mutex_queue);
}
mutex_queue_head_unlock();
/* Now sleep on the queue */
wait_on(&mutex_queue->wqh);
return 0;
}
/*
* A thread that has detected a contention on a mutex that
* it had locked but has just released is expected to show up with
* that mutex here.
*
* (1) The mutex is converted into its physical form and
* searched for in the existing mutex list. If not found,
* the call returns an error.
* (2) All the threads waiting on this mutex are woken up. This may
* cause a thundering herd, but since user threads cannot be trusted
* to acquire the mutex, waking all of them up increases the
* chances that some thread will acquire it.
*/
int mutex_control_unlock(unsigned long mutex_address)
{
struct mutex_queue *mutex_queue;
mutex_queue_head_lock();
/* Search for the mutex queue */
if (!(mutex_queue = mutex_control_find(mutex_address))) {
mutex_queue_head_unlock();
/* No such mutex */
return -ESRCH;
}
/* Found it, now wake all waiters up in FIFO order */
wake_up_all(&mutex_queue->wqh, WAKEUP_ASYNC);
/* Since no one is left, delete the mutex queue */
mutex_control_remove(mutex_queue);
mutex_control_delete(mutex_queue);
/* Release lock and return */
mutex_queue_head_unlock();
return 0;
}
int sys_mutex_control(syscall_context_t *regs)
{
unsigned long mutex_address = (unsigned long)regs->r0;
int mutex_op = (int)regs->r1;
unsigned long mutex_physical;
int ret = 0;
/* Check valid operation */
if (mutex_op != MUTEX_CONTROL_LOCK &&
mutex_op != MUTEX_CONTROL_UNLOCK)
return -EINVAL;
/* Check valid user virtual address */
if (!USER_ADDR(mutex_address))
return -EINVAL;
/* Find and check physical address for virtual mutex address */
if (!(mutex_physical =
virt_to_phys_by_pgd(mutex_address,
TASK_PGD(current))))
return -EINVAL;
switch (mutex_op) {
case MUTEX_CONTROL_LOCK:
ret = mutex_control_lock(mutex_physical);
break;
case MUTEX_CONTROL_UNLOCK:
ret = mutex_control_unlock(mutex_physical);
break;
}
return ret;
}
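
Since the queues are keyed by physical address, two tasks that share memory
contend on the same kernel queue regardless of where each has the page
mapped. A hypothetical trace with made-up addresses: task A locks the mutex
at its vaddr 0x40001000 and task B at its vaddr 0x50001000; both translate
through their respective page tables to phys 0x80321000, so
mutex_control_find() resolves both to the same mutex_queue.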


@@ -178,6 +178,17 @@ pte_t virt_to_pte(unsigned long virtual)
return virt_to_pte_from_pgd(virtual, TASK_PGD(current));
}
unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
pte_t pte = virt_to_pte_from_pgd(vaddr, pgd);
return pte & ~PAGE_MASK;
}
unsigned long virt_to_phys_by_task(unsigned long vaddr, struct ktcb *task)
{
return virt_to_phys_by_pgd(vaddr, TASK_PGD(task));
}
void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
{
u32 pgd_i = PGD_INDEX(vaddr);
@@ -268,12 +279,6 @@ int check_mapping_pgd(unsigned long vaddr, unsigned long size,
return 1;
}
unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd)
{
pte_t pte = virt_to_pte_from_pgd(vaddr, pgd);
return pte & ~PAGE_MASK;
}
int check_mapping(unsigned long vaddr, unsigned long size,
unsigned int flags)
{


@@ -21,7 +21,7 @@ struct address_space_list {
/* Lock for list add/removal */
struct spinlock list_lock;
/* To manage refcounting of *all* spaces in the list */
/* Used when deleting/creating spaces */
struct mutex ref_lock;
int count;
};


@@ -29,6 +29,7 @@ void kip_init_syscalls(void)
kip.getid = ARM_SYSCALL_PAGE + sys_getid_offset;
kip.kmem_control = ARM_SYSCALL_PAGE + sys_kmem_control_offset;
kip.time = ARM_SYSCALL_PAGE + sys_time_offset;
kip.mutex_control = ARM_SYSCALL_PAGE + sys_mutex_control_offset;
}
/* Jump table for all system calls. */
@@ -53,6 +54,7 @@ void syscall_init()
syscall_table[sys_kread_offset >> 2] = (syscall_fn_t)sys_kread;
syscall_table[sys_kmem_control_offset >> 2] = (syscall_fn_t)sys_kmem_control;
syscall_table[sys_time_offset >> 2] = (syscall_fn_t)sys_time;
syscall_table[sys_mutex_control_offset >> 2] = (syscall_fn_t)sys_mutex_control;
add_mapping(virt_to_phys(&__syscall_page_start),
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);


@@ -78,6 +78,10 @@ typedef int (*__l4_time_t)(void *timeval, int set);
extern __l4_time_t __l4_time;
int l4_time(void *timeval, int set);
typedef int (*__l4_mutex_control_t)(void *mutex_word, int op);
extern __l4_mutex_control_t __l4_mutex_control;
int l4_mutex_control(void *mutex_word, int op);
/* To be supplied by server tasks. */
void *virt_to_phys(void *);
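
As a sketch of the raw interface (normally hidden behind l4_mutex_lock() and
l4_mutex_unlock(), declared in the next file), a thread can sleep on any
mapped word directly; lock_word here is hypothetical:

#include <l4/api/mutex.h>        /* L4_MUTEX_LOCK / L4_MUTEX_UNLOCK request ids */
#include <l4lib/arch/syscalls.h> /* l4_mutex_control() prototype */

static unsigned int lock_word;   /* hypothetical word in task memory */

void wait_for_release(void)
{
	/* Sleeps in the kernel until some thread issues L4_MUTEX_UNLOCK
	 * on a virtual address backed by the same physical word. */
	l4_mutex_control(&lock_word, L4_MUTEX_LOCK);
}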


@@ -0,0 +1,38 @@
/*
* User space locking
*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#ifndef __L4_MUTEX_H__
#define __L4_MUTEX_H__
#if !defined(__ASSEMBLY__)
#include <l4/api/mutex.h>
struct l4_mutex {
unsigned int lock;
} __attribute__((aligned(sizeof(int))));
void l4_mutex_init(struct l4_mutex *m);
int l4_mutex_lock(struct l4_mutex *m);
int l4_mutex_unlock(struct l4_mutex *m);
#endif
/* Mutex return values - don't mix up with mutex state */
#define L4_MUTEX_CONTENDED -1
#define L4_MUTEX_SUCCESS 0
/* Mutex states - Any valid tid value is a locked state */
#define L4_MUTEX_UNLOCKED -1
#define L4_MUTEX(m) \
struct l4_mutex m = { L4_MUTEX_UNLOCKED }
#endif /* __L4_MUTEX_H__ */


@@ -0,0 +1,66 @@
/*
* Copyright (C) 2009 Bahadir Balban
*/
#include <l4lib/arch/asm.h>
#include <l4lib/mutex.h>
/*
* NOTES:
*
* Recap on swp:
*
* swp rx, ry, [rz]
*
* In one instruction:
*
* 1) Stores the value in ry into the location pointed to by rz.
* 2) Loads the value previously at that location into rx.
* By doing so, in one instruction one can attempt to lock
* a word, and discover whether it was already locked.
*
* Why use the tid of the thread to lock the mutex instead of
* a single lock value?
*
* Because one atomic instruction must indicate not only whether
* the lock was taken, but also whether it is contended. A plain
* lock value is not sufficient.
* The only way to indicate a contended lock is to store the
* unique TID of the locker.
*/
/*
* Any non-negative value that is a potential TID
* (including 0) means mutex is locked.
*/
/*
* @r0 = address of mutex word
* @r1 = unique tid of current thread
*/
BEGIN_PROC(__l4_mutex_lock)
swp r2, r1, [r0]
cmp r2, #L4_MUTEX_UNLOCKED @ Was the lock available?
movne r0, #L4_MUTEX_CONTENDED @ Indicate failure
moveq r0, #L4_MUTEX_SUCCESS @ Indicate success
mov pc, lr
END_PROC(__l4_mutex_lock)
/*
* @r0 = address of mutex word
* @r1 = unique tid of current thread
*/
BEGIN_PROC(__l4_mutex_unlock)
mov r3, #L4_MUTEX_UNLOCKED
swp r2, r3, [r0]
cmp r2, r1 @ Check lock had original tid value
movne r0, #L4_MUTEX_CONTENDED @ Indicate contention
moveq r0, #L4_MUTEX_SUCCESS @ Indicate no contention
cmp r2, #L4_MUTEX_UNLOCKED @ Or - was it already unlocked?
1:
beq 1b @ If so busy-spin to indicate bug.
mov pc, lr
END_PROC(__l4_mutex_unlock)
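
For readers not fluent in ARM assembly, here is a rough C rendition of the
two routines above - a sketch only, using GCC's __atomic_exchange_n as a
stand-in for swp (the real code uses swp, since these ARMv5 targets predate
ldrex/strex):

#include <l4lib/mutex.h>  /* L4_MUTEX_UNLOCKED, L4_MUTEX_SUCCESS, ... */

static inline int mutex_lock_c(volatile int *word, int tid)
{
	/* Like swp: atomically store our tid and fetch the previous value */
	int old = __atomic_exchange_n(word, tid, __ATOMIC_ACQUIRE);

	return (old == L4_MUTEX_UNLOCKED) ? L4_MUTEX_SUCCESS
	                                  : L4_MUTEX_CONTENDED;
}

static inline int mutex_unlock_c(volatile int *word, int tid)
{
	int old = __atomic_exchange_n(word, L4_MUTEX_UNLOCKED, __ATOMIC_RELEASE);

	if (old == L4_MUTEX_UNLOCKED)
		for (;;) ;  /* double unlock: spin to flag the bug, as the asm does */

	/* old != tid means a contender swapped its own tid in meanwhile */
	return (old == tid) ? L4_MUTEX_SUCCESS : L4_MUTEX_CONTENDED;
}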


@@ -23,6 +23,7 @@ __l4_space_control_t __l4_space_control = 0;
__l4_exchange_registers_t __l4_exchange_registers = 0;
__l4_kmem_control_t __l4_kmem_control = 0;
__l4_time_t __l4_time = 0;
__l4_mutex_control_t __l4_mutex_control = 0;
struct kip *kip;
@@ -54,5 +55,6 @@ void __l4_init(void)
(__l4_exchange_registers_t)kip->exchange_registers;
__l4_kmem_control = (__l4_kmem_control_t)kip->kmem_control;
__l4_time = (__l4_time_t)kip->time;
__l4_mutex_control = (__l4_mutex_control_t)kip->mutex_control;
}

tasks/libl4/src/mutex.c (new file, 66 lines)

@@ -0,0 +1,66 @@
/*
* Userspace mutex implementation
*
* Copyright (C) 2009 Bahadir Bilgehan Balban
*/
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include <l4lib/arch/syscalls.h>
/*
* NOTES:
* l4_mutex_lock() locks an initialized mutex.
* If it contends, it calls the mutex syscall.
* l4_mutex_unlock() unlocks an acquired mutex.
* If there was contention, mutex syscall is called
* to resolve by the kernel.
*
* Internals:
*
* (1) The kernel creates a waitqueue for every unique
* mutex in the system, i.e. every unique physical
* address that is contended as a mutex. In that respect
* virtual mutex addresses are translated to physical
* and checked for match.
*
* (2) If a mutex was contended, the kernel is called by the
* thread releasing it. The syscall simply wakes up any waiters
* on the mutex in FIFO order and returns.
*
* Issues:
* - The kernel action is to merely wake up sleepers. If
* a new thread acquires the lock meanwhile, all those woken
* up threads would have to sleep again.
* - All sleepers are woken up (aka the thundering herd). This
* must be done because if only a single task were woken up,
* there would be no guarantee that it would in turn wake up
* the others; it might even quit attempting to take the lock.
* - Whether this is the best design - time will tell.
*/
extern int __l4_mutex_lock(void *word, l4id_t tid);
extern int __l4_mutex_unlock(void *word, l4id_t tid);
void l4_mutex_init(struct l4_mutex *m)
{
m->lock = L4_MUTEX_UNLOCKED;
}
int l4_mutex_lock(struct l4_mutex *m)
{
l4id_t tid = self_tid();
while (__l4_mutex_lock(&m->lock, tid) == L4_MUTEX_CONTENDED)
l4_mutex_control(&m->lock, L4_MUTEX_LOCK);
return 0;
}
int l4_mutex_unlock(struct l4_mutex *m)
{
l4id_t tid = self_tid();
if (__l4_mutex_unlock(&m->lock, tid) == L4_MUTEX_CONTENDED)
l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK);
return 0;
}
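
To make the TID-swap protocol concrete, a hypothetical trace with two
threads (tids 5 and 7):

(1) Thread 5: swp 5 into the word, reads back -1 (unlocked). Lock taken,
no syscall.
(2) Thread 7: swp 7 into the word, reads back 5. Contended: it calls
l4_mutex_control(..., L4_MUTEX_LOCK) and sleeps in the kernel.
(3) Thread 5 unlocks: swp -1 into the word, reads back 7 rather than its
own tid. Contention detected: it calls l4_mutex_control(..., L4_MUTEX_UNLOCK),
waking thread 7.
(4) Thread 7 retries the swp loop in l4_mutex_lock() and takes the lock.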


@@ -1,7 +1,7 @@
cd build
#arm-none-eabi-insight &
/opt/qemu/bin/qemu-system-arm -s -kernel final.axf -serial stdio -m 128 -M versatilepb &
#arm-none-linux-gnueabi-insight ; pkill qemu-system-arm
/opt/qemu/bin/qemu-system-arm -s -kernel final.axf -nographic -m 128 -M versatilepb &
arm-none-linux-gnueabi-insight ; pkill qemu-system-arm
#arm-none-eabi-gdb ; pkill qemu-system-arm
cd ..