- Fixed a wrong instruction in the mutex.S user library.
- Added support for blocking lock/unlock.
- Divided waiting into wait_on_prepare and wait_on_prepared_wait so that the mutex_control lock is released after the task is placed in the waitqueue (see the sketch below).
- Declaring the waitqueue on the stack should be done outside wait_on_prepare.

Issues:
- Tests can be simplified to use atomic data access instead of producer/consumer.
- kmalloc variable-sized memory caches are not freed properly. Currently only the last occupied slot can be freed correctly; freeing should work on any slot, i.e. 1, 2, 3 or 4 instead of just 5.
- Need to add a mutex to kmalloc.
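A rough sketch of the two-phase wait described above. wait_on_prepare, wait_on_prepared_wait and the stack-declared waitqueue are names taken from this change; the mutex_queue structure, its members, and the spin_lock calls are illustrative assumptions, not the actual kernel API:

	struct waitqueue wq;			/* on the caller's stack,
						 * outside wait_on_prepare */

	spin_lock(&mutex_queue->lock);		/* the mutex_control lock */
	wait_on_prepare(&mutex_queue->wqh, &wq);/* enqueue, don't sleep yet */
	spin_unlock(&mutex_queue->lock);	/* safe: already queued */
	wait_on_prepared_wait();		/* now actually sleep */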
/*
 * Userspace mutex implementation
 *
 * Copyright (C) 2009 Bahadir Bilgehan Balban
 */
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>

/*
 * NOTES:
 * l4_mutex_lock() locks an initialized mutex.
 * If it contends, it calls the mutex syscall.
 * l4_mutex_unlock() unlocks an acquired mutex.
 * If there was contention, the mutex syscall is
 * called so that the kernel can resolve it.
 *
 * Internals:
 *
 * (1) The kernel creates a waitqueue for every unique
 * mutex in the system, i.e. every unique physical
 * address that is contended as a mutex. To that end,
 * virtual mutex addresses are translated to physical
 * ones and checked for a match.
 *
 * (2) If a mutex is contended, the kernel is called
 * by the locker. The syscall simply wakes up any
 * waiters on the mutex in FIFO order and returns.
 *
 * Issues:
 * - The kernel's only action is to wake up sleepers. If
 *   a new thread acquires the lock in the meantime, all
 *   the woken-up threads have to sleep again.
 * - All sleepers are woken up (the thundering herd). This
 *   must be done because if only a single task were woken
 *   up, there is no guarantee it would in turn wake up the
 *   others; it might even quit attempting to take the lock.
 * - Whether this is the best design - time will tell.
 */

extern int __l4_mutex_lock(void *word, l4id_t tid);
extern int __l4_mutex_unlock(void *word, l4id_t tid);
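
/*
 * Illustrative sketch only: the mutex.S primitives declared above
 * behave roughly like the compare-and-swap below. The real routines
 * are hand-written assembly (e.g. ldrex/strex on ARM); the function
 * name, the 0 success value and the use of a gcc builtin here are
 * assumptions for illustration, not the actual implementation.
 */
#if 0
static inline int example_mutex_trylock(struct l4_mutex *m, l4id_t tid)
{
	/* Atomically take the lock word if it is currently unlocked */
	if (__sync_val_compare_and_swap(&m->lock, L4_MUTEX_UNLOCKED, tid)
	    == L4_MUTEX_UNLOCKED)
		return 0;			/* acquired */
	return L4_MUTEX_CONTENDED;		/* someone else holds it */
}
#endif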

void l4_mutex_init(struct l4_mutex *m)
{
	m->lock = L4_MUTEX_UNLOCKED;
}

int l4_mutex_lock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();
	int err;

	/* Retry the atomic lock; on contention, sleep via the kernel */
	while (__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED) {
		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
		}
	}
	return 0;
}

int l4_mutex_unlock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();
	int err;

	/* On contention, ask the kernel to wake up the waiters */
	if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED) {
		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
		}
	}
	return 0;
}
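
/*
 * Minimal usage sketch (not part of the original file): protecting a
 * shared counter with the API above. The mutex and counter names are
 * hypothetical; the mutex must be visible to every thread sharing it,
 * e.g. in shared memory or a global.
 */
#if 0
static struct l4_mutex lock;	/* hypothetical shared mutex */
static int shared_counter;	/* hypothetical shared data */

void example_setup(void)
{
	l4_mutex_init(&lock);	/* once, before any thread uses it */
}

void example_increment(void)
{
	l4_mutex_lock(&lock);	/* sleeps in the kernel on contention */
	shared_counter++;	/* critical section */
	l4_mutex_unlock(&lock);	/* wakes any FIFO-queued waiters */
}
#endif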