Mirror of https://github.com/drasko/codezero.git, synced 2026-01-12 02:43:15 +01:00
Mutex implementation almost working.

- Fixed a wrong instruction in the mutex.S user library.
- Added support for blocking lock/unlock.
- Divided waiting into wait_on_prepare and wait_on_prepared_wait so that
  the mutex_control lock is released after getting in the waitqueue.
- Declaring the waitqueue on the stack should be done outside
  wait_on_prepare.

Issues:
- Tests can be simplified to plain atomic data access instead of
  producer/consumer.
- kmalloc variable-sized memory caches are not freed properly. Currently
  only the last slot can be freed and re-occupied correctly; it should
  work in any slot, i.e. 1, 2, 3, 4 instead of just 5.
- Need to add a mutex to kmalloc.
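To illustrate what the wait_on_prepare/wait_on_prepared_wait split buys,
here is a minimal sketch of the lost-wakeup window it closes (the
interleaving commentary is an assumption; the calls are the ones
introduced by this commit):

	/*
	 * Broken orders: sleeping while still holding the mutex_control
	 * lock would block the waker; but unlocking before queueing
	 * opens a window where the wakeup is lost:
	 *
	 *	mutex_queue_head_unlock();
	 *	// <-- waker runs here, sees an empty waitqueue,
	 *	//     its wake_up() goes nowhere
	 *	wait_on(&mutex_queue->wqh_waiters);
	 *
	 * Fixed order: get in the waitqueue first, then drop the lock,
	 * then actually sleep:
	 */
	CREATE_WAITQUEUE_ON_STACK(wq, current);
	wait_on_prepare(&mutex_queue->wqh_waiters, &wq); /* queued, not yet asleep */
	mutex_queue_head_unlock();                       /* now safe to release */
	return wait_on_prepared_wait();                  /* schedule() and sleep */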
@@ -77,6 +77,7 @@ int wake_up_task(struct ktcb *task, unsigned int flags);
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags);

int wait_on(struct waitqueue_head *wqh);

int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq);
int wait_on_prepared_wait(void);
#endif /* __LIB_WAIT_H__ */
@@ -19,15 +19,21 @@
struct mutex_queue {
	unsigned long physical;
	struct list_head list;
	struct waitqueue_head wqh;
	struct waitqueue_head wqh_waiters;
	struct waitqueue_head wqh_wakers;
};

struct mutex_queue_head {
	struct list_head list;

	/*
	 * Lock for mutex_queue create/deletion and also list add/removal.
	 * Both operations are done jointly so a single lock is enough.
	 * Single lock for:
	 * (1) Mutex_queue create/deletion
	 * (2) List add/removal.
	 * (3) Inspection of waitqueues:
	 *     - Both waitqueue spinlocks need to be acquired for
	 *       rendezvous inspection to occur atomically. Currently
	 *       it's not done since we rely on this mutex for that.
	 */
	struct mutex mutex_control_mutex;
	int count;
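A hedged sketch of what point (3) would require without the big lock:
inspecting the waiter and waker queues as one atomic snapshot means
holding both waitqueue spinlocks in a fixed order. The helper below is
hypothetical and not part of this commit, which relies on
mutex_control_mutex instead:

	/* Hypothetical: atomic rendezvous snapshot of both queues */
	static int mutex_queue_has_waker(struct mutex_queue *mq)
	{
		int wakers;

		/* Fixed acquisition order avoids ABBA deadlock */
		spin_lock(&mq->wqh_waiters.slock);
		spin_lock(&mq->wqh_wakers.slock);
		wakers = mq->wqh_wakers.sleepers;
		spin_unlock(&mq->wqh_wakers.slock);
		spin_unlock(&mq->wqh_waiters.slock);

		return wakers;
	}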
@@ -56,7 +62,8 @@ void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
	mq->physical = physical;

	INIT_LIST_HEAD(&mq->list);
	waitqueue_head_init(&mq->wqh);
	waitqueue_head_init(&mq->wqh_wakers);
	waitqueue_head_init(&mq->wqh_waiters);
}

void mutex_control_add(struct mutex_queue *mq)
@@ -105,8 +112,10 @@ void mutex_control_delete(struct mutex_queue *mq)
	BUG_ON(!list_empty(&mq->list));

	/* Test internals of waitqueue */
	BUG_ON(&mq->wqh.sleepers);
	BUG_ON(!list_empty(&mq->wqh.task_list));
	BUG_ON(mq->wqh_waiters.sleepers);
	BUG_ON(mq->wqh_wakers.sleepers);
	BUG_ON(!list_empty(&mq->wqh_waiters.task_list));
	BUG_ON(!list_empty(&mq->wqh_wakers.task_list));

	kfree(mq);
}
@@ -119,7 +128,8 @@ void mutex_control_delete(struct mutex_queue *mq)
 * searched for in the existing mutex list. If it does not
 * appear there, it gets added.
 * (2) The thread is put to sleep in the mutex wait queue
 *     until a wake up event occurs.
 *     until a wake up event occurs. If there is already an asleep
 *     waker (i.e. unlocker), that waker is woken up and we return.
 */
int mutex_control_lock(unsigned long mutex_address)
{
@@ -136,13 +146,36 @@ int mutex_control_lock(unsigned long mutex_address)
		}
		/* Add the queue to mutex queue list */
		mutex_control_add(mutex_queue);
	} else {
		/* See if there is an unlocker */
		if (mutex_queue->wqh_wakers.sleepers) {
			/*
			 * If yes, wake it up async and we can *hope*
			 * to acquire the lock before the waker
			 */
			wake_up(&mutex_queue->wqh_wakers, WAKEUP_ASYNC);

			/* Since no one is left, delete the mutex queue */
			mutex_control_remove(mutex_queue);
			mutex_control_delete(mutex_queue);

			/* Release lock and return */
			mutex_queue_head_unlock();

			return 0;
		}
	}

	/* Prepare to wait on the waiters queue */
	CREATE_WAITQUEUE_ON_STACK(wq, current);
	wait_on_prepare(&mutex_queue->wqh_waiters, &wq);

	/* Release lock */
	mutex_queue_head_unlock();

	/* Now sleep on the queue */
	wait_on(&mutex_queue->wqh);
	/* Initiate prepared wait */
	return wait_on_prepared_wait();

	return 0;
}
/*
@@ -152,7 +185,7 @@ int mutex_control_lock(unsigned long mutex_address)
 *
 * (1) The mutex is converted into its physical form and
 *     searched for in the existing mutex list. If not found,
 *     the call returns an error.
 *     a new one is created and the thread sleeps there as a waker.
 * (2) All the threads waiting on this mutex are woken up. This may
 *     cause a thundering herd, but user threads cannot be trusted
 *     to acquire the mutex; waking up all of them increases the
@@ -167,11 +200,32 @@ int mutex_control_unlock(unsigned long mutex_address)
	/* Search for the mutex queue */
	if (!(mutex_queue = mutex_control_find(mutex_address))) {
		mutex_queue_head_unlock();
		/* No such mutex */
		return -ESRCH;

		/* No such mutex, create one and sleep on it */
		if (!(mutex_queue = mutex_control_create(mutex_address))) {
			mutex_queue_head_unlock();
			return -ENOMEM;
		}

		/* Add the queue to mutex queue list */
		mutex_control_add(mutex_queue);

		/* Prepare to wait on the wakers queue */
		CREATE_WAITQUEUE_ON_STACK(wq, current);
		wait_on_prepare(&mutex_queue->wqh_wakers, &wq);

		/* Release lock */
		mutex_queue_head_unlock();

		/* Initiate prepared wait */
		return wait_on_prepared_wait();
	}
	/* Found it, now wake all waiters up in FIFO order */
	wake_up_all(&mutex_queue->wqh, WAKEUP_ASYNC);

	/*
	 * Found it; if it exists, there are waiters,
	 * so now wake all of them up in FIFO order
	 */
	wake_up(&mutex_queue->wqh_waiters, WAKEUP_ASYNC);

	/* Since no one is left, delete the mutex queue */
	mutex_control_remove(mutex_queue);
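Taken together, the contended lock and unlock paths now form a symmetric
rendezvous on the two waitqueues. A condensed summary, written as a C
comment since it abstracts over the code above (a sketch, not source):

	/*
	 * lock(mutex):
	 *   - a waker asleep in wqh_wakers means an unlocker arrived
	 *     first: wake it up and return with the lock.
	 *   - otherwise sleep in wqh_waiters until an unlock.
	 *
	 * unlock(mutex):
	 *   - no mutex_queue yet means no waiter arrived: create one
	 *     and sleep in wqh_wakers until a locker shows up.
	 *   - otherwise wake the waiters in wqh_waiters and tear the
	 *     queue down.
	 */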
@@ -191,12 +245,16 @@ int sys_mutex_control(syscall_context_t *regs)

	/* Check valid operation */
	if (mutex_op != MUTEX_CONTROL_LOCK &&
	    mutex_op != MUTEX_CONTROL_UNLOCK)
	    mutex_op != MUTEX_CONTROL_UNLOCK) {
		printk("Invalid args to %s.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Check valid user virtual address */
	if (!USER_ADDR(mutex_address))
	if (!USER_ADDR(mutex_address)) {
		printk("Invalid args to %s.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Find and check physical address for virtual mutex address */
	if (!(mutex_physical =
@@ -206,11 +264,16 @@ int sys_mutex_control(syscall_context_t *regs)

	switch (mutex_op) {
	case MUTEX_CONTROL_LOCK:
		printk("(%d): MUTEX_LOCK\n", current->tid);
		ret = mutex_control_lock(mutex_physical);
		break;
	case MUTEX_CONTROL_UNLOCK:
		printk("(%d): MUTEX_UNLOCK\n", current->tid);
		ret = mutex_control_unlock(mutex_physical);
		break;
	default:
		printk("%s: Invalid operands\n", __FUNCTION__);
		ret = -EINVAL;
	}

	return ret;
@@ -14,6 +14,7 @@
struct kmalloc_mempool {
	int total;
	struct list_head pool_head[KMALLOC_POOLS_MAX];
	struct mutex kmalloc_mutex;
};
struct kmalloc_mempool km_pool;
@@ -21,6 +22,7 @@ void init_kmalloc()
{
	for (int i = 0; i < KMALLOC_POOLS_MAX; i++)
		INIT_LIST_HEAD(&km_pool.pool_head[i]);
	mutex_init(&km_pool.kmalloc_mutex);
}

/*
@@ -69,6 +71,7 @@ void *kmalloc(int size)
	BUG_ON(size >= PAGE_SIZE);
	BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
					size, 0)));
	printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
	list_add(&cache->list, &km_pool.pool_head[index]);
	return mem_cache_alloc(cache);
}
@@ -87,7 +90,12 @@ int kfree(void *p)
	if (mem_cache_is_empty(cache)) {
		list_del(&cache->list);
		free_page(cache);
		/* Total remains the same. */
		/*
		 * Total remains the same unless all
		 * caches are freed on that pool
		 */
		if (list_empty(&km_pool.pool_head[i]))
			km_pool.total--;
	}
	return 0;
}
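Note that kmalloc_mutex is only initialized in this commit; per the
commit message, taking it in the allocator is still a TODO ("Need to add
a mutex to kmalloc"). A minimal sketch of the assumed eventual usage,
where __kmalloc_locked is a hypothetical name for the current unlocked
body and mutex_lock()/mutex_unlock() are assumed to be the kernel
primitives paired with mutex_init():

	void *kmalloc(int size)
	{
		void *ret;

		/* Serialize pool list walks against concurrent kfree() */
		mutex_lock(&km_pool.kmalloc_mutex);	/* assumed primitive */
		ret = __kmalloc_locked(size);		/* hypothetical helper */
		mutex_unlock(&km_pool.kmalloc_mutex);

		return ret;
	}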
@@ -35,14 +35,51 @@ void task_unset_wqh(struct ktcb *task)

}

/*
 * Initiate wait on current task that
 * has already been placed in a waitqueue
 */
int wait_on_prepared_wait(void)
{
	/* Simply scheduling should initiate the wait */
	schedule();

	/* Did we wake up normally or get interrupted? */
	if (current->flags & TASK_INTERRUPTED) {
		current->flags &= ~TASK_INTERRUPTED;
		return -EINTR;
	}
	/* No errors */
	return 0;
}

/*
 * Do all preparations to sleep but return without sleeping.
 * This is useful if the task needs to get in the waitqueue before
 * it releases a lock.
 */
int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
{
	spin_lock(&wqh->slock);
	wqh->sleepers++;
	list_add_tail(&wq->task_list, &wqh->task_list);
	task_set_wqh(current, wqh, wq);
	sched_prepare_sleep();
	printk("(%d) waiting on wqh at: 0x%p\n",
	       current->tid, wqh);
	spin_unlock(&wqh->slock);

	return 0;
}
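Since wait_on_prepared_wait() reports interruption, callers of the
prepare/wait pair are expected to propagate -EINTR. A minimal usage
sketch under that assumption (some_wqh and release_outer_lock are
placeholders, not names from the tree):

	int err;
	CREATE_WAITQUEUE_ON_STACK(wq, current);

	wait_on_prepare(&some_wqh, &wq);	/* placeholder waitqueue head */
	release_outer_lock();			/* placeholder for the caller's lock */

	/* Sleep; -EINTR here means a WAKEUP_INTERRUPT woke us */
	if ((err = wait_on_prepared_wait()) < 0)
		return err;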
/* Sleep without any condition */
int wait_on(struct waitqueue_head *wqh)
{
	CREATE_WAITQUEUE_ON_STACK(wq, current);
	spin_lock(&wqh->slock);
	task_set_wqh(current, wqh, &wq);
	wqh->sleepers++;
	list_add_tail(&wq.task_list, &wqh->task_list);
	task_set_wqh(current, wqh, &wq);
	sched_prepare_sleep();
	printk("(%d) waiting on wqh at: 0x%p\n",
	       current->tid, wqh);
@@ -95,10 +132,10 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
						  struct waitqueue,
						  task_list);
		struct ktcb *sleeper = wq->task;
		task_unset_wqh(sleeper);
		BUG_ON(list_empty(&wqh->task_list));
		list_del_init(&wq->task_list);
		wqh->sleepers--;
		task_unset_wqh(sleeper);
		if (flags & WAKEUP_INTERRUPT)
			sleeper->flags |= TASK_INTERRUPTED;
		printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
@@ -56,7 +56,7 @@ BEGIN_PROC(__l4_mutex_unlock)
	swp	r2, r3, [r0]
	cmp	r2, r1			@ Check lock had original tid value
	movne	r0, #L4_MUTEX_CONTENDED	@ Indicate contention
	movne	r0, #L4_MUTEX_SUCCESS	@ Indicate no contention
	moveq	r0, #L4_MUTEX_SUCCESS	@ Indicate no contention
	cmp	r2, #L4_MUTEX_UNLOCKED	@ Or - was it already unlocked?
1:
	beq	1b			@ If so, busy-spin to indicate a bug.
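The fix replaces the wrong movne with moveq on the success case, so r0
reports L4_MUTEX_SUCCESS only when the swapped-out word matched the
caller's tid. A rough C equivalent of the corrected routine (a sketch:
the swp instruction is modeled with a GCC atomic-exchange builtin, and
the busy loop mirrors the deliberate "1: beq 1b" bug trap):

	int __l4_mutex_unlock_equivalent(int *lock, int tid)
	{
		/* swp r2, r3, [r0]: store UNLOCKED, fetch old atomically */
		int old = __atomic_exchange_n(lock, L4_MUTEX_UNLOCKED,
					      __ATOMIC_SEQ_CST);
		int ret;

		/* cmp r2, r1 + movne/moveq: did we own the lock? */
		ret = (old != tid) ? L4_MUTEX_CONTENDED : L4_MUTEX_SUCCESS;

		/*
		 * cmp r2, #L4_MUTEX_UNLOCKED + beq 1b: unlocking an
		 * already-unlocked mutex is a bug, so spin forever.
		 */
		while (old == L4_MUTEX_UNLOCKED)
			;

		return ret;
	}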
@@ -50,18 +50,28 @@ void l4_mutex_init(struct l4_mutex *m)
int l4_mutex_lock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();
	int err;

	while (__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED)
		l4_mutex_control(&m->lock, L4_MUTEX_LOCK);
	while (__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED) {
		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
		}
	}
	return 0;
}

int l4_mutex_unlock(struct l4_mutex *m)
{
	l4id_t tid = self_tid();
	int err;

	if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED)
		l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK);
	if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED) {
		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
		}
	}
	return 0;
}
@@ -52,9 +52,9 @@ void main(void)
		ipc_full_test();
		ipc_extended_test();
	}
//	if (parent_of_all == getpid()) {
//		user_mutex_test();
//	}
	if (parent_of_all == getpid()) {
		user_mutex_test();
	}
	exectest();

	while (1)
@@ -68,11 +68,10 @@ int user_mutex_test(void)

	/* Initialize buffer with unique child value */
	printf("Initializing shared page with child values.\n");
	BUG();
	shared_page->child_has_run = 0;
	shared_page->parent_has_run = 0;
	for (int i = 0; i < buf_size; i++)
		shared_page->page_rest[i] = 'c';
		shared_page->page_rest[i] = 0;

	/* Fork the current task */
	if ((child = fork()) < 0) {
@@ -88,17 +87,20 @@ int user_mutex_test(void)
	/* Child locks and produces */
	if (child == 0) {

		for (int i = 0; i < 5; i++) {
		for (int x = 0; x < 255; x++) {
			/* Lock */
			printf("Child locking page.\n");
			l4_mutex_lock(&shared_page->mutex);

			printf("Child locked page.\n");

			l4_thread_switch(0);
			/* Get sample value */
			char val = shared_page->page_rest[0];
			//char val = shared_page->page_rest[0];

			/* Write a unique child value to whole buffer */
			for (int i = 0; i < buf_size; i++) {
#if 0
				/* Check sample is same in all */
				if (shared_page->page_rest[i] != val) {
					printf("Sample values dont match. "
@@ -107,22 +109,26 @@ int user_mutex_test(void)
					       shared_page->page_rest[i], val);
					goto out_err;
				}
				shared_page->page_rest[i] = 'c';
#endif
				shared_page->page_rest[i]--;
			}
			printf("Child produced.\n");
			printf("Child produced. Unlocking...\n");
			/* Unlock */
			l4_mutex_unlock(&shared_page->mutex);
			printf("Child unlocked page.\n");
		}
		/* Sync with the parent */
		l4_send(parent, L4_IPC_TAG_SYNC);

	/* Parent locks and consumes */
	} else {
		for (int i = 0; i < 5; i++) {
		for (int x = 0; x < 255; x++) {
			printf("Parent locking page.\n");

			/* Lock the page */
			l4_mutex_lock(&shared_page->mutex);
			printf("Parent locked page.\n");
			l4_thread_switch(0);

			// printf("Parent reading:\n");

@@ -130,6 +136,7 @@ int user_mutex_test(void)
			for (int i = 0; i < buf_size; i++) {
				// printf("%c", shared_page->page_rest[i]);
				/* Test that child has produced */
#if 0
				if (shared_page->page_rest[i] != 'c') {
					printf("Child not produced. "
					       "page_rest[%d] = %c, "
@@ -138,14 +145,22 @@ int user_mutex_test(void)
					BUG();
					goto out_err;
				}
#endif
				/* Consume the page */
				shared_page->page_rest[i] = 'P';
				shared_page->page_rest[i]++;
			}
			// printf("\n\n");
			printf("Parent consumed.\n");
			printf("Parent consumed. Unlocking...\n");
			l4_mutex_unlock(&shared_page->mutex);
			printf("Parent unlocked page.\n");
		}
		/* Sync with the child */
		l4_receive(child);

		printf("Parent checking validity of values.\n");
		for (int i = 0; i < 255; i++)
			if (shared_page->page_rest[i] != 0)
				goto out_err;

		printf("USER MUTEX TEST: -- PASSED --\n");
	}