Fix to l4_mutex_control for unexpected sleeping with a mutex held

when preemption occurs after the call to wait_on_prepare.
Bahadir Balban
2009-06-10 15:41:30 +03:00
parent 6b3ddadcf5
commit 7ef479733f
3 changed files with 36 additions and 6 deletions
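
In outline, the race being fixed: once wait_on_prepare() has queued the current task on a waitqueue, the task must not be put to sleep, not even by preemption, until it has dropped the mutex_control mutex taken by mutex_queue_head_lock(); a preemption in that window previously sent the task to sleep while still holding that mutex. The sketch below only illustrates the fixed ordering; the declarations are hypothetical stand-ins for the primitives named in the diff, not the actual sources.

/*
 * Illustrative sketch only: hypothetical stubs, not the real kernel code.
 */
struct waitqueue_head;
struct waitqueue;

extern void preempt_disable(void);
extern void preempt_enable(void);
extern void wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq);
extern int wait_on_prepared_wait(void);
extern void mutex_queue_head_unlock(void);

int sleep_on_contenders(struct waitqueue_head *wqh_contenders,
                        struct waitqueue *wq)
{
        /* wait_on_prepare() queues this task as a sleeper, so disable
         * preemption first: the task must not go to sleep while it
         * still holds the mutex_control mutex. */
        preempt_disable();
        wait_on_prepare(wqh_contenders, wq);

        /* Release the mutex_control mutex; this path now uses the
         * asynchronous unlock because preemption is disabled here. */
        mutex_queue_head_unlock();

        /* Sleeping, voluntarily or by preemption, is safe again. */
        preempt_enable();

        /* Complete the prepared wait. */
        return wait_on_prepared_wait();
}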


@@ -29,6 +29,7 @@ static inline void mutex_init(struct mutex *mutex)
int mutex_trylock(struct mutex *mutex);
int mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);
+void mutex_unlock_async(struct mutex *mutex);
/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
static inline int mutex_inc(unsigned int *cnt)


@@ -54,7 +54,8 @@ void mutex_queue_head_lock()
void mutex_queue_head_unlock()
{
-mutex_unlock(&mutex_queue_head.mutex_control_mutex);
+/* Async unlock because in some cases preemption may be disabled here */
+mutex_unlock_async(&mutex_queue_head.mutex_control_mutex);
}
@@ -170,11 +171,18 @@ int mutex_control_lock(unsigned long mutex_address)
/* Prepare to wait on the contenders queue */
CREATE_WAITQUEUE_ON_STACK(wq, current);
+/* Disable to protect from sleeping by preemption */
+preempt_disable();
wait_on_prepare(&mutex_queue->wqh_contenders, &wq);
/* Release lock */
mutex_queue_head_unlock();
+/* Now safe to sleep voluntarily or by preemption */
+preempt_enable();
/* Initiate prepared wait */
return wait_on_prepared_wait();
}
@@ -213,11 +221,19 @@ int mutex_control_unlock(unsigned long mutex_address)
/* Prepare to wait on the lock holders queue */
CREATE_WAITQUEUE_ON_STACK(wq, current);
+/* Disable to protect from sleeping by preemption */
+preempt_disable();
/* Prepare to wait */
wait_on_prepare(&mutex_queue->wqh_holders, &wq);
-/* Release lock */
+/* Release lock first */
mutex_queue_head_unlock();
+/* Now safe to sleep voluntarily or by preemption */
+preempt_enable();
/* Initiate prepared wait */
return wait_on_prepared_wait();
}


@@ -143,7 +143,7 @@ int mutex_lock(struct mutex *mutex)
return 0;
}
-void mutex_unlock(struct mutex *mutex)
+static inline void mutex_unlock_common(struct mutex *mutex, int sync)
{
spin_lock(&mutex->wqh.slock);
__mutex_unlock(&mutex->lock);
@@ -152,8 +152,8 @@ void mutex_unlock(struct mutex *mutex)
BUG_ON(mutex->wqh.sleepers < 0);
if (mutex->wqh.sleepers > 0) {
struct waitqueue *wq = link_to_struct(mutex->wqh.task_list.next,
struct waitqueue,
task_list);
struct ktcb *sleeper = wq->task;
task_unset_wqh(sleeper);
@@ -168,7 +168,10 @@ void mutex_unlock(struct mutex *mutex)
* but it may potentially starve the sleeper causing
* non-determinism. We may consider priorities here.
*/
-sched_resume_sync(sleeper);
+if (sync)
+sched_resume_sync(sleeper);
+else
+sched_resume_async(sleeper);
/* Don't iterate, wake only one task. */
return;
@@ -176,3 +179,13 @@ void mutex_unlock(struct mutex *mutex)
spin_unlock(&mutex->wqh.slock);
}
+void mutex_unlock(struct mutex *mutex)
+{
+mutex_unlock_common(mutex, 1);
+}
+void mutex_unlock_async(struct mutex *mutex)
+{
+mutex_unlock_common(mutex, 0);
+}