mirror of
https://github.com/drasko/codezero.git
synced 2026-04-02 01:59:05 +02:00
Changes between 16 March 2010 - 6 April 2010
Mutex system call fixed for multiple contenders. Userspace irq support extended to keyboard/mouse. Scheduler modified for real-time irq tasks.
This commit is contained in:
@@ -140,6 +140,12 @@ int keyboard_irq_handler(void *arg)
|
||||
while (data--)
|
||||
if ((c = kmi_keyboard_read(keyboard->base, &keyboard->state)))
|
||||
printf("%c", c);
|
||||
|
||||
/*
|
||||
* Kernel has disabled irq for keyboard
|
||||
* We need to enable it
|
||||
*/
|
||||
kmi_rx_irq_enable(keyboard->base);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -174,6 +180,12 @@ int mouse_irq_handler(void *arg)
|
||||
while (data--)
|
||||
if ((c = kmi_data_read(mouse->base)))
|
||||
printf("mouse data: %d\n", c);
|
||||
|
||||
/*
|
||||
* Kernel has disabled irq for mouse
|
||||
* We need to enable it
|
||||
*/
|
||||
kmi_rx_irq_enable(mouse->base);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ struct sleeper_task {
|
||||
struct wake_task_list {
|
||||
struct link head;
|
||||
struct link *end; /* optimization */
|
||||
struct l4_mutex lock; /* lock for sanity of head */
|
||||
struct l4_mutex wake_list_lock; /* lock for sanity of head */
|
||||
};
|
||||
|
||||
#define BUCKET_BASE_LEVEL_BITS 8
|
||||
@@ -77,7 +77,7 @@ struct timer {
|
||||
unsigned long base; /* Virtual base address */
|
||||
unsigned int count; /* Counter/jiffies */
|
||||
struct sleeper_task_bucket task_list; /* List of sleeping tasks */
|
||||
struct l4_mutex lock; /* Lock for sleeper_task_bucket */
|
||||
struct l4_mutex task_list_lock; /* Lock for sleeper_task_bucket */
|
||||
struct capability cap; /* Capability describing timer */
|
||||
};
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ static int total_caps = 0;
|
||||
|
||||
/* Total number of timer chips being handled by us */
|
||||
#define TIMERS_TOTAL 1
|
||||
static struct timer timer[TIMERS_TOTAL];
|
||||
static struct timer global_timer[TIMERS_TOTAL];
|
||||
|
||||
/* Default timer to be used for sleep/wake etc purposes */
|
||||
#define SLEEP_WAKE_TIMER 0
|
||||
@@ -85,7 +85,7 @@ void timer_struct_init(struct timer* timer, unsigned long base)
|
||||
timer->base = base;
|
||||
timer->count = 0;
|
||||
timer->slot = 0;
|
||||
l4_mutex_init(&timer->lock);
|
||||
l4_mutex_init(&timer->task_list_lock);
|
||||
|
||||
for (int i = 0; i < BUCKET_BASE_LEVEL_SIZE ; ++i) {
|
||||
link_init(&timer->task_list.bucket_level0[i]);
|
||||
@@ -106,7 +106,7 @@ void wake_task_list_init(void)
|
||||
{
|
||||
link_init(&wake_tasks.head);
|
||||
wake_tasks.end = &wake_tasks.head;
|
||||
l4_mutex_init(&wake_tasks.lock);
|
||||
l4_mutex_init(&wake_tasks.wake_list_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -140,7 +140,7 @@ struct link* find_bucket_list(unsigned long seconds)
|
||||
struct link *vector;
|
||||
struct sleeper_task_bucket *bucket;
|
||||
|
||||
bucket = &timer[SLEEP_WAKE_TIMER].task_list;
|
||||
bucket = &global_timer[SLEEP_WAKE_TIMER].task_list;
|
||||
|
||||
/*
|
||||
* TODO: Check if we have already surpassed seconds
|
||||
@@ -172,8 +172,8 @@ int timer_probe_devices(void)
|
||||
/* Match device type */
|
||||
if (cap_devtype(&caparray[i]) == CAP_DEVTYPE_TIMER) {
|
||||
/* Copy to correct device index */
|
||||
memcpy(&timer[cap_devnum(&caparray[i]) - 1].cap,
|
||||
&caparray[i], sizeof(timer[0].cap));
|
||||
memcpy(&global_timer[cap_devnum(&caparray[i]) - 1].cap,
|
||||
&caparray[i], sizeof(global_timer[0].cap));
|
||||
timers++;
|
||||
}
|
||||
}
|
||||
@@ -196,8 +196,11 @@ int timer_irq_handler(void *arg)
|
||||
struct link *vector;
|
||||
const int slot = 0;
|
||||
|
||||
/* Initialise timer */
|
||||
timer_init(timer->base);
|
||||
/*
|
||||
* Initialise timer
|
||||
* 1 interrupt per second
|
||||
*/
|
||||
timer_init(timer->base, 1000000);
|
||||
|
||||
/* Register self for timer irq, using notify slot 0 */
|
||||
if ((err = l4_irq_control(IRQ_CONTROL_REGISTER, slot,
|
||||
@@ -221,34 +224,32 @@ int timer_irq_handler(void *arg)
|
||||
BUG();
|
||||
}
|
||||
|
||||
//printf("Got irq(count 0x%x)\n", timer->count);
|
||||
/*
|
||||
* Update timer count
|
||||
* TODO: Overflow check, we have 1 interrupt/sec from timer
|
||||
* with 32bit count it will take 9years to overflow
|
||||
*/
|
||||
timer->count += count;
|
||||
printf("Got timer irq, current count = 0x%x\n", timer->count);
|
||||
|
||||
/* find bucket list of tasks to be woken for current count */
|
||||
vector = find_bucket_list(timer->count);
|
||||
|
||||
if (!list_empty(vector)) {
|
||||
/* Removing tasks from sleeper list */
|
||||
l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock);
|
||||
l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
|
||||
task_list = list_detach(vector);
|
||||
l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock);
|
||||
l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
|
||||
|
||||
/* Add tasks to wake_task_list */
|
||||
l4_mutex_lock(&wake_tasks.lock);
|
||||
list_attach(task_list,
|
||||
&wake_tasks.head, wake_tasks.end);
|
||||
l4_mutex_unlock(&wake_tasks.lock);
|
||||
l4_mutex_lock(&wake_tasks.wake_list_lock);
|
||||
list_attach(task_list, &wake_tasks.head, wake_tasks.end);
|
||||
l4_mutex_unlock(&wake_tasks.wake_list_lock);
|
||||
|
||||
/*
|
||||
* Send ipc to handle_request
|
||||
* thread to send wake signals
|
||||
*/
|
||||
printf("sending ipc %d to thread %d\n", L4_IPC_TAG_TIMER_WAKE_THREADS, tid_ipc_handler);
|
||||
l4_send(tid_ipc_handler,L4_IPC_TAG_TIMER_WAKE_THREADS);
|
||||
}
|
||||
}
|
||||
@@ -266,17 +267,16 @@ void task_wake(void)
|
||||
list_foreach_removable_struct(struct_ptr, temp_ptr,
|
||||
&wake_tasks.head, list) {
|
||||
/* Remove task from wake list */
|
||||
l4_mutex_lock(&wake_tasks.lock);
|
||||
l4_mutex_lock(&wake_tasks.wake_list_lock);
|
||||
list_remove(&struct_ptr->list);
|
||||
l4_mutex_unlock(&wake_tasks.lock);
|
||||
l4_mutex_unlock(&wake_tasks.wake_list_lock);
|
||||
|
||||
/* Set sender correctly */
|
||||
l4_set_sender(struct_ptr->tid);
|
||||
|
||||
#if 0
|
||||
printf("waking thread at time %x\n",
|
||||
(unsigned int)timer[SLEEP_WAKE_TIMER].count);
|
||||
#endif
|
||||
printf("%s : Waking thread 0x%x at time 0x%x\n", __CONTAINER_NAME__,
|
||||
struct_ptr->tid, global_timer[SLEEP_WAKE_TIMER].count);
|
||||
|
||||
/* send wake ipc */
|
||||
if ((ret = l4_ipc_return(struct_ptr->retval)) < 0) {
|
||||
printf("%s: IPC return error: %d.\n",
|
||||
@@ -302,17 +302,17 @@ int timer_setup_devices(void)
|
||||
|
||||
for (int i = 0; i < TIMERS_TOTAL; i++) {
|
||||
/* initialize timer */
|
||||
timer_struct_init(&timer[i],(unsigned long)l4_new_virtual(1) );
|
||||
timer_struct_init(&global_timer[i],(unsigned long)l4_new_virtual(1) );
|
||||
|
||||
/* Map timer to a virtual address region */
|
||||
if (IS_ERR(l4_map((void *)__pfn_to_addr(timer[i].cap.start),
|
||||
(void *)timer[i].base, timer[i].cap.size,
|
||||
if (IS_ERR(l4_map((void *)__pfn_to_addr(global_timer[i].cap.start),
|
||||
(void *)global_timer[i].base, global_timer[i].cap.size,
|
||||
MAP_USR_IO,
|
||||
self_tid()))) {
|
||||
printf("%s: FATAL: Failed to map TIMER device "
|
||||
"%d to a virtual address\n",
|
||||
__CONTAINER_NAME__,
|
||||
cap_devnum(&timer[i].cap));
|
||||
cap_devnum(&global_timer[i].cap));
|
||||
BUG();
|
||||
}
|
||||
|
||||
@@ -323,7 +323,7 @@ int timer_setup_devices(void)
|
||||
* itself as its irq handler, initiate the timer and
|
||||
* wait on irqs.
|
||||
*/
|
||||
if ((err = thread_create(timer_irq_handler, &timer[i],
|
||||
if ((err = thread_create(timer_irq_handler, &global_timer[i],
|
||||
TC_SHARE_SPACE,
|
||||
&tptr)) < 0) {
|
||||
printf("FATAL: Creation of irq handler "
|
||||
@@ -404,13 +404,23 @@ void task_sleep(l4id_t tid, unsigned long seconds, int ret)
|
||||
struct link *vector;
|
||||
|
||||
/* can overflow happen here?, timer is in 32bit mode */
|
||||
seconds += timer[SLEEP_WAKE_TIMER].count;
|
||||
seconds += global_timer[SLEEP_WAKE_TIMER].count;
|
||||
|
||||
printf("sleep wake timer lock is present at address %lx\n",
|
||||
( (unsigned long)&global_timer[SLEEP_WAKE_TIMER].task_list_lock.lock));
|
||||
|
||||
vector = find_bucket_list(seconds);
|
||||
|
||||
l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock);
|
||||
printf("Acquiring lock for sleep wake timer\n");
|
||||
l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
|
||||
printf("got lock for sleep wake timer\n");
|
||||
|
||||
list_insert(&task->list, vector);
|
||||
l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock);
|
||||
|
||||
printf("Releasing lock for sleep wake timer\n");
|
||||
l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
|
||||
printf("released lock for sleep wake timer\n");
|
||||
|
||||
}
|
||||
|
||||
void handle_requests(void)
|
||||
@@ -448,7 +458,11 @@ void handle_requests(void)
|
||||
switch (tag) {
|
||||
/* Return time in seconds, since the timer was started */
|
||||
case L4_IPC_TAG_TIMER_GETTIME:
|
||||
mr[0] = timer[SLEEP_WAKE_TIMER].count;
|
||||
printf("%s: Got get time request from thread 0x%x "
|
||||
" at time = 0x%x\n", __CONTAINER_NAME__,
|
||||
senderid, global_timer[SLEEP_WAKE_TIMER].count);
|
||||
|
||||
write_mr(2, global_timer[SLEEP_WAKE_TIMER].count);
|
||||
|
||||
/* Reply */
|
||||
if ((ret = l4_ipc_return(ret)) < 0) {
|
||||
@@ -458,8 +472,11 @@ void handle_requests(void)
|
||||
break;
|
||||
|
||||
case L4_IPC_TAG_TIMER_SLEEP:
|
||||
printf("%s: Got sleep request from thread 0x%x, duration %d\n", __CONTAINER_NAME__,
|
||||
senderid, mr[0]);
|
||||
printf("%s: Got sleep request from thread 0x%x "
|
||||
"for 0x%x seconds at 0x%x seconds\n",
|
||||
__CONTAINER_NAME__, senderid, mr[0],
|
||||
global_timer[SLEEP_WAKE_TIMER].count);
|
||||
|
||||
if (mr[0] > 0) {
|
||||
task_sleep(senderid, mr[0], ret);
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ struct keyboard_state{
|
||||
};
|
||||
|
||||
/* Common functions */
|
||||
void kmi_irq_handler(unsigned long base);
|
||||
void kmi_rx_irq_enable(unsigned long base);
|
||||
int kmi_data_read(unsigned long base);
|
||||
|
||||
/* Keyboard specific calls */
|
||||
|
||||
@@ -17,7 +17,7 @@ void timer_load(u32 val, unsigned long timer_base);
|
||||
u32 timer_read(unsigned long timer_base);
|
||||
void timer_stop(unsigned long timer_base);
|
||||
void timer_init_oneshot(unsigned long timer_base);
|
||||
void timer_init_periodic(unsigned long timer_base);
|
||||
void timer_init(unsigned long timer_base);
|
||||
void timer_init_periodic(unsigned long timer_base, u32 load_value);
|
||||
void timer_init(unsigned long timer_base, u32 load_value);
|
||||
|
||||
#endif /* __LIBDEV_TIMER_H__ */
|
||||
|
||||
@@ -135,7 +135,7 @@ struct keyboard_key keymap_uk2[256] = {
|
||||
/* 40 */ {';',':',0,0},
|
||||
/* 41 */ {'\'','@',0,0},
|
||||
/* 42 */ {0,0,0,0},
|
||||
/* 43 */ {KEYCODE_RETURN,0,KEYCODE_ENTER,0},
|
||||
/* 43 */ {'\n','\n',KEYCODE_ENTER,0},
|
||||
/* 44 */ {KEYCODE_LSHIFT,0,0,0},
|
||||
/* 45 */ {'\\','|',0,0},
|
||||
/* 46 */ {'z','Z',0,0},
|
||||
|
||||
@@ -9,11 +9,10 @@
|
||||
#include "kmi.h"
|
||||
#include "keymap.h"
|
||||
|
||||
/*
|
||||
* Reading Rx data automatically clears the RXITR
|
||||
*/
|
||||
void kmi_irq_handler(unsigned long base)
|
||||
/* Enable Rx irq */
|
||||
void kmi_rx_irq_enable(unsigned long base)
|
||||
{
|
||||
*(volatile unsigned long *)(base + PL050_KMICR) = KMI_RXINTR;
|
||||
}
|
||||
|
||||
int kmi_data_read(unsigned long base)
|
||||
|
||||
@@ -45,7 +45,7 @@
|
||||
#define MOUSE_DATA_ENABLE 0xF4 // Mouse enable
|
||||
|
||||
/* Common functions */
|
||||
void kmi_irq_handler(unsigned long base);
|
||||
void kmi_rx_irq_enable(unsigned long base);
|
||||
int kmi_data_read(unsigned long base);
|
||||
|
||||
/* Keyboard specific calls */
|
||||
|
||||
@@ -36,7 +36,7 @@ void timer_stop(unsigned long timer_base)
|
||||
write(0, timer_base + SP804_CTRL);
|
||||
}
|
||||
|
||||
void timer_init_periodic(unsigned long timer_base)
|
||||
void timer_init_periodic(unsigned long timer_base, u32 load_value)
|
||||
{
|
||||
volatile u32 reg = read(timer_base + SP804_CTRL);
|
||||
|
||||
@@ -44,8 +44,11 @@ void timer_init_periodic(unsigned long timer_base)
|
||||
|
||||
write(reg, timer_base + SP804_CTRL);
|
||||
|
||||
/* 1 tick per usec, 1 irq per msec */
|
||||
timer_load(1000, timer_base);
|
||||
if (load_value)
|
||||
timer_load(load_value, timer_base);
|
||||
else
|
||||
/* 1 tick per usec, 1 irq per msec */
|
||||
timer_load(1000, timer_base);
|
||||
}
|
||||
|
||||
void timer_init_oneshot(unsigned long timer_base)
|
||||
@@ -58,8 +61,8 @@ void timer_init_oneshot(unsigned long timer_base)
|
||||
write(reg, timer_base + SP804_CTRL);
|
||||
}
|
||||
|
||||
void timer_init(unsigned long timer_base)
|
||||
void timer_init(unsigned long timer_base, u32 load_value)
|
||||
{
|
||||
timer_stop(timer_base);
|
||||
timer_init_periodic(timer_base);
|
||||
timer_init_periodic(timer_base, load_value);
|
||||
}
|
||||
|
||||
@@ -56,8 +56,8 @@ void timer_start(unsigned long timer_base);
|
||||
void timer_load(u32 loadval, unsigned long timer_base);
|
||||
u32 timer_read(unsigned long timer_base);
|
||||
void timer_stop(unsigned long timer_base);
|
||||
void timer_init_periodic(unsigned long timer_base);
|
||||
void timer_init_periodic(unsigned long timer_base, u32 load_value);
|
||||
void timer_init_oneshot(unsigned long timer_base);
|
||||
void timer_init(unsigned long timer_base);
|
||||
void timer_init(unsigned long timer_base, u32 load_value);
|
||||
|
||||
#endif /* __SP804_TIMER_H__ */
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include <l4/api/mutex.h>
|
||||
|
||||
struct l4_mutex {
|
||||
unsigned int lock;
|
||||
int lock;
|
||||
} __attribute__((aligned(sizeof(int))));
|
||||
|
||||
|
||||
@@ -28,11 +28,15 @@ int l4_mutex_unlock(struct l4_mutex *m);
|
||||
#define L4_MUTEX_CONTENDED -1
|
||||
#define L4_MUTEX_SUCCESS 0
|
||||
|
||||
/* Mutex states - Any valid tid value is a locked state */
|
||||
/*
|
||||
* Mutex states:
|
||||
* Unlocked = -1, locked = 0, anything above 0 tells
|
||||
* number of contended threads
|
||||
*/
|
||||
#define L4_MUTEX_LOCKED 0
|
||||
#define L4_MUTEX_UNLOCKED -1
|
||||
#define L4_MUTEX(m) \
|
||||
struct l4_mutex m = { L4_MUTEX_UNLOCKED }
|
||||
|
||||
|
||||
|
||||
#endif /* __L4_MUTEX_H__ */
|
||||
|
||||
@@ -15,9 +15,10 @@ int __l4_mutex_lock(void *m, l4id_t tid)
|
||||
loop:
|
||||
__asm__ __volatile__(
|
||||
"ldrex %0, [%1]\n"
|
||||
: "=r"(tmp)
|
||||
: "=&r"(tmp)
|
||||
: "r"(m)
|
||||
);
|
||||
: "memory"
|
||||
);
|
||||
|
||||
if(tmp != L4_MUTEX_UNLOCKED)
|
||||
ret = L4_MUTEX_CONTENDED;
|
||||
@@ -79,19 +80,19 @@ int __l4_mutex_unlock(void *m, l4id_t tid)
|
||||
return ret;
|
||||
}
|
||||
|
||||
u8 l4_atomic_dest_readb(u8 *location)
|
||||
u8 l4_atomic_dest_readb(unsigned long *location)
|
||||
{
|
||||
unsigned int tmp, res;
|
||||
__asm__ __volatile__ (
|
||||
"1: \n"
|
||||
"ldrex %0, [%2] \n"
|
||||
"strex %1, %3, [%2] \n"
|
||||
"teq %1, #0 \n"
|
||||
"bne 1b \n"
|
||||
"1: \n"
|
||||
" ldrex %0, [%2] \n"
|
||||
" strex %1, %3, [%2] \n"
|
||||
" teq %1, #0 \n"
|
||||
" bne 1b \n"
|
||||
: "=&r"(tmp), "=&r"(res)
|
||||
: "r"(location), "r"(0)
|
||||
: "cc", "memory"
|
||||
);
|
||||
);
|
||||
|
||||
return (u8)tmp;
|
||||
}
|
||||
|
||||
@@ -7,42 +7,35 @@
|
||||
|
||||
/*
|
||||
* @r0 = address of mutex word
|
||||
* @r1 = unique tid of current thread
|
||||
*/
|
||||
BEGIN_PROC(__l4_mutex_lock)
|
||||
1:
|
||||
ldrex r2, [r0] @ Load value
|
||||
cmp r2, #L4_MUTEX_UNLOCKED @ Decide what state lock will be if we succeed in a store
|
||||
movne r2, #L4_MUTEX_CONTENDED
|
||||
moveq r2, #L4_MUTEX_SUCCESS
|
||||
ldrex r1, [r0] @ Load value
|
||||
add r1, r1, #1 @ Add 1
|
||||
strex r3, r1, [r0] @ Store prospective lock state
|
||||
cmp r3, #0 @ If not successful
|
||||
@ No WFE. What if this were between 2 threads running on the same cpu
|
||||
bne 1b @ Retry and decide again on the prospective lock state.
|
||||
bne 1b @ Retry and decide again on the prospective lock state. No WFE as this would be a problem on single cpu
|
||||
dsb
|
||||
|
||||
cmp r1, #L4_MUTEX_LOCKED @ We succeeded in store, but are we a locker or a contender?
|
||||
movne r2, #L4_MUTEX_CONTENDED
|
||||
moveq r2, #L4_MUTEX_SUCCESS
|
||||
mov r0, r2
|
||||
mov pc, lr
|
||||
END_PROC(__l4_mutex_lock)
|
||||
|
||||
/*
|
||||
* @r0 = address of mutex word
|
||||
* @r1 = unique tid of current thread
|
||||
*/
|
||||
BEGIN_PROC(__l4_mutex_unlock)
|
||||
dsb
|
||||
push {r4}
|
||||
mov r4, #L4_MUTEX_UNLOCKED
|
||||
mov r3, #L4_MUTEX_UNLOCKED
|
||||
1:
|
||||
ldrex r2, [r0]
|
||||
cmp r2, r1
|
||||
moveq r3, #L4_MUTEX_SUCCESS
|
||||
movne r3, #L4_MUTEX_CONTENDED
|
||||
strex r2, r4, [r0]
|
||||
ldrex r1, [r0]
|
||||
strex r2, r3, [r0]
|
||||
cmp r2, #0
|
||||
bne 1b
|
||||
mov r0, r3
|
||||
pop {r4}
|
||||
mov r0, r1
|
||||
mov pc, lr
|
||||
END_PROC(__l4_mutex_unlock)
|
||||
|
||||
|
||||
|
||||
@@ -51,8 +51,8 @@
|
||||
* - Whether this is the best design - time will tell.
|
||||
*/
|
||||
|
||||
extern int __l4_mutex_lock(void *word, l4id_t tid);
|
||||
extern int __l4_mutex_unlock(void *word, l4id_t tid);
|
||||
extern int __l4_mutex_lock(void *word);
|
||||
extern int __l4_mutex_unlock(void *word);
|
||||
|
||||
void l4_mutex_init(struct l4_mutex *m)
|
||||
{
|
||||
@@ -61,10 +61,9 @@ void l4_mutex_init(struct l4_mutex *m)
|
||||
|
||||
int l4_mutex_lock(struct l4_mutex *m)
|
||||
{
|
||||
l4id_t tid = self_tid();
|
||||
int err;
|
||||
|
||||
while(__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED) {
|
||||
while(__l4_mutex_lock(&m->lock) != L4_MUTEX_SUCCESS) {
|
||||
if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) {
|
||||
printf("%s: Error: %d\n", __FUNCTION__, err);
|
||||
return err;
|
||||
@@ -75,15 +74,14 @@ int l4_mutex_lock(struct l4_mutex *m)
|
||||
|
||||
int l4_mutex_unlock(struct l4_mutex *m)
|
||||
{
|
||||
l4id_t tid = self_tid();
|
||||
int err;
|
||||
int err, contended;
|
||||
|
||||
if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED) {
|
||||
if ((err = l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK)) < 0) {
|
||||
if ((contended = __l4_mutex_unlock(m))) {
|
||||
if ((err = l4_mutex_control(&m->lock,
|
||||
contended | L4_MUTEX_UNLOCK)) < 0) {
|
||||
printf("%s: Error: %d\n", __FUNCTION__, err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user