mirror of
https://github.com/drasko/codezero.git
synced 2026-01-12 10:53:16 +01:00
A new scheduler replaces the old one. - There are no sched_xxx_notify() calls that ask scheduler to change task state. - Tasks now have priorities and different timeslices. - One second interval is distributed among processes. - There are just runnable and expired queues. - SCHED_GRANULARITY determines a maximum running boundary for tasks. - Scheduler can now detect a safe point and suspend a task. Interruptible blocking is implemented. - Mutexes, waitqueues and ipc are modified to have an interruptible nature. - Sleep information is stored on the ktcb. (which waitqueue? etc.)
45 lines
912 B
C
45 lines
912 B
C
/*
 * The elementary concurrency constructs.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
|
|
|
|
#ifndef __LIB_MUTEX_H__
|
|
#define __LIB_MUTEX_H__
|
|
|
|
#include <l4/lib/string.h>
|
|
#include <l4/lib/spinlock.h>
|
|
#include <l4/lib/list.h>
|
|
#include <l4/lib/printk.h>
|
|
#include <l4/lib/wait.h>
|
|
#include INC_ARCH(mutex.h)
|
|
|
|
/* A mutex is a binary semaphore that can sleep. */
struct mutex {
	/* Tasks that block while acquiring the mutex sleep on this
	 * waitqueue until a holder releases it (see mutex_lock/unlock). */
	struct waitqueue_head wqh;
	/* Lock count, manipulated via mutex_inc()/mutex_dec() below.
	 * NOTE(review): mutex_init() zeroes it, so 0 presumably means
	 * "unlocked" -- confirm against lib/mutex.c. */
	unsigned int lock;
};
|
|
|
|
static inline void mutex_init(struct mutex *mutex)
|
|
{
|
|
memset(mutex, 0, sizeof(struct mutex));
|
|
waitqueue_head_init(&mutex->wqh);
|
|
}
|
|
|
|
/* Attempt to acquire the mutex without blocking; the int return reports
 * success or failure (exact convention defined in lib/mutex.c). */
int mutex_trylock(struct mutex *mutex);

/* Acquire the mutex, sleeping on mutex->wqh when it is contended.
 * NOTE(review): blocking here is described as interruptible, so the int
 * return presumably signals interruption -- confirm in lib/mutex.c. */
int mutex_lock(struct mutex *mutex);

/* Release the mutex; presumably wakes a task sleeping on mutex->wqh,
 * if any -- see lib/mutex.c. */
void mutex_unlock(struct mutex *mutex);
|
|
|
|
/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */

/* Bump the lock count by one and report the updated value. */
static inline int mutex_inc(unsigned int *cnt)
{
	*cnt += 1;
	return (int)*cnt;
}
|
|
|
|
/* Drop the lock count by one and report the updated value.
 * Counterpart of mutex_inc(); callers serialize via spinlocks. */
static inline int mutex_dec(unsigned int *cnt)
{
	*cnt -= 1;
	return (int)*cnt;
}
|
|
|
|
#endif /* __LIB_MUTEX_H__ */
|