New scheduler and interruptible blocking.

A new scheduler replaces the old one.
  - The sched_xxx_notify() calls that asked the scheduler to change task state are gone.
  - Tasks now have priorities and per-task timeslices.
  - A one-second interval is distributed among processes as timeslices (a rough sketch follows this list).
  - There are just two queues: runnable and expired.
  - SCHED_GRANULARITY sets an upper bound on how long a task may run before the scheduler steps in.
  - The scheduler can now detect a safe point and suspend a task.
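To make the timeslice idea concrete, here is a minimal, self-contained sketch of distributing a one-second interval among tasks in proportion to priority. It is not the kernel's code: the tick rate, the task structure and the function names are all invented for illustration.

/* Illustrative sketch only -- not the actual scheduler. */
#include <stdio.h>

#define SCHED_TICKS_PER_SECOND	100	/* assumed tick rate */
#define NTASKS			3

struct task {
	int priority;	/* higher value = larger share of the interval */
	int ticks_left;	/* remaining timeslice, in ticks */
};

/* Split the one-second interval among tasks, weighted by priority. */
static void distribute_interval(struct task *tasks, int ntasks)
{
	int total_priority = 0;

	for (int i = 0; i < ntasks; i++)
		total_priority += tasks[i].priority;

	for (int i = 0; i < ntasks; i++)
		tasks[i].ticks_left = SCHED_TICKS_PER_SECOND
				      * tasks[i].priority / total_priority;
}

int main(void)
{
	struct task tasks[NTASKS] = { { 1, 0 }, { 2, 0 }, { 5, 0 } };

	distribute_interval(tasks, NTASKS);

	for (int i = 0; i < NTASKS; i++)
		printf("task %d: priority %d -> %d ticks\n",
		       i, tasks[i].priority, tasks[i].ticks_left);
	return 0;
}

When a runnable task exhausts its ticks it would move to the expired queue; once the runnable queue empties, the two queues would swap and a fresh interval be distributed.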

Interruptible blocking is implemented.
  - Mutexes, waitqueues and ipc are modified so that blocking on them can be interrupted.
  - Sleep information (e.g. which waitqueue a task is sleeping on) is stored on the ktcb; a sketch of the idea follows.
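The general shape of such an interruptible wait might look like the sketch below. Everything in it -- the field names, the TASK_* states and the helper functions -- is a stand-in invented for the example, not the kernel's real code.

/* Illustrative sketch only: an interruptible sleep on a waitqueue. */
struct waitqueue_head;			/* opaque here */

struct ktcb {
	struct waitqueue_head *waiting_on;	/* which queue we sleep on, if any */
	int wait_error;				/* 0 = normal wakeup, negative = interrupted */
	int state;
};

enum { TASK_RUNNABLE, TASK_SLEEPING };

/* Hypothetical stand-ins for the real queue and scheduler calls. */
static void enqueue_sleeper(struct waitqueue_head *wqh, struct ktcb *task) { (void)wqh; (void)task; }
static void schedule(void) { }

int wait_on_interruptible(struct waitqueue_head *wqh, struct ktcb *current_task)
{
	/* Record the sleep information on the ktcb so a waker (or an
	 * interrupting thread) can find us and pull us off the queue. */
	current_task->waiting_on = wqh;
	current_task->wait_error = 0;
	current_task->state = TASK_SLEEPING;
	enqueue_sleeper(wqh, current_task);

	schedule();	/* resumes here once woken */

	/* The waker cleared waiting_on and set wait_error to 0 for a
	 * normal wakeup, or to a negative error if we were interrupted. */
	return current_task->wait_error;
}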
Bahadir Balban
2008-10-01 12:43:44 +03:00
parent c54d505709
commit f6d0a79298
21 changed files with 681 additions and 429 deletions


@@ -2,5 +2,6 @@
#define __LIB_MATH_H__
#define min(x, y) (((x) < (y)) ? x : y)
#define max(x, y) (((x) > (y)) ? x : y)
#endif /* __LIB_MATH_H__ */
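A usage note on these two macros: the arguments are substituted textually, so each may be evaluated more than once and should be free of side effects. A tiny self-contained example (macros repeated so it stands alone):

#include <stdio.h>

#define min(x, y) (((x) < (y)) ? x : y)
#define max(x, y) (((x) > (y)) ? x : y)

int main(void)
{
	int a = 3, b = 7;

	printf("min=%d max=%d\n", min(a, b), max(a, b));	/* min=3 max=7 */

	/* Avoid min(i++, j): after expansion i++ appears twice and i can
	 * end up incremented twice. */
	return 0;
}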


@@ -16,20 +16,18 @@
/* A mutex is a binary semaphore that can sleep. */
struct mutex {
-	int sleepers;		/* Number of sleepers */
-	struct spinlock slock;	/* Locks sleeper queue */
-	unsigned int lock;	/* The mutex lock itself */
-	struct waitqueue wq;	/* Sleeper queue head */
+	struct waitqueue_head wqh;
+	unsigned int lock;
};

static inline void mutex_init(struct mutex *mutex)
{
	memset(mutex, 0, sizeof(struct mutex));
-	INIT_LIST_HEAD(&mutex->wq.task_list);
+	waitqueue_head_init(&mutex->wqh);
}

int mutex_trylock(struct mutex *mutex);
-void mutex_lock(struct mutex *mutex);
+int mutex_lock(struct mutex *mutex);
void mutex_unlock(struct mutex *mutex);

/* NOTE: Since spinlocks guard mutex acquiring & sleeping, no locks needed */
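The change above replaces the mutex's private sleeper bookkeeping with a waitqueue_head, and mutex_lock() now returns int so that a sleeping waiter can bail out when interrupted instead of blocking forever. A rough sketch of what such a lock loop could look like; the helpers (mutex_test_and_set, wait_on_mutex_interruptible) are hypothetical stand-ins, not the kernel's real internals:

/* Illustrative sketch only. */
struct mutex;	/* as declared in the header above */

/* Hypothetical helpers, declared so the sketch is self-contained. */
extern int mutex_test_and_set(struct mutex *mutex);		/* non-zero = acquired */
extern int wait_on_mutex_interruptible(struct mutex *mutex);	/* 0, or a negative error */

int mutex_lock_sketch(struct mutex *mutex)
{
	for (;;) {
		if (mutex_test_and_set(mutex))
			return 0;		/* lock acquired */

		/* Sleep on the mutex's waitqueue; an interrupting wakeup
		 * makes this return a negative error instead of 0. */
		int err = wait_on_mutex_interruptible(mutex);
		if (err < 0)
			return err;		/* propagate the interruption */
	}
}

The visible consequence for callers is that the return value of mutex_lock() must now be checked rather than assumed to be success.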


@@ -10,23 +10,16 @@ struct waitqueue {
	struct ktcb *task;
};

-#define DECLARE_WAITQUEUE(wq, tsk) \
+#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \
struct waitqueue wq = { \
	.task_list = { &wq.task_list, &wq.task_list }, \
	.task = tsk, \
};
-	// LIST_HEAD_INIT(task_list),

-/*
- * The waitqueue spinlock ensures waiters are added and removed atomically so
- * that wake-ups and sleeps occur in sync. Otherwise, a task could try to wake
- * up a waitqueue **during when a task has decided to sleep but is not in the
- * queue yet. (** Take "during" here as a pseudo-concurrency term on UP)
- */
struct waitqueue_head {
	int sleepers;
-	struct spinlock slock;	/* Locks sleeper queue */
-	struct list_head task_list; /* Sleeper queue head */
+	struct spinlock slock;
+	struct list_head task_list;
};
static inline void waitqueue_head_init(struct waitqueue_head *head)
@@ -35,11 +28,14 @@ static inline void waitqueue_head_init(struct waitqueue_head *head)
	INIT_LIST_HEAD(&head->task_list);
}

/*
 * Used for ipc related waitqueues who have special wait queue manipulation
 * conditions.
 */
-void wake_up(struct waitqueue_head *wqh);
+void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
+		  struct waitqueue *wq);
+void task_unset_wqh(struct ktcb *task);
+
+void wake_up(struct waitqueue_head *wqh, int sync);
+int wake_up_task(struct ktcb *task, int sync);

#endif /* __LIB_WAIT_H__ */
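Tying the new declarations together: task_set_wqh() and task_unset_wqh() record and clear, on the ktcb, which waitqueue (and waitqueue_head) a task is sleeping on, and wake_up()/wake_up_task() now take a sync flag. One purely illustrative shape for the waker side, with dequeue_first_sleeper() and sched_resume_task() as made-up stand-ins for the real queue and scheduler internals:

/* Illustrative sketch only. */
struct waitqueue_head;	/* as declared in the header above */
struct ktcb;

/* Hypothetical helpers, declared so the sketch is self-contained. */
extern struct ktcb *dequeue_first_sleeper(struct waitqueue_head *wqh);
extern void task_unset_wqh(struct ktcb *task);
extern void sched_resume_task(struct ktcb *task, int sync);

void wake_up_sketch(struct waitqueue_head *wqh, int sync)
{
	/* Take one sleeper off the queue, if there is one... */
	struct ktcb *sleeper = dequeue_first_sleeper(wqh);

	if (!sleeper)
		return;

	/* ...clear the sleep information it recorded on its ktcb... */
	task_unset_wqh(sleeper);

	/* ...and hand it back to the scheduler; sync could mean
	 * "switch to the woken task right away". */
	sched_resume_task(sleeper, sync);
}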