Mirror of https://github.com/drasko/codezero.git (synced 2026-01-12 02:43:15 +01:00)
Multiple updates on sleeping, vma dropping and thread suspend.
- Updated sleeping paths so that a task is atomically put into a runqueue and made RUNNABLE, or removed from a runqueue and made SLEEPING.
- Modified vma dropping sources to handle both copy_on_write() and exit() cases in a common function.
- Added the first infrastructure for a pager to suspend a task and wait for suspend completion from the scheduler.
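For reference, below is a minimal user-space sketch (not the kernel's actual code) of the invariant the first point describes: a task is either RUNNABLE and on a runqueue, or SLEEPING and off it, and the two fields never change independently. The struct layout and the pthread mutex standing in for the kernel's runqueue lock and preemption disabling are illustrative assumptions only.

/*
 * Minimal sketch of the atomic sleep/resume ordering; simplified
 * stand-in types, not the kernel's real ktcb/runqueue structures.
 */
#include <pthread.h>
#include <stdio.h>

enum task_state { TASK_INACTIVE, TASK_SLEEPING, TASK_RUNNABLE };

struct task {
        enum task_state state;
        int on_runqueue;        /* stand-in for rq_list/rq membership */
};

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dequeue and mark SLEEPING in one critical section (cf. sched_prepare_sleep) */
static void prepare_sleep(struct task *t)
{
        pthread_mutex_lock(&rq_lock);
        t->on_runqueue = 0;
        t->state = TASK_SLEEPING;
        pthread_mutex_unlock(&rq_lock);
}

/* Mark RUNNABLE and enqueue in one critical section (cf. sched_resume_sync/async) */
static void resume(struct task *t)
{
        pthread_mutex_lock(&rq_lock);
        t->state = TASK_RUNNABLE;
        t->on_runqueue = 1;
        pthread_mutex_unlock(&rq_lock);
}

int main(void)
{
        struct task t = { TASK_RUNNABLE, 1 };

        prepare_sleep(&t);
        printf("state=%d on_rq=%d\n", t.state, t.on_runqueue);
        resume(&t);
        printf("state=%d on_rq=%d\n", t.state, t.on_runqueue);
        return 0;
}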
@@ -37,6 +37,7 @@ static inline struct ktcb *current_task(void)
#define need_resched (current->ts_need_resched)

void sched_init_task(struct ktcb *task, int priority);
void sched_prepare_sleep(void);
void sched_resume_sync(struct ktcb *task);
void sched_resume_async(struct ktcb *task);
void scheduler_start(void);

@@ -25,8 +25,7 @@
#define TASK_SUSPENDING (1 << 1)
#define TASK_RESUMING (1 << 2)

/* Scheduler states */
/* Task states */
enum task_state {
        TASK_INACTIVE = 0,
        TASK_SLEEPING = 1,

@@ -52,6 +51,7 @@ struct ktcb {

        /* Runqueue related */
        struct list_head rq_list;
        struct runqueue *rq;

        /* Thread information */
        l4id_t tid;     /* Global thread id */

@@ -91,6 +91,9 @@ struct ktcb {
        struct waitqueue_head wqh_send;
        l4id_t expected_sender;

        /* Waitqueue for pagers to wait for task states */
        struct waitqueue_head wqh_pager;

        /* Tells where we are when we sleep */
        struct spinlock waitlock;
        struct waitqueue_head *waiting_on;

@@ -34,8 +34,40 @@ void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
void task_unset_wqh(struct ktcb *task);

/*
 * Sleep if the given condition isn't true.
 * ret will tell whether condition was met
 * or we got interrupted.
 */
#define WAIT_EVENT(wqh, condition, ret) \
do { \
        ret = 0; \
        for (;;) { \
                spin_lock(&(wqh)->slock); \
                if (condition) { \
                        spin_unlock(&(wqh)->slock); \
                        break; \
                } \
                CREATE_WAITQUEUE_ON_STACK(wq, current); \
                task_set_wqh(current, wqh, &wq); \
                (wqh)->sleepers++; \
                list_add_tail(&wq.task_list, &(wqh)->task_list); \
                printk("(%d) waiting...\n", current->tid); \
                sched_prepare_sleep(); \
                spin_unlock(&(wqh)->slock); \
                schedule(); \
                /* Did we wake up normally or get interrupted */ \
                if (current->flags & TASK_INTERRUPTED) { \
                        current->flags &= ~TASK_INTERRUPTED; \
                        ret = -EINTR; \
                        break; \
                } \
        } \
} while(0);

void wake_up(struct waitqueue_head *wqh, int sync);
int wake_up_task(struct ktcb *task, int sync);
void wake_up_all(struct waitqueue_head *wqh, int sync);

#endif /* __LIB_WAIT_H__ */
@@ -82,6 +82,7 @@ int ipc_send(l4id_t recv_tid)
                /* Remove from waitqueue */
                list_del_init(&wq->task_list);
                wqhr->sleepers--;
                task_unset_wqh(receiver);

                /* Release locks */
                spin_unlock(&wqhr->slock);

@@ -103,7 +104,7 @@ int ipc_send(l4id_t recv_tid)
        wqhs->sleepers++;
        list_add_tail(&wq.task_list, &wqhs->task_list);
        task_set_wqh(current, wqhs, &wq);
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        spin_unlock(&wqhr->slock);
        spin_unlock(&wqhs->slock);
        // printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,

@@ -168,7 +169,7 @@ int ipc_recv(l4id_t senderid)
        wqhr->sleepers++;
        list_add_tail(&wq.task_list, &wqhr->task_list);
        task_set_wqh(current, wqhr, &wq);
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        // printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
        //        current->tid, current->expected_sender);
        spin_unlock(&wqhr->slock);
@@ -87,8 +87,8 @@ void do_exchange_registers(struct ktcb *task, struct exregs_data *exregs)
 * the register context of a thread. The thread's registers can be
 * set only when the thread is in user mode. A newly created thread
 * that is the copy of another thread (forked or cloned) will also
 * be given its user mode context so such threads can also be
 * modified by this call before execution.
 * be given its user mode context on the first chance to execute so
 * such threads can also be modified by this call before execution.
 *
 * A thread executing in the kernel cannot be modified since this
 * would compromise the kernel. Also the thread must be in suspended
@@ -10,6 +10,7 @@
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/lib/mutex.h>
#include <l4/lib/wait.h>
#include <l4/generic/pgalloc.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)

@@ -20,8 +21,68 @@ int sys_thread_switch(syscall_context_t *regs)
        return 0;
}

/*
 * This suspends a thread which is in either suspended,
 * sleeping or runnable state.
 */
int thread_suspend(struct task_ids *ids)
{
        struct ktcb *task;
        int ret;

        if (!(task = find_task(ids->tid)))
                return -ESRCH;

        if (task->state == TASK_INACTIVE)
                return 0;

        /* First show our intention to suspend thread */
        task->flags |= TASK_SUSPENDING;

        /*
         * Interrupt the task in case it was sleeping
         * so that it will be caught and suspended by
         * the scheduler.
         */
        wake_up_task(task, 1);

        /* Wait until scheduler wakes us up */
        WAIT_EVENT(&task->wqh_pager,
                   task->state == TASK_INACTIVE, ret);

        return ret;
}

int thread_destroy(struct task_ids *ids)
{
        struct ktcb *task;
        int ret;

        if (!(task = find_task(ids->tid)))
                return -ESRCH;

        if ((ret = thread_suspend(ids)) < 0)
                return ret;

        /* Delete it from global list so any callers will get -ESRCH */
        list_del(&task->task_list);

        /*
         * If there are any sleepers on any of the task's
         * waitqueues, we need to wake those tasks up.
         */
        wake_up_all(&task->wqh_send, 0);
        wake_up_all(&task->wqh_recv, 0);

        /*
         * The thread cannot have a pager waiting for it
         * since we ought to be the pager.
         */
        BUG_ON(task->wqh_pager.sleepers > 0);

        /* We can now safely delete the task */
        free_page(task);

        return 0;
}

@@ -257,6 +318,7 @@ out:
        /* Initialise ipc waitqueues */
        waitqueue_head_init(&new->wqh_send);
        waitqueue_head_init(&new->wqh_recv);
        waitqueue_head_init(&new->wqh_pager);

        arch_setup_new_thread(new, task, flags);

@@ -290,6 +352,8 @@ int sys_thread_control(syscall_context_t *regs)
        case THREAD_RESUME:
                ret = thread_resume(ids);
                break;
        case THREAD_DESTROY:
                ret = thread_destroy(ids);
        default:
                ret = -EINVAL;
        }
@@ -28,7 +28,6 @@ struct runqueue {
        struct spinlock lock;           /* Lock */
        struct list_head task_list;     /* List of tasks in rq */
        unsigned int total;             /* Total tasks */
        int recalc_timeslice;           /* Need timeslice redistribution */
};

#define SCHED_RQ_TOTAL 2

@@ -136,14 +135,20 @@ static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
        else
                list_add_tail(&task->rq_list, &rq->task_list);
        rq->total++;
        task->rq = rq;
        spin_unlock(&rq->lock);
}

/* NOTE: Do we need an rq_lock on tcb? */

/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task, struct runqueue *rq)
static inline void sched_rq_remove_task(struct ktcb *task)
{
        struct runqueue *rq = task->rq;

        spin_lock(&rq->lock);
        list_del_init(&task->rq_list);
        task->rq = 0;
        rq->total--;

        BUG_ON(rq->total < 0);

@@ -161,11 +166,25 @@ void sched_init_task(struct ktcb *task, int prio)
        task->flags |= TASK_RESUMING;
}

/*
 * Takes all the action that will make a task sleep
 * in the scheduler. If the task is woken up before
 * it schedules, then operations here are simply
 * undone and task remains as runnable.
 */
void sched_prepare_sleep()
{
        preempt_disable();
        sched_rq_remove_task(current);
        current->state = TASK_SLEEPING;
        preempt_enable();
}

/* Synchronously resumes a task */
void sched_resume_sync(struct ktcb *task)
{
        BUG_ON(task == current);
        task->state = TASK_RUNNABLE;

        sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
        schedule();
}

@@ -177,8 +196,8 @@ void sched_resume_sync(struct ktcb *task)
 */
void sched_resume_async(struct ktcb *task)
{
        BUG_ON(task == current);
        task->state = TASK_RUNNABLE;

        sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
}

@@ -251,20 +270,23 @@ void schedule()
        /* Cannot have any irqs that schedule after this */
        preempt_disable();

#if 0
        /* NOTE:
         * We could avoid double-scheduling by detecting a task
         * that's about to schedule voluntarily and skipping the
         * schedule() call in irq mode.
         * We could avoid unnecessary scheduling by detecting
         * a task that has been just woken up.
         */
        if ((task->flags & TASK_WOKEN_UP) && in_process_context()) {
                preempt_enable();
                return 0;
        }
#endif

        /* Reset schedule flag */
        need_resched = 0;

        /* Remove from runnable queue */
        sched_rq_remove_task(current, rq_runnable);

        /* Put it into appropriate runqueue */
        /* Remove from runnable and put into appropriate runqueue */
        if (current->state == TASK_RUNNABLE) {
                sched_rq_remove_task(current);
                if (current->ticks_left)
                        sched_rq_add_task(current, rq_runnable, RQ_ADD_BEHIND);
                else

@@ -277,7 +299,8 @@ void schedule()
         * The task should have no locks and be in a runnable state.
         * (e.g. properly woken up by the suspender)
         */
        if (current->nlocks == 0 && current->state == TASK_RUNNABLE) {
        if (current->nlocks == 0 &&
            current->state == TASK_RUNNABLE) {
                /* Suspend it if suitable */
                current->state = TASK_INACTIVE;
                current->flags &= ~TASK_SUSPENDING;

@@ -290,7 +313,13 @@ void schedule()
                 */
                prio_total -= current->priority;
                BUG_ON(prio_total <= 0);

                /* Prepare to wake up any waiters */
                wake_up(&current->wqh_pager, 0);
        } else {
                if (current->state == TASK_RUNNABLE)
                        sched_rq_remove_task(current);

                /*
                 * Top up task's ticks temporarily, and
                 * wait for it to release its locks.
@@ -317,6 +317,7 @@ void init_pager(char *name, struct task_ids *ids)
        /* Task's rendezvous point */
        waitqueue_head_init(&task->wqh_send);
        waitqueue_head_init(&task->wqh_recv);
        waitqueue_head_init(&task->wqh_pager);

        /* Global hashlist that keeps all existing tasks */
        add_task_global(task);
@@ -51,7 +51,7 @@ void sem_up(struct mutex *mutex)
        INIT_LIST_HEAD(&wq.task_list);
        list_add_tail(&wq.task_list, &mutex->wq.task_list);
        mutex->sleepers++;
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        printk("(%d) produced, now sleeping...\n", current->tid);
        spin_unlock(&mutex->slock);
        schedule();

@@ -89,7 +89,7 @@ void sem_down(struct mutex *mutex)
        INIT_LIST_HEAD(&wq.task_list);
        list_add_tail(&wq.task_list, &mutex->wq.task_list);
        mutex->sleepers++;
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
        spin_unlock(&mutex->slock);
        schedule();

@@ -124,7 +124,7 @@ int mutex_lock(struct mutex *mutex)
        task_set_wqh(current, &mutex->wqh, &wq);
        list_add_tail(&wq.task_list, &mutex->wqh.task_list);
        mutex->wqh.sleepers++;
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        spin_unlock(&mutex->wqh.slock);
        printk("(%d) sleeping...\n", current->tid);
        schedule();

@@ -160,7 +160,6 @@ void mutex_unlock(struct mutex *mutex)
        BUG_ON(list_empty(&mutex->wqh.task_list));
        list_del_init(&wq->task_list);
        mutex->wqh.sleepers--;
        sleeper->state = TASK_RUNNABLE;
        spin_unlock(&mutex->wqh.slock);

        /*
@@ -35,57 +35,6 @@ void task_unset_wqh(struct ktcb *task)

}

/*
 * Sleep if the given condition isn't true.
 * ret will tell whether condition was met
 * or we got interrupted.
 */
#define WAIT_EVENT(wqh, condition, ret) \
do { \
        ret = 0; \
        for (;;) { \
                if (condition) \
                        break; \
                CREATE_WAITQUEUE_ON_STACK(wq, current); \
                spin_lock(&wqh->slock); \
                task_set_wqh(current, wqh, wq); \
                wqh->sleepers++; \
                list_add_tail(&wq.task_list, &wqh->task_list); \
                task->state = TASK_SLEEPING; \
                printk("(%d) waiting...\n", current->tid); \
                spin_unlock(&wqh->slock); \
                schedule(); \
                /* Did we wake up normally or get interrupted */ \
                if (current->flags & TASK_INTERRUPTED) { \
                        current->flags &= ~TASK_INTERRUPTED; \
                        ret = -EINTR; \
                        break; \
                } \
        } \
} while(0);

/* Sleep without any condition */
#define WAIT_ON(wqh, ret) \
do { \
        CREATE_WAITQUEUE_ON_STACK(wq, current); \
        spin_lock(&wqh->slock); \
        task_set_wqh(current, wqh, &wq); \
        wqh->sleepers++; \
        list_add_tail(&wq.task_list, &wqh->task_list); \
        current->state = TASK_SLEEPING; \
        printk("(%d) waiting on wqh at: 0x%p\n", \
               current->tid, wqh); \
        spin_unlock(&wqh->slock); \
        schedule(); \
        \
        /* Did we wake up normally or get interrupted */ \
        if (current->flags & TASK_INTERRUPTED) { \
                current->flags &= ~TASK_INTERRUPTED; \
                ret = -EINTR; \
        } else \
                ret = 0; \
} while(0);

/* Sleep without any condition */
int wait_on(struct waitqueue_head *wqh)
{

@@ -94,7 +43,7 @@ int wait_on(struct waitqueue_head *wqh)
        task_set_wqh(current, wqh, &wq);
        wqh->sleepers++;
        list_add_tail(&wq.task_list, &wqh->task_list);
        current->state = TASK_SLEEPING;
        sched_prepare_sleep();
        printk("(%d) waiting on wqh at: 0x%p\n",
               current->tid, wqh);
        spin_unlock(&wqh->slock);

@@ -109,6 +58,31 @@ int wait_on(struct waitqueue_head *wqh)
        return 0;
}

/* Wake up all */
void wake_up_all(struct waitqueue_head *wqh, int sync)
{
        BUG_ON(wqh->sleepers < 0);
        spin_lock(&wqh->slock);
        while (wqh->sleepers > 0) {
                struct waitqueue *wq = list_entry(wqh->task_list.next,
                                                  struct waitqueue,
                                                  task_list);
                struct ktcb *sleeper = wq->task;
                task_unset_wqh(sleeper);
                BUG_ON(list_empty(&wqh->task_list));
                list_del_init(&wq->task_list);
                wqh->sleepers--;
                sleeper->flags |= TASK_INTERRUPTED;
                printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
                spin_unlock(&wqh->slock);

                if (sync)
                        sched_resume_sync(sleeper);
                else
                        sched_resume_async(sleeper);
        }
        spin_unlock(&wqh->slock);
}

/* Wake up single waiter */
void wake_up(struct waitqueue_head *wqh, int sync)

@@ -124,7 +98,7 @@ void wake_up(struct waitqueue_head *wqh, int sync)
        BUG_ON(list_empty(&wqh->task_list));
        list_del_init(&wq->task_list);
        wqh->sleepers--;
        sleeper->state = TASK_RUNNABLE;
        sleeper->flags |= TASK_INTERRUPTED;
        printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
        spin_unlock(&wqh->slock);

@@ -152,6 +126,8 @@ int wake_up_task(struct ktcb *task, int sync)
                spin_unlock(&task->waitlock);
                return -1;
        }
        wqh = task->waiting_on;
        wq = task->wq;

        /*
         * We have found the waitqueue head.

@@ -159,8 +135,6 @@ int wake_up_task(struct ktcb *task, int sync)
         * lock order and avoid deadlocks. Release task's
         * waitlock and take the wqh's one.
         */
        wqh = task->waiting_on;
        wq = task->wq;
        spin_unlock(&task->waitlock);

        /* -- Task can be woken up by someone else here -- */

@@ -184,11 +158,15 @@ int wake_up_task(struct ktcb *task, int sync)
        wqh->sleepers--;
        task->waiting_on = 0;
        task->wq = 0;
        task->state = TASK_RUNNABLE;
        task->flags |= TASK_INTERRUPTED;
        spin_unlock(&wqh->slock);
        spin_unlock(&task->waitlock);

        /* Removed from waitqueue, we can now safely resume task */
        /*
         * Task is removed from its waitqueue. Now we can
         * safely resume it without locks as this is the only
         * code path that can resume the task.
         */
        if (sync)
                sched_resume_sync(task);
        else
@@ -30,4 +30,5 @@ int pager_update_stats(struct tcb *sender, unsigned long vnum,
int pager_notify_fork(struct tcb *sender, l4id_t parid,
                      l4id_t chid, unsigned long utcb_address);

int pager_notify_exit(struct tcb *sender, l4id_t tid);
#endif /* __FS0_SYSCALLS_H__ */

@@ -113,6 +113,9 @@ void handle_fs_requests(void)
                ret = pager_notify_fork(sender, (l4id_t)mr[0], (l4id_t)mr[1],
                                        (unsigned long)mr[2]);
                break;
        case L4_IPC_TAG_NOTIFY_EXIT:
                ret = pager_notify_exit(sender, (l4id_t)mr[0]);
                break;

        default:
                printf("%s: Unrecognised ipc tag (%d) "
@@ -33,46 +33,6 @@ struct tcb *find_task(int tid)
        return 0;
}

/*
 * Asks pager to send information about currently running tasks. Since this is
 * called during initialisation, there can't be that many, so we assume message
 * registers are sufficient. First argument tells how many there are, the rest
 * tells the tids.
 */
int receive_pager_taskdata_orig(l4id_t *tdata)
{
        int err;

        /* Make the actual ipc call */
        if ((err = l4_sendrecv(PAGER_TID, PAGER_TID,
                               L4_IPC_TAG_TASKDATA)) < 0) {
                printf("%s: L4 IPC Error: %d.\n", __FUNCTION__, err);
                return err;
        }

        /* Check if call itself was successful */
        if ((err = l4_get_retval()) < 0) {
                printf("%s: Error: %d.\n", __FUNCTION__, err);
                return err;
        }

        /* Read total number of tasks. Note already used one mr. */
        if ((tdata[0] = (l4id_t)read_mr(L4SYS_ARG0)) >= MR_UNUSED_TOTAL) {
                printf("%s: Error: Too many tasks to read. Won't fit in mrs.\n",
                       __FUNCTION__);
                BUG();
        }
        // printf("%s: %d Total tasks.\n", __FUNCTION__, tdata[0]);

        /* Now read task ids. */
        for (int i = 0; i < (int)tdata[0]; i++) {
                tdata[1 + i] = (l4id_t)read_mr(L4SYS_ARG1 + i);
                // printf("%s: Task id: %d\n", __FUNCTION__, tdata[1 + i]);
        }

        return 0;
}

/* Allocate a task struct and initialise it */
struct tcb *create_tcb(void)
{

@@ -89,6 +49,15 @@ struct tcb *create_tcb(void)
        return t;
}

void destroy_tcb(struct tcb *t)
{
        kfree(t->fdpool);

        list_del(&t->list);
        tcb_head.total--;
        kfree(t);
}

/*
 * Attaches to task's utcb. FIXME: Add SHM_RDONLY and test it.
 * FIXME: This calls the pager and is a potential for deadlock

@@ -156,6 +125,24 @@ int pager_notify_fork(struct tcb *sender, l4id_t parid,
}

/*
 * Pager tells us that a task is exiting by this call.
 */
int pager_notify_exit(struct tcb *sender, l4id_t tid)
{
        struct tcb *task;

        printf("%s/%s\n", __TASKNAME__, __FUNCTION__);
        BUG_ON(!(task = find_task(tid)));

        destroy_tcb(task);

        printf("%s/%s: Exiting...\n", __TASKNAME__, __FUNCTION__);

        return 0;
}

/* Read task information into the utcb page, since it won't fit into mrs. */
struct task_data_head *receive_pager_taskdata(void)
{
@@ -59,6 +59,6 @@
#define L4_IPC_TAG_PAGER_CLOSE 44 /* Pager notifies vfs of file close */
#define L4_IPC_TAG_PAGER_UPDATE_STATS 45 /* Pager updates file stats in vfs */
#define L4_IPC_TAG_NOTIFY_FORK 46 /* Pager notifies vfs of process fork */
#define L4_IPC_TAG_NOTIFY_EXIT 46 /* Pager notifies vfs of process exit */
#define L4_IPC_TAG_NOTIFY_EXIT 47 /* Pager notifies vfs of process exit */

#endif /* __IPCDEFS_H__ */
@@ -121,10 +121,12 @@ int task_setup_regions(struct vm_file *file, struct tcb *task,
int task_setup_registers(struct tcb *task, unsigned int pc,
                         unsigned int sp, l4id_t pager);
struct tcb *tcb_alloc_init(unsigned int flags);
int tcb_destroy(struct tcb *task);
int task_exec(struct vm_file *f, unsigned long task_region_start,
              unsigned long task_region_end, struct task_ids *ids);
int task_start(struct tcb *task, struct task_ids *ids);
int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags);
int task_release_vmas(struct task_vma_head *vma_head);
struct tcb *task_create(struct tcb *orig,
                        struct task_ids *ids,
                        unsigned int ctrl_flags,

@@ -139,9 +139,9 @@ struct vm_object {
/* In memory representation of either a vfs file, a device. */
struct vm_file {
        int openers;
        struct list_head list;
        unsigned long length;
        unsigned int type;
        struct list_head list;
        struct vm_object vm_obj;
        void *priv_data; /* Device pagers use to access device info */
};

@@ -230,6 +230,7 @@ struct vm_file *vm_file_alloc_init(void);
struct vm_object *vm_object_alloc_init(void);
struct vm_object *vm_object_create(void);
struct vm_file *vm_file_create(void);
int vm_file_delete(struct vm_file *f);
int vm_object_delete(struct vm_object *vmo);
void vm_object_print(struct vm_object *vmo);

@@ -254,4 +255,7 @@ static inline void task_add_vma(struct tcb *task, struct vm_area *vma)
/* Main page fault entry point */
int page_fault_handler(struct tcb *faulty_task, fault_kdata_t *fkdata);

int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link);
int vma_drop_merge_delete_all(struct vm_area *vma);

#endif /* __VM_AREA_H__ */
@@ -22,7 +22,7 @@ int vfs_notify_fork(struct tcb *child, struct tcb *parent)
{
        int err = 0;

        printf("%s/%s\n", __TASKNAME__, __FUNCTION__);
        // printf("%s/%s\n", __TASKNAME__, __FUNCTION__);

        l4_save_ipcregs();
@@ -60,36 +60,18 @@ struct vm_obj_link *vma_next_link(struct list_head *link,
}

/* Unlinks orig_link from its vma and deletes it but keeps the object. */
struct vm_object *vma_drop_link(struct vm_obj_link *shadower_link,
                                struct vm_obj_link *orig_link)
struct vm_object *vma_drop_link(struct vm_obj_link *link)
{
        struct vm_object *dropped = orig_link->obj;
        struct vm_object *dropped;

        /* Remove object link from vma's list */
        list_del(&orig_link->list);
        list_del(&link->list);

        /* Unlink the link from object */
        vm_unlink_object(orig_link);

        /*
         * Reduce object's shadow count since its not shadowed
         * by this shadower anymore.
         *
         * FIXME: Is every object drop because of shadows???
         * What about exiting tasks?
         *
         */
        dropped->shadows--;

        /*
         * Remove the shadower from original's shadower list.
         * We know shadower is deleted from original's list
         * because each shadow can shadow a single object.
         */
        list_del(&shadower_link->obj->shref);
        dropped = vm_unlink_object(link);

        /* Delete the original link */
        kfree(orig_link);
        kfree(link);

        return dropped;
}

@@ -128,8 +110,8 @@ int vm_object_is_subset(struct vm_object *shadow,
static inline int vm_object_is_droppable(struct vm_object *shadow,
                                         struct vm_object *original)
{
        if (vm_object_is_subset(shadow, original)
            && (original->flags & VM_OBJ_SHADOW))
        if (shadow->npages == original->npages &&
            (original->flags & VM_OBJ_SHADOW))
                return 1;
        else
                return 0;

@@ -191,8 +173,9 @@ int vma_merge_object(struct vm_object *redundant)
        /* Find, unlink and delete the last link for the object */
        last_link = list_entry(redundant->link_list.next,
                               struct vm_obj_link, linkref);
        vm_unlink_object(last_link);
        kfree(last_link);

        /* Drop the last link to the object */
        vma_drop_link(last_link);

        /* Redundant shadow has no shadows anymore */
        redundant->shadows--;

@@ -266,54 +249,136 @@ struct page *copy_to_new_page(struct page *orig)
 * Drops a link to an object if possible, and if it has dropped it,
 * decides and takes action on the dropped object, depending on
 * how many links and shadows it has left, and the type of the object.
 * This covers both copy_on_write() shadow drops and exit() cases.
 */
int vma_drop_merge_delete(struct vm_obj_link *shadow_link,
                          struct vm_obj_link *orig_link)
int vma_drop_merge_delete(struct vm_area *vma, struct vm_obj_link *link)
{
        /* Can we drop one link? */
        if (vm_object_is_droppable(shadow_link->obj, orig_link->obj)) {
                struct vm_object *dropped;
        struct vm_obj_link *prev, *next;
        struct vm_object *obj;

                dprintf("VM OBJECT is a subset of its shadow.\nShadow:\n");
                vm_object_print(shadow_link->obj);
                dprintf("Original:\n");
                vm_object_print(orig_link->obj);
        /* Get previous and next links, if they exist */
        prev = (link->list.prev == &vma->vm_obj_list) ? 0 :
                list_entry(link->list.prev, struct vm_obj_link, list);

                /* We can drop the link to original object */
                dropped = vma_drop_link(shadow_link, orig_link);
                dprintf("Dropped link to object:\n");
                vm_object_print(dropped);
                orig_link = 0;
        next = (link->list.next == &vma->vm_obj_list) ? 0 :
                list_entry(link->list.next, struct vm_obj_link, list);

        /* Drop the link */
        obj = vma_drop_link(link);

        /*
         * If there was an object in front, that implies it was
         * a shadow. Current object has lost it, so deduce it.
         */
        if (prev) {
                BUG_ON(!(prev->obj->flags & VM_OBJ_SHADOW));
                obj->shadows--;
                list_del_init(&prev->obj->shref);
        }

        /*
         * If there was an object after, that implies current object
         * is a shadow, deduce it from the object after.
         */
        if (next && obj->flags & VM_OBJ_SHADOW) {
                BUG_ON(obj->orig_obj != next->obj);
                next->obj->shadows--;
                list_del_init(&obj->shref);

                /*
                 * Now decide on what to do with the dropped object:
                 * merge, delete, or do nothing.
                 * Furthermore, if there was an object in front,
                 * that means front will become a shadow of after.
                 */

                /* If it's not a shadow, we're not to touch it */
                if (!(dropped->flags & VM_OBJ_SHADOW))
                        return 0;

                /* If the object has no links left, we can delete it */
                if (dropped->nlinks == 0) {
                        BUG_ON(dropped->shadows != 0);
                        dprintf("Deleting object:\n");
                        vm_object_print(dropped);
                        vm_object_delete(dropped);
                }

                /*
                 * Only one link and one shadow left.
                 * Merge it with its only shadow
                 */
                if (dropped->nlinks == 1 &&
                    dropped->shadows == 1) {
                        dprintf("Merging object:\n");
                        vm_object_print(dropped);
                        vma_merge_object(dropped);
                if (prev) {
                        list_add(&prev->obj->shref,
                                 &next->obj->shdw_list);
                        prev->obj->orig_obj = next->obj;
                        next->obj->shadows++;
                }
        }

        /* Now deal with the object itself */

        /* If it's not a shadow, we're not to touch it.
         *
         * TODO: In the future we can check if a vm_file's
         * openers are 0 and take action here. (i.e. keep,
         * delete or swap it)
         */
        if (!(obj->flags & VM_OBJ_SHADOW))
                return 0;

        /* If the object has no links left, we can delete it */
        if (obj->nlinks == 0) {
                BUG_ON(obj->shadows != 0);
                dprintf("Deleting object:\n");
                vm_object_print(obj);
                vm_object_delete(obj);
        }

        /*
         * Only one link and one shadow left.
         * Merge it with its only shadow.
         *
         * FIXME: Currently this is an optimisation that needs to go
         * away when swapping is available. We have this solely because
         * currently a shadow needs to identically mirror the whole
         * object underneath, in order to drop it. A file that is 1MB
         * long would spend 2MB until dropped. When swapping is available,
         * we will go back to identical mirroring instead of merging the
         * last shadow, since most unused pages would be swapped out.
         */
        if (obj->nlinks == 1 &&
            obj->shadows == 1) {
                dprintf("Merging object:\n");
                vm_object_print(obj);
                vma_merge_object(obj);
        }

        return 0;
}

/*
 * A scenario that pretty much covers every exit() case.
 *
 * T = vma on a unique task
 * l = link
 * Sobj = Shadow object
 * Fobj = File object
 *
 * Every l links to the object on the nearest
 * row to it and on the same column.
 *
 * l  l  l  l  l  l  T
 * Sobj  Sobj
 *
 * Sobj  Sobj  Sobj  Fobj
 *
 * Sobj  Sobj
 * l  l  l  l  l  l  T
 *
 * l  l  l  l  l  l  l  T
 * Sobj
 *
 */

/* This version is used when exiting. */
int vma_drop_merge_delete_all(struct vm_area *vma)
{
        struct vm_obj_link *vmo_link;

        /* Get the first link on the vma */
        BUG_ON(list_empty(&vma->vm_obj_list));
        vmo_link = list_entry(vma->vm_obj_list.next,
                              struct vm_obj_link, list);

        /* Traverse and get rid of all links */
        do {
                vma_drop_merge_delete(vma, vmo_link);

        } while((vmo_link = vma_next_link(&vmo_link->list,
                                          &vma->vm_obj_list)));

        return 0;
}

@@ -421,11 +486,11 @@ struct page *copy_on_write(struct fault_data *fault)

        /* Update page details */
        spin_lock(&new_page->lock);
        BUG_ON(!list_empty(&new_page->list));
        new_page->refcnt = 0;
        new_page->owner = shadow_link->obj;
        new_page->offset = file_offset;
        new_page->virtual = 0;
        BUG_ON(!list_empty(&new_page->list));
        spin_unlock(&page->lock);

        /* Add the page to owner's list of in-memory pages */

@@ -446,7 +511,8 @@ struct page *copy_on_write(struct fault_data *fault)
                        printf("Copier must have had an object under it!\n");
                        BUG();
                }
                vma_drop_merge_delete(shadow_link, vmo_link);
                if (vm_object_is_droppable(shadow_link->obj, vmo_link->obj))
                        vma_drop_merge_delete(vma, vmo_link);
        }

        return new_page;
@@ -158,8 +158,11 @@ int do_open(struct tcb *task, int fd, unsigned long vnum, unsigned long length)
        task->files->fd[fd].vmfile = vmfile;
        vmfile->openers++;

        /* Add to global list */
        list_add(&vmfile->vm_obj.list, &vm_file_list);
        /* Add to file list */
        list_add(&vmfile->list, &vm_file_list);

        /* Add to object list */
        list_add(&vmfile->vm_obj.list, &vm_object_list);

        return 0;
}

@@ -399,7 +402,8 @@ int do_close(struct tcb *task, int fd)
                return err;

        /* Reduce file's opener count */
        task->files->fd[fd].vmfile->openers--;
        if (!(--task->files->fd[fd].vmfile->openers))
                vm_file_delete(task->files->fd[fd].vmfile);

        task->files->fd[fd].vnum = 0;
        task->files->fd[fd].cursor = 0;
@@ -46,18 +46,15 @@ int default_release_pages(struct vm_object *vm_obj)
        struct page *p, *n;

        list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
                list_del(&p->list);
                list_del_init(&p->list);
                BUG_ON(p->refcnt);

                /* Reinitialise the page */
                page_init(p);

                /* Return page back to allocator */
                free_page((void *)page_to_phys(p));

                /*
                 * Reset the page structure.
                 * No freeing for page_array pages
                 */
                memset(p, 0, sizeof(*p));

                /* Reduce object page count */
                BUG_ON(--vm_obj->npages < 0);
        }

@@ -103,7 +100,7 @@ int file_page_out(struct vm_object *vm_obj, unsigned long page_offset)
        /* Unmap it from vfs */
        l4_unmap(vaddr, 1, VFS_TID);

        /* Update page details */
        /* Clear dirty flag */
        page->flags &= ~VM_DIRTY;

        return 0;

@@ -197,6 +197,7 @@ struct vm_file *shm_new(key_t key, unsigned long npages)
        shm_file->vm_obj.pager = &swap_pager;
        shm_file->vm_obj.flags = VM_OBJ_FILE | VM_WRITE;

        /* Add to shm file and global object list */
        list_add(&shm_file->list, &shm_file_list);
        list_add(&shm_file->vm_obj.list, &vm_object_list);
@@ -104,6 +104,16 @@ struct tcb *tcb_alloc_init(unsigned int flags)
        return task;
}

/* NOTE: We may need to delete shared tcb parts here as well. */
int tcb_destroy(struct tcb *task)
{
        list_del(&task->list);
        tcb_head.total--;

        kfree(task);

        return 0;
}

/*
 * Copy all vmas from the given task and populate each with

@@ -147,6 +157,27 @@ int copy_vmas(struct tcb *to, struct tcb *from)
        return 0;
}

/*
 * Traverse all vmas, release all links to vm_objects.
 * Used when a task or thread group with a shared vm is exiting.
 */
int task_release_vmas(struct task_vma_head *vma_head)
{
        struct vm_area *vma, *n;

        list_for_each_entry_safe(vma, n, &vma_head->list, list) {
                /* Release all links */
                vma_drop_merge_delete_all(vma);

                /* Delete the vma from task's vma list */
                list_del(&vma->list);

                /* Free the vma */
                kfree(vma);
        }
        return 0;
}

int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags)
{
        /* Copy program segment boundary information */
@@ -133,6 +133,9 @@ int vm_object_delete(struct vm_object *vmo)
        /* Obtain and free via the base object */
        if (vmo->flags & VM_OBJ_FILE) {
                f = vm_object_to_file(vmo);
                BUG_ON(!list_empty(&f->list));
                if (f->priv_data)
                        kfree(f->priv_data);
                kfree(f);
        } else if (vmo->flags & VM_OBJ_SHADOW)
                kfree(vmo);

@@ -141,3 +144,12 @@ int vm_object_delete(struct vm_object *vmo)
        return 0;
}

int vm_file_delete(struct vm_file *f)
{
        /* Delete it from global file list */
        list_del_init(&f->list);

        /* Delete file via base object */
        return vm_object_delete(&f->vm_obj);
}