Reimplemented space handling by introducing an address_space structure.

- Fixed potential concurrency bugs due to preemption being enabled.
- Introduced a new address space structure to better account for address spaces and page tables.
- Currently executes fine up to forking. Will investigate.
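Concretely, the concurrency fix below: the old find_task() walked global_task_list with preemption enabled and no locking, so a walker could be preempted by thread_destroy(), which unlinked and freed the very node under it. The new tcb_find()/tcb_add()/tcb_remove() helpers take a spinlock around every list walk, and address space refcounts get their own mutex. A sketch of the window being closed (hypothetical interleaving, not code from this commit):

	thread A                        thread B
	--------                        --------
	find_task(5)
	  task = node with tid 5
	  ...preempted...               thread_destroy(5)
	                                  list_del(&task->task_list)
	                                  free_page(task)
	  ...resumes...
	  task->tid                     <- use-after-free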
@@ -137,6 +137,8 @@ void arch_hardware_flush(pgd_table_t *pgd);
void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
			      unsigned int size, unsigned int flags);

struct address_space;
int copy_user_tables(struct address_space *new, struct address_space *orig);
pgd_table_t *copy_page_tables(pgd_table_t *from);
void remap_as_pages(void *vstart, void *vend);

@@ -21,15 +21,27 @@
#if defined (__KERNEL__)

#include <l4/lib/list.h>
#include <l4/lib/mutex.h>
#include INC_SUBARCH(mm.h)

/* A simple page table with a reference count */
struct address_space {
	l4id_t spid;
	struct list_head list;
	struct mutex lock;
	pgd_table_t *pgd;
	int ktcb_refs;
};

struct address_space *address_space_create(struct address_space *orig);
void address_space_delete(struct address_space *space);
void address_space_attach(struct ktcb *tcb, struct address_space *space);
struct address_space *address_space_find(l4id_t spid);
void address_space_add(struct address_space *space);
void address_space_remove(struct address_space *space);
void address_space_reference_lock(void);
void address_space_reference_unlock(void);
void init_address_space_list(void);
int check_access(unsigned long vaddr, unsigned long size, unsigned int flags);

#endif

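Read together, these declarations imply a create/attach/add lifecycle carried out under the global reference lock. A minimal usage sketch (hypothetical caller, mirroring the thread_setup_space() added further down; unwinding reduced to the essentials):

	/* Hypothetical: give a new tcb a private copy of an existing space */
	int attach_copied_space(struct ktcb *tcb, l4id_t spid)
	{
		struct address_space *orig, *new;
		int ret = 0;

		/* Serialises refcount updates and space deletion */
		address_space_reference_lock();

		if (!(orig = address_space_find(spid))) {
			ret = -ESRCH;		/* no such space */
			goto out;
		}

		/* Shared kernel entries, private copy of user tables */
		if (IS_ERR(new = address_space_create(orig))) {
			ret = (int)new;
			goto out;
		}

		address_space_attach(tcb, new);	/* tcb->space = new, refs++ */
		address_space_add(new);		/* now visible to _find() */
	out:
		address_space_reference_unlock();
		return ret;
	}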
@@ -109,24 +109,6 @@ union ktcb_union {
	char kstack[PAGE_SIZE];
};

-/* For traversing global task list */
-extern struct list_head global_task_list;
-static inline struct ktcb *find_task(l4id_t tid)
-{
-	struct ktcb *task;
-
-	list_for_each_entry(task, &global_task_list, task_list)
-		if (task->tid == tid)
-			return task;
-	return 0;
-}
-
-static inline int add_task_global(struct ktcb *new)
-{
-	INIT_LIST_HEAD(&new->task_list);
-	list_add(&new->task_list, &global_task_list);
-	return 0;
-}

/*
 * Each task is allocated a unique global id. A thread group can only belong to
@@ -151,6 +133,15 @@ extern struct id_pool *thread_id_pool;
extern struct id_pool *space_id_pool;
extern struct id_pool *tgroup_id_pool;

struct ktcb *tcb_find(l4id_t tid);
void tcb_add(struct ktcb *tcb);
void tcb_remove(struct ktcb *tcb);

void tcb_init(struct ktcb *tcb);
struct ktcb *tcb_alloc_init(void);
void tcb_delete(struct ktcb *tcb);

void init_ktcb_list(void);
void task_update_utcb(struct ktcb *cur, struct ktcb *next);

#endif /* __TCB_H__ */

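The removed inline helpers reappear as locked, out-of-line functions in tcb.c (below). A sketch of how the new calls pair up over a thread's lifetime (hypothetical sequence; it assumes an address space gets attached in between, since tcb_delete() drops a space reference):

	/* Hypothetical lifetime of a thread through the new tcb API */
	int tcb_lifetime_example(struct task_ids *ids)
	{
		struct ktcb *tcb;

		if (!(tcb = tcb_alloc_init()))	/* page alloc + tcb_init() */
			return -ENOMEM;

		/* ... thread_setup_space() attaches an address space ... */

		tcb_add(tcb);		/* visible to tcb_find(ids->tid) */

		/* ... the thread runs ... */

		tcb_remove(tcb);	/* later lookups now get -ESRCH */
		tcb_delete(tcb);	/* drops space ref, frees the page */
		return 0;
	}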
@@ -63,7 +63,7 @@ int sys_ipc_control(syscall_context_t *regs)
/* Interruptible ipc */
int ipc_send(l4id_t recv_tid)
{
-	struct ktcb *receiver = find_task(recv_tid);
+	struct ktcb *receiver = tcb_find(recv_tid);
	struct waitqueue_head *wqhs, *wqhr;

	wqhs = &receiver->wqh_send;

@@ -29,7 +29,7 @@ int sys_map(syscall_context_t *regs)
		target = current;
		goto found;
	} else	/* else search the tcb from its hash list */
-		if ((target = find_task(tid)))
+		if ((target = tcb_find(tid)))
			goto found;

	BUG();
@@ -56,7 +56,7 @@ int sys_unmap(syscall_context_t *regs)

	if (tid == current->tid)
		target = current;
-	else if (!(target = find_task(tid)))
+	else if (!(target = tcb_find(tid)))
		return -ESRCH;

	for (int i = 0; i < npages; i++) {

@@ -106,7 +106,7 @@ int sys_exchange_registers(syscall_context_t *regs)
	l4id_t tid = regs->r1;

	/* Find tcb from its list */
-	if (!(task = find_task(tid)))
+	if (!(task = tcb_find(tid)))
		return -ESRCH;

	/*

@@ -30,7 +30,7 @@ int thread_suspend(struct task_ids *ids)
	struct ktcb *task;
	int ret = 0;

-	if (!(task = find_task(ids->tid)))
+	if (!(task = tcb_find(ids->tid)))
		return -ESRCH;

	if (task->state == TASK_INACTIVE)
@@ -65,7 +65,7 @@ int thread_recycle(struct task_ids *ids)
	struct ktcb *task;
	int ret;

-	if (!(task = find_task(ids->tid)))
+	if (!(task = tcb_find(ids->tid)))
		return -ESRCH;

	if ((ret = thread_suspend(ids)) < 0)
@@ -95,14 +95,14 @@ int thread_destroy(struct task_ids *ids)
	struct ktcb *task;
	int ret;

-	if (!(task = find_task(ids->tid)))
+	if (!(task = tcb_find(ids->tid)))
		return -ESRCH;

	if ((ret = thread_suspend(ids)) < 0)
		return ret;

-	/* Delete it from global list so any callers will get -ESRCH */
-	list_del(&task->task_list);
+	/* Remove tcb from global list so any callers will get -ESRCH */
+	tcb_remove(task);

	/*
	 * If there are any sleepers on any of the task's
@@ -111,18 +111,8 @@ int thread_destroy(struct task_ids *ids)
	wake_up_all(&task->wqh_send, 0);
	wake_up_all(&task->wqh_recv, 0);

-	/*
-	 * The thread cannot have a pager waiting for it
-	 * since we ought to be the pager.
-	 */
-	BUG_ON(task->wqh_pager.sleepers > 0);
-
-	/*
-	 * FIXME: We need to free the pgd and any thread specific pmds!!!
-	 */
-
	/* We can now safely delete the task */
-	free_page(task);
+	tcb_delete(task);

	return 0;
}
@@ -131,7 +121,7 @@ int thread_resume(struct task_ids *ids)
{
	struct ktcb *task;

-	if (!(task = find_task(ids->tid)))
+	if (!(task = tcb_find(ids->tid)))
		return -ESRCH;

	if (!mutex_trylock(&task->thread_control_lock))
@@ -150,7 +140,7 @@ int thread_start(struct task_ids *ids)
{
	struct ktcb *task;

-	if (!(task = find_task(ids->tid)))
+	if (!(task = tcb_find(ids->tid)))
		return -ESRCH;

	if (!mutex_trylock(&task->thread_control_lock))

@@ -256,6 +246,69 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
	return 0;
}

int thread_setup_space(struct ktcb *tcb, struct task_ids *ids, unsigned int flags)
{
	struct address_space *space, *new;
	int ret = 0;

	address_space_reference_lock();

	if (flags == THREAD_SAME_SPACE) {
		if (!(space = address_space_find(ids->spid))) {
			ret = -ESRCH;
			goto out;
		}
		address_space_attach(tcb, space);
	}
	if (flags == THREAD_COPY_SPACE) {
		if (!(space = address_space_find(ids->spid))) {
			ret = -ESRCH;
			goto out;
		}
		if (IS_ERR(new = address_space_create(space))) {
			ret = (int)new;
			goto out;
		}
		address_space_attach(tcb, new);
		address_space_add(new);
	}
	if (flags == THREAD_NEW_SPACE) {
		if (IS_ERR(new = address_space_create(0))) {
			ret = (int)new;
			goto out;
		}
		address_space_attach(tcb, new);
		address_space_add(new);
	}

out:
	/* Error paths must release the reference lock as well */
	address_space_reference_unlock();
	return ret;
}

int thread_create(struct task_ids *ids, unsigned int flags)
{
	struct ktcb *new, *orig_task;
	int err;

	flags &= THREAD_CREATE_MASK;

	if (!(new = tcb_alloc_init()))
		return -ENOMEM;

	if ((err = thread_setup_space(new, ids, flags))) {
		tcb_delete(new);
		return err;
	}

	if (flags != THREAD_NEW_SPACE) {
		BUG_ON(!(orig_task = tcb_find(ids->tid)));

		/* Set up ids and context using original tcb */
		thread_setup_new_ids(ids, flags, new, orig_task);
		arch_setup_new_thread(new, orig_task, flags);
	} else {
		/* Set up ids and context from scratch */
		thread_setup_new_ids(ids, flags, new, 0);
		arch_setup_new_thread(new, 0, flags);
	}

	tcb_add(new);

	return 0;
}

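For context, thread_create() with THREAD_COPY_SPACE is the path a fork-style operation would take. A hypothetical caller (names and id handling assumed, not part of the commit):

	/* Hypothetical: fork the current task into a copy of its space */
	int do_fork_example(void)
	{
		struct task_ids ids = {
			.tid = current->tid,		/* context copied from us */
			.spid = current->space->spid,	/* space to be copied */
			.tgid = current->tgid,
		};

		/* THREAD_COPY_SPACE: new tcb + private copy of our tables */
		return thread_create(&ids, THREAD_COPY_SPACE);
	}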
#if 0
/*
 * Creates a thread, with a new thread id, and depending on the flags,
 * either creates a new space, uses the same space as another thread,
@@ -263,7 +316,7 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
 * are respectively used when creating a brand new task, creating a
 * new thread in an existing address space, or forking a task.
 */
-int thread_create(struct task_ids *ids, unsigned int flags)
+int thread_create_old(struct task_ids *ids, unsigned int flags)
{
	struct ktcb *task = 0;
	struct ktcb *new = (struct ktcb *)zalloc_page();
@@ -323,6 +376,7 @@ out:

	return 0;
}
#endif

/*
 * Creates, destroys and modifies threads. Also implicitly creates an address

@@ -419,9 +419,64 @@ int remove_mapping(unsigned long vaddr)
	return remove_mapping_pgd(vaddr, TASK_PGD(current));
}

/*
 * Copies userspace entries of one task to another. In order to do that,
 * it allocates new pmds and copies the original values into new ones.
 */
int copy_user_tables(struct address_space *new, struct address_space *orig_space)
{
	pgd_table_t *to = new->pgd, *from = orig_space->pgd;
	pmd_table_t *pmd, *orig;

	/* Allocate and copy all pmds that will be exclusive to new task. */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Detect a pmd entry that is not a kernel pmd */
		if (!is_kern_pgdi(i) &&
		    ((from->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
			/* Allocate new pmd */
			if (!(pmd = alloc_pmd()))
				goto out_error;

			/* Find original pmd */
			orig = (pmd_table_t *)
			       phys_to_virt((from->entry[i] &
					     PGD_COARSE_ALIGN_MASK));

			/* Copy original to new */
			memcpy(pmd, orig, sizeof(pmd_table_t));

			/* Replace original pmd entry in pgd with new */
			to->entry[i] = (pgd_t)virt_to_phys(pmd);
			to->entry[i] |= PGD_TYPE_COARSE;
		}
	}

	return 0;

out_error:
	/* Find all non-kernel pmds we have just allocated and free them */
	for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
		/* Non-kernel pmd that has just been allocated. */
		if (!is_kern_pgdi(i) &&
		    (to->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
			/* Obtain the pmd handle */
			pmd = (pmd_table_t *)
			      phys_to_virt((to->entry[i] &
					    PGD_COARSE_ALIGN_MASK));
			/* Free pmd */
			free_pmd(pmd);
		}
	}
	return -ENOMEM;
}

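The mask arithmetic above encodes the two-level table scheme: a coarse pgd entry is the pmd's physical address with type bits or'ed into its low bits, so the pmd is recovered by masking and converting back to a virtual address. A hypothetical helper spelling that out, using the same macros as the code above:

	/* Hypothetical: recover the pmd behind a coarse pgd entry */
	static inline pmd_table_t *pmd_of_pgd_entry(pgd_t entry)
	{
		BUG_ON((entry & PGD_TYPE_MASK) != PGD_TYPE_COARSE);
		return (pmd_table_t *)
		       phys_to_virt(entry & PGD_COARSE_ALIGN_MASK);
	}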
/*
 * Allocates and copies all levels of page tables from one task to another.
 * Useful when forking.
 *
 * The copied page tables end up having shared pmds for kernel entries
 * and private copies of same pmds for user entries.
 */
pgd_table_t *copy_page_tables(pgd_table_t *from)
{
@@ -432,6 +487,7 @@ pgd_table_t *copy_page_tables(pgd_table_t *from)
	if (!(pgd = alloc_pgd()))
		return PTR_ERR(-ENOMEM);

	/* First copy whole pgd entries */
	memcpy(pgd, from, sizeof(pgd_table_t));

	/* Allocate and copy all pmds that will be exclusive to new task. */

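The sharing property the comment describes is checkable: after a copy, every kernel slot of the two pgds holds the same descriptor (they alias one shared pmd), while coarse user slots point at freshly allocated private copies. A hypothetical assertion capturing this invariant (not in the commit):

	/* Hypothetical: assert kernel pmds shared, user pmds private */
	void check_copied_tables(pgd_table_t *child, pgd_table_t *parent)
	{
		for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
			if (is_kern_pgdi(i))
				/* Kernel slots alias the same pmd */
				BUG_ON(child->entry[i] != parent->entry[i]);
			else if ((parent->entry[i] & PGD_TYPE_MASK)
				 == PGD_TYPE_COARSE)
				/* User slots must have been re-pointed */
				BUG_ON(child->entry[i] == parent->entry[i]);
		}
	}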
@@ -225,7 +225,7 @@ void sched_suspend_sync(void)
	preempt_enable();

	/* Async wake up any waiters */
-	wake_up_task(find_task(current->pagerid), 0);
+	wake_up_task(tcb_find(current->pagerid), 0);
	schedule();
}

@@ -242,7 +242,7 @@ void sched_suspend_async(void)
	preempt_enable();

	/* Async wake up any waiters */
-	wake_up_task(find_task(current->pagerid), 0);
+	wake_up_task(tcb_find(current->pagerid), 0);
	need_resched = 1;
}

@@ -8,10 +8,131 @@
#include INC_ARCH(exception.h)
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/kmalloc.h>
#include <l4/api/space.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>

struct address_space_list {
	struct list_head list;

	/* Lock for list add/removal */
	struct spinlock list_lock;

	/* To manage refcounting of *all* spaces in the list */
	struct mutex ref_lock;
	int count;
};

static struct address_space_list address_space_list;

void init_address_space_list(void)
{
	memset(&address_space_list, 0, sizeof(address_space_list));

	mutex_init(&address_space_list.ref_lock);
	spin_lock_init(&address_space_list.list_lock);
	INIT_LIST_HEAD(&address_space_list.list);
}

void address_space_reference_lock(void)
{
	mutex_lock(&address_space_list.ref_lock);
}

void address_space_reference_unlock(void)
{
	mutex_unlock(&address_space_list.ref_lock);
}

void address_space_attach(struct ktcb *tcb, struct address_space *space)
{
	tcb->space = space;
	space->ktcb_refs++;
}

struct address_space *address_space_find(l4id_t spid)
{
	struct address_space *space;

	spin_lock(&address_space_list.list_lock);
	list_for_each_entry(space, &address_space_list.list, list) {
		if (space->spid == spid) {
			spin_unlock(&address_space_list.list_lock);
			return space;
		}
	}
	spin_unlock(&address_space_list.list_lock);
	return 0;
}

void address_space_add(struct address_space *space)
{
	spin_lock(&address_space_list.list_lock);
	list_add(&space->list, &address_space_list.list);
	spin_unlock(&address_space_list.list_lock);
}

void address_space_remove(struct address_space *space)
{
	spin_lock(&address_space_list.list_lock);
	BUG_ON(list_empty(&space->list));
	list_del_init(&space->list);
	spin_unlock(&address_space_list.list_lock);
}

void address_space_delete(struct address_space *space)
{
	/* Address space refcount lock must be held */

	/* Sanity checks ??? */

	/* Traverse the page tables and delete private pmds */

	/* Delete the top-level pgd */

	/* Return the space id ??? */

	/* Deallocate the space structure */
}

struct address_space *address_space_create(struct address_space *orig)
{
	struct address_space *space;
	pgd_table_t *pgd;
	int err;

	/* Allocate space structure */
	if (!(space = kzalloc(sizeof(*space))))
		return PTR_ERR(-ENOMEM);

	/* Allocate pgd */
	if (!(pgd = alloc_pgd())) {
		kfree(space);
		return PTR_ERR(-ENOMEM);
	}

	/* Initialize space structure */
	INIT_LIST_HEAD(&space->list);
	mutex_init(&space->lock);
	space->pgd = pgd;

	/* Copy all kernel entries */
	copy_pgd_kern_all(pgd);

	/* If an original space is supplied */
	if (orig) {
		/* Copy its user entries/tables */
		if ((err = copy_user_tables(space, orig)) < 0) {
			free_pgd(pgd);
			kfree(space);
			return PTR_ERR(err);
		}
	}

	return space;
}

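One convention worth noting, inferred from usage in this file rather than stated anywhere in the hunk: PTR_ERR() here packs a negative errno into a pointer (the role ERR_PTR() plays in Linux) and IS_ERR() tests for one, so pointer-returning functions report failure in-band and callers unpack it with a cast:

	/* Hypothetical caller following the error-pointer convention */
	int create_and_publish(struct address_space *orig)
	{
		struct address_space *space;

		if (IS_ERR(space = address_space_create(orig)))
			return (int)space;	/* recover negative errno */

		address_space_add(space);
		return 0;
	}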
/*
 * Checks whether the given user address is a valid userspace address.
 * If so, whether it is currently mapped into its own address space.

@@ -7,6 +7,7 @@
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/preempt.h>
#include <l4/generic/space.h>
#include <l4/lib/idpool.h>
#include <l4/api/kip.h>
#include INC_ARCH(exception.h)
@@ -19,7 +20,112 @@ struct id_pool *space_id_pool;
struct id_pool *tgroup_id_pool;

-/* Hash table for all existing tasks */
-struct list_head global_task_list;
struct ktcb_list {
	struct list_head list;
	struct spinlock list_lock;
	int count;
};

static struct ktcb_list ktcb_list;

void init_ktcb_list(void)
{
	memset(&ktcb_list, 0, sizeof(ktcb_list));
	spin_lock_init(&ktcb_list.list_lock);
	INIT_LIST_HEAD(&ktcb_list.list);
}

void tcb_init(struct ktcb *new)
{
	INIT_LIST_HEAD(&new->task_list);
	mutex_init(&new->thread_control_lock);

	/* Initialise task's scheduling state and parameters. */
	sched_init_task(new, TASK_PRIO_NORMAL);

	/* Initialise ipc waitqueues */
	spin_lock_init(&new->waitlock);
	waitqueue_head_init(&new->wqh_send);
	waitqueue_head_init(&new->wqh_recv);
	waitqueue_head_init(&new->wqh_pager);
}

struct ktcb *tcb_alloc(void)
{
	return zalloc_page();
}

struct ktcb *tcb_alloc_init(void)
{
	struct ktcb *tcb;

	if (!(tcb = tcb_alloc()))
		return 0;

	tcb_init(tcb);
	return tcb;
}

void tcb_delete(struct ktcb *tcb)
{
	/* Sanity checks first */
	BUG_ON(!is_page_aligned(tcb));
	BUG_ON(tcb->wqh_pager.sleepers > 0);
	BUG_ON(tcb->wqh_send.sleepers > 0);
	BUG_ON(tcb->wqh_recv.sleepers > 0);
	BUG_ON(!list_empty(&tcb->task_list));
	BUG_ON(!list_empty(&tcb->rq_list));
	BUG_ON(tcb->nlocks);
	BUG_ON(tcb->waiting_on);
	BUG_ON(tcb->wq);

	/*
	 * Take this lock as we may delete
	 * the address space as well
	 */
	address_space_reference_lock();
	BUG_ON(--tcb->space->ktcb_refs < 0);

	/* No refs left for the space, delete it */
	if (tcb->space->ktcb_refs == 0)
		address_space_delete(tcb->space);

	address_space_reference_unlock();

	/* Free the tcb */
	free_page(tcb);
}

struct ktcb *tcb_find(l4id_t tid)
{
	struct ktcb *task;

	spin_lock(&ktcb_list.list_lock);
	list_for_each_entry(task, &ktcb_list.list, task_list) {
		if (task->tid == tid) {
			spin_unlock(&ktcb_list.list_lock);
			return task;
		}
	}
	spin_unlock(&ktcb_list.list_lock);
	return 0;
}

void tcb_add(struct ktcb *new)
{
	spin_lock(&ktcb_list.list_lock);
	BUG_ON(!list_empty(&new->task_list));
	list_add(&new->task_list, &ktcb_list.list);
	spin_unlock(&ktcb_list.list_lock);
}

void tcb_remove(struct ktcb *new)
{
	spin_lock(&ktcb_list.list_lock);
	BUG_ON(list_empty(&new->task_list));
	list_del_init(&new->task_list);
	spin_unlock(&ktcb_list.list_lock);
}

/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
@@ -34,27 +140,5 @@ void task_update_utcb(struct ktcb *cur, struct ktcb *next)
{
	/* Update the KIP pointer */
	kip.utcb = next->utcb_address;

-	/* We stick with KIP update and no private tls mapping for now */
-#if 0
-	/*
-	 * Unless current and next are in the same address
-	 * space and sharing the same physical utcb page, we
-	 * update the mapping
-	 */
-	if (cur->utcb_phys != next->utcb_phys)
-		add_mapping(page_align(next->utcb_phys),
-			    page_align(next->utcb_virt),
-			    page_align_up(UTCB_SIZE),
-			    MAP_USR_RW_FLAGS);
-	/*
-	 * If same physical utcb but different pgd, it means two
-	 * address spaces share the same utcb. We treat this as a
-	 * bug for now.
-	 */
-	else
-		BUG_ON(cur->pgd != next->pgd);
-#endif
}

@@ -256,26 +256,27 @@ void switch_to_user(struct ktcb *task)
	jump(task);
}

/*
 * Initialize the pager in the system.
 *
 * The pager uses the bootstack as its ktcb, the initial kspace as its pgd,
 * (kernel pmds are shared among all tasks) and a statically allocated
 * pager_space struct for its space structure.
 */
void init_pager(char *name, struct task_ids *ids)
{
	struct svc_image *taskimg = 0;
	struct ktcb *task;
	int task_pages;

	/*
	 * NOTE: Inittask uses the kernel bootstack as its PAGE_SIZE'd kernel
	 * stack. There is no problem with this as the inittask always exists.
	 * This also solves the problem of freeing the bootstack and making use
	 * of the initial kspace pgd.
	 */
-	if (!strcmp(name, __PAGERNAME__))
-		task = current; /* mm0 is the mockup current during init */
-	else
-		task = (struct ktcb *)zalloc_page();
+	BUG_ON(strcmp(name, __PAGERNAME__));
+	task = current;
+
+	tcb_init(task);

	/*
-	 * Search the compile-time generated boot descriptor for information on
-	 * available task images.
+	 * Search the compile-time generated boot descriptor for
+	 * information on available task images.
	 */
	for (int i = 0; i < bootdesc->total_images; i++) {
		if (!strcmp(name, bootdesc->images[i].name)) {
@@ -324,7 +325,7 @@ void init_pager(char *name, struct task_ids *ids)
	waitqueue_head_init(&task->wqh_pager);

	/* Global hashlist that keeps all existing tasks */
-	add_task_global(task);
+	tcb_add(task);

	/* Scheduler initialises the very first task itself */
}
@@ -341,8 +342,9 @@ void init_tasks()
	ids.spid = id_new(space_id_pool);
	ids.tgid = id_new(tgroup_id_pool);

-	/* Initialise the global task list head */
-	INIT_LIST_HEAD(&global_task_list);
+	/* Initialise the global task and address space lists */
+	init_ktcb_list();
+	init_address_space_list();

	/*
	 * This must come last so that other tasks can copy its pgd before it