Reimplemented address space handling by introducing an address_space structure.

- Fixed potential concurrency bugs caused by preemption being left enabled.
- Introduced a new address_space structure to better account for
  address spaces and page tables.
- Currently executes fine up to forking; the failure at fork will be investigated.
Bahadir Balban
2009-05-08 23:35:19 +03:00
parent 2df8875a88
commit 7415546410
12 changed files with 403 additions and 81 deletions
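The first bullet of the commit message refers to windows where shared kernel state was touched while preemption was still enabled. A minimal sketch of the pattern involved, assuming the preempt_disable() counterpart to the preempt_enable() calls visible in the hunks below; the counter is hypothetical and only stands in for shared scheduler state:

static int suspended_task_count;

void account_suspend(void)
{
        /*
         * With preemption enabled, a timer tick could switch threads
         * between the load and the store of the counter below, losing
         * an update. Disabling preemption closes that window.
         */
        preempt_disable();
        suspended_task_count++;
        preempt_enable();
}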


@@ -225,7 +225,7 @@ void sched_suspend_sync(void)
 	preempt_enable();
 
 	/* Async wake up any waiters */
-	wake_up_task(find_task(current->pagerid), 0);
+	wake_up_task(tcb_find(current->pagerid), 0);
 
 	schedule();
 }

@@ -242,7 +242,7 @@ void sched_suspend_async(void)
 	preempt_enable();
 
	/* Async wake up any waiters */
-	wake_up_task(find_task(current->pagerid), 0);
+	wake_up_task(tcb_find(current->pagerid), 0);
 
 	need_resched = 1;
 }


@@ -8,10 +8,131 @@
#include INC_ARCH(exception.h)
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/kmalloc.h>
#include <l4/api/space.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>

struct address_space_list {
        struct list_head list;

        /* Lock for list add/removal */
        struct spinlock list_lock;

        /* To manage refcounting of *all* spaces in the list */
        struct mutex ref_lock;

        int count;
};

static struct address_space_list address_space_list;

void init_address_space_list(void)
{
        memset(&address_space_list, 0, sizeof(address_space_list));

        mutex_init(&address_space_list.ref_lock);
        spin_lock_init(&address_space_list.list_lock);
        INIT_LIST_HEAD(&address_space_list.list);
}
void address_space_reference_lock(void)
{
        mutex_lock(&address_space_list.ref_lock);
}

void address_space_reference_unlock(void)
{
        mutex_unlock(&address_space_list.ref_lock);
}

void address_space_attach(struct ktcb *tcb, struct address_space *space)
{
        tcb->space = space;
        space->ktcb_refs++;
}
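
The two locks above split duties: list_lock guards list membership, while ref_lock serializes refcount changes and deletion. A caller binding a new thread to an existing space would presumably follow a pattern like this hypothetical sketch (ESRCH assumed available from l4/api/errno.h):

int bind_thread_to_space(struct ktcb *tcb, l4id_t spid)
{
        struct address_space *space;

        /* Serialize against concurrent refcount drops and deletion */
        address_space_reference_lock();
        if (!(space = address_space_find(spid))) {
                address_space_reference_unlock();
                return -ESRCH;
        }

        /* Takes a ktcb reference on the space */
        address_space_attach(tcb, space);
        address_space_reference_unlock();

        return 0;
}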

struct address_space *address_space_find(l4id_t spid)
{
        struct address_space *space;

        spin_lock(&address_space_list.list_lock);
        list_for_each_entry(space, &address_space_list.list, list) {
                if (space->spid == spid) {
                        spin_unlock(&address_space_list.list_lock);
                        return space;
                }
        }
        spin_unlock(&address_space_list.list_lock);

        return 0;
}

void address_space_add(struct address_space *space)
{
        spin_lock(&address_space_list.list_lock);
        list_add(&space->list, &address_space_list.list);
        spin_unlock(&address_space_list.list_lock);
}

void address_space_remove(struct address_space *space)
{
        spin_lock(&address_space_list.list_lock);
        BUG_ON(list_empty(&space->list));
        list_del_init(&space->list);
        spin_unlock(&address_space_list.list_lock);
}
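
address_space_find(), address_space_create() and address_space_add() combine naturally into a find-or-create helper. The sketch below is hypothetical: it assumes the caller already holds address_space_reference_lock() so the find/add pair cannot race, and the spid assignment and the (long) error-pointer check are assumptions as well:

struct address_space *address_space_find_or_create(l4id_t spid)
{
        struct address_space *space;

        /* Fast path: the space is already known */
        if ((space = address_space_find(spid)))
                return space;

        /* No original space: fresh pgd with kernel entries only */
        space = address_space_create(0);
        if ((long)space < 0)    /* PTR_ERR-encoded failure, assumed */
                return space;

        space->spid = spid;
        address_space_add(space);

        return space;
}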

void address_space_delete(struct address_space *space)
{
        /* Address space refcount lock must be held */
        /* Sanity checks ??? */
        /* Traverse the page tables and delete private pmds */
        /* Delete the top-level pgd */
        /* Return the space id ??? */
        /* Deallocate the space structure */
}
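
The stub's comments already outline the eventual shape of deletion; a minimal sketch along those lines might look as follows. The function name is hypothetical, the pmd traversal is elided, and free_pgd()/kfree() are taken from address_space_create()'s error path below:

void address_space_delete_sketch(struct address_space *space)
{
        /* Caller holds the refcount lock; no ktcb references remain */
        BUG_ON(space->ktcb_refs != 0);

        /* Unlink from the global space list */
        address_space_remove(space);

        /* ... traverse the page tables and free private pmds here ... */

        /* Delete the top-level pgd and the structure itself */
        free_pgd(space->pgd);
        kfree(space);
}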

struct address_space *address_space_create(struct address_space *orig)
{
        struct address_space *space;
        pgd_table_t *pgd;
        int err;

        /* Allocate space structure */
        if (!(space = kzalloc(sizeof(*space))))
                return PTR_ERR(-ENOMEM);

        /* Allocate pgd */
        if (!(pgd = alloc_pgd())) {
                kfree(space);
                return PTR_ERR(-ENOMEM);
        }

        /* Initialize space structure */
        INIT_LIST_HEAD(&space->list);
        mutex_init(&space->lock);
        space->pgd = pgd;

        /* Copy all kernel entries */
        copy_pgd_kern_all(pgd);

        /* If an original space is supplied */
        if (orig) {
                /* Copy its user entries/tables */
                if ((err = copy_user_tables(space, orig)) < 0) {
                        free_pgd(pgd);
                        kfree(space);
                        return PTR_ERR(err);
                }
        }

        return space;
}
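
A fork-style caller would duplicate the parent's space roughly as below. The wrapper is hypothetical; note that address_space_create() signals failure with a PTR_ERR()-encoded value rather than returning 0:

struct address_space *copy_space_for_fork(struct ktcb *parent)
{
        struct address_space *child;

        address_space_reference_lock();
        /* Copies kernel entries plus the parent's user tables */
        child = address_space_create(parent->space);
        address_space_reference_unlock();

        return child;
}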

/*
 * Checks whether the given user address is a valid userspace address.
 * If so, whether it is currently mapped into its own address space.


@@ -7,6 +7,7 @@
 #include <l4/generic/space.h>
 #include <l4/generic/scheduler.h>
 #include <l4/generic/preempt.h>
+#include <l4/generic/space.h>
 #include <l4/lib/idpool.h>
 #include <l4/api/kip.h>
 #include INC_ARCH(exception.h)
@@ -19,7 +20,112 @@ struct id_pool *space_id_pool;
struct id_pool *tgroup_id_pool;
/* Hash table for all existing tasks */
struct list_head global_task_list;

struct ktcb_list {
        struct list_head list;
        struct spinlock list_lock;
        int count;
};

static struct ktcb_list ktcb_list;

void init_ktcb_list(void)
{
        memset(&ktcb_list, 0, sizeof(ktcb_list));

        spin_lock_init(&ktcb_list.list_lock);
        INIT_LIST_HEAD(&ktcb_list.list);
}
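
Both caches need their one-time setup during kernel bring-up, before any task or space exists. A hedged sketch of such a call site; the hook name is hypothetical:

void kernel_lists_init(void)
{
        /* Must run before the first task or address space is created */
        init_address_space_list();
        init_ktcb_list();
}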

void tcb_init(struct ktcb *new)
{
        INIT_LIST_HEAD(&new->task_list);

        mutex_init(&new->thread_control_lock);

        /* Initialise task's scheduling state and parameters. */
        sched_init_task(new, TASK_PRIO_NORMAL);

        /* Initialise ipc waitqueues */
        spin_lock_init(&new->waitlock);
        waitqueue_head_init(&new->wqh_send);
        waitqueue_head_init(&new->wqh_recv);
        waitqueue_head_init(&new->wqh_pager);
}

struct ktcb *tcb_alloc(void)
{
        return zalloc_page();
}

struct ktcb *tcb_alloc_init(void)
{
        struct ktcb *tcb;

        if (!(tcb = tcb_alloc()))
                return 0;

        tcb_init(tcb);

        return tcb;
}
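
tcb_alloc_init() and tcb_add() form the creation half of the thread lifecycle; a hypothetical creation path might combine them as follows (id allocation elided):

struct ktcb *create_thread(void)
{
        struct ktcb *task;

        /* One zeroed page per tcb, lists and waitqueues initialized */
        if (!(task = tcb_alloc_init()))
                return 0;

        /* Make the new task visible to tcb_find() */
        tcb_add(task);

        return task;
}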

void tcb_delete(struct ktcb *tcb)
{
        /* Sanity checks first */
        BUG_ON(!is_page_aligned(tcb));
        BUG_ON(tcb->wqh_pager.sleepers > 0);
        BUG_ON(tcb->wqh_send.sleepers > 0);
        BUG_ON(tcb->wqh_recv.sleepers > 0);
        BUG_ON(!list_empty(&tcb->task_list));
        BUG_ON(!list_empty(&tcb->rq_list));
        BUG_ON(tcb->nlocks);
        BUG_ON(tcb->waiting_on);
        BUG_ON(tcb->wq);

        /*
         * Take this lock as we may delete
         * the address space as well
         */
        address_space_reference_lock();

        BUG_ON(--tcb->space->ktcb_refs < 0);

        /* No refs left for the space, delete it */
        if (tcb->space->ktcb_refs == 0)
                address_space_delete(tcb->space);

        address_space_reference_unlock();

        /* Free the tcb */
        free_page(tcb);
}
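
tcb_delete() BUG()s if the task is still linked on any list, which implies a teardown order like this hypothetical exit path:

void destroy_thread(struct ktcb *task)
{
        /* Unlink first so tcb_find() can no longer return the task */
        tcb_remove(task);

        /* Drops the space ref (possibly deleting the space) and frees the page */
        tcb_delete(task);
}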

struct ktcb *tcb_find(l4id_t tid)
{
        struct ktcb *task;

        spin_lock(&ktcb_list.list_lock);
        list_for_each_entry(task, &ktcb_list.list, task_list) {
                if (task->tid == tid) {
                        spin_unlock(&ktcb_list.list_lock);
                        return task;
                }
        }
        spin_unlock(&ktcb_list.list_lock);

        return 0;
}
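
This is the lookup that the scheduler hunks at the top of the commit switch to; their use wakes the suspending task's pager:

        /* As in sched_suspend_sync()/sched_suspend_async() above */
        wake_up_task(tcb_find(current->pagerid), 0);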

void tcb_add(struct ktcb *new)
{
        spin_lock(&ktcb_list.list_lock);
        BUG_ON(!list_empty(&new->task_list));
        list_add(&new->task_list, &ktcb_list.list);
        spin_unlock(&ktcb_list.list_lock);
}

void tcb_remove(struct ktcb *new)
{
        spin_lock(&ktcb_list.list_lock);
        BUG_ON(list_empty(&new->task_list));
        list_del_init(&new->task_list);
        spin_unlock(&ktcb_list.list_lock);
}

/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);

@@ -34,27 +140,5 @@ void task_update_utcb(struct ktcb *cur, struct ktcb *next)
 {
         /* Update the KIP pointer */
         kip.utcb = next->utcb_address;
-
-        /* We stick with KIP update and no private tls mapping for now */
-#if 0
-        /*
-         * Unless current and next are in the same address
-         * space and sharing the same physical utcb page, we
-         * update the mapping
-         */
-        if (cur->utcb_phys != next->utcb_phys)
-                add_mapping(page_align(next->utcb_phys),
-                            page_align(next->utcb_virt),
-                            page_align_up(UTCB_SIZE),
-                            MAP_USR_RW_FLAGS);
-        /*
-         * If same physical utcb but different pgd, it means two
-         * address spaces share the same utcb. We treat this as a
-         * bug for now.
-         */
-        else
-                BUG_ON(cur->pgd != next->pgd);
-#endif
 }
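
task_update_utcb() is meant to run on every context switch so the KIP always exposes the incoming task's utcb. A hedged sketch of such a call site; the switch function is hypothetical and the real register/address-space switch is elided:

void context_switch_sketch(struct ktcb *cur, struct ktcb *next)
{
        /* Publish the next task's utcb through the KIP */
        task_update_utcb(cur, next);

        /* ... save/restore registers and switch the address space ... */
}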