Added means to search for threads in other containers

Thread ids now contain their container ids in the top 2 nibbles.
Threads on other containers can be addressed by changing those
two nibbles. The addressing of inter-container threads is
subject to capabilities.
This commit is contained in:
Bahadir Balban
2009-11-04 20:56:57 +02:00
parent 516efccd99
commit d9520adb55
15 changed files with 91 additions and 37 deletions

View File

@@ -48,7 +48,6 @@ int sys_getid(struct task_ids *ids)
ids->tid = this->tid;
ids->spid = this->space->spid;
ids->tgid = this->tgid;
ids->cid = this->container->cid;
return 0;
}

View File

@@ -346,7 +346,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)
}
}
if (!(new = tcb_alloc_init()))
if (!(new = tcb_alloc_init(curcont->cid)))
return -ENOMEM;
/* Set up new thread space by using space id and flags */

View File

@@ -451,10 +451,11 @@ struct capability *cap_match_thread(struct capability *cap,
if (action_flags == THREAD_CREATE) {
/*
* TODO: Add cid to task_ids arg.
* NOTE: Currently we only allow creation in
* current container.
*
* Its a thread create and we have no knowledge of
* thread id, space id, or any other id.
* TODO: Add capability checking for space,
* as well.
*
* We _assume_ target is the largest group,
* e.g. same container as current. We check
@@ -462,7 +463,7 @@ struct capability *cap_match_thread(struct capability *cap,
*/
if (cap_rtype(cap) != CAP_RTYPE_CONTAINER)
return 0;
if (cap->resid != current->container->cid)
if (cap->resid != curcont->cid)
return 0;
/* Resource type and id match, success */
return cap;

View File

@@ -47,8 +47,25 @@ struct container *container_create(void)
/*
 * Insert a container into the global container list kept in
 * the kernel resources structure, under the list's spinlock.
 */
void kres_insert_container(struct container *c,
			   struct kernel_resources *kres)
{
	spin_lock(&kres->containers.lock);
	list_insert(&c->list, &kres->containers.list);
	/* Keep the container count in sync with the list */
	kres->containers.ncont++;
	spin_unlock(&kres->containers.lock);
}
/*
 * Look up a container by its container id in the global
 * container list. Returns the container, or 0 if no container
 * with that id exists.
 */
struct container *container_find(struct kernel_resources *kres, l4id_t cid)
{
	struct container *found = 0;
	struct container *cont;

	spin_lock(&kres->containers.lock);
	list_foreach_struct(cont, &kres->containers.list, list) {
		if (cont->cid == cid) {
			found = cont;
			break;
		}
	}
	spin_unlock(&kres->containers.lock);

	return found;
}
/*
@@ -86,8 +103,11 @@ int init_pager(struct pager *pager,
*/
cap_list_move(&current->cap_list, &pager->cap_list);
/* Setup dummy container pointer so that curcont works */
current->container = cont;
/* New ktcb allocation is needed */
task = tcb_alloc_init();
task = tcb_alloc_init(cont->cid);
/* If first, manually allocate/initialize space */
if (first) {
@@ -117,11 +137,7 @@ int init_pager(struct pager *pager,
task->tgid = task->tid;
task->container = cont;
/*
* Setup dummy container pointer so that curcont works,
* and add the address space to container space list
*/
current->container = cont;
/* Add the address space to container space list */
address_space_add(task->space);
/* Initialize uninitialized capability fields while on dummy */

View File

@@ -402,8 +402,6 @@ void init_kernel_resources(struct kernel_resources *kres)
memcap_unmap(&kres->physmem_free, kernel_area->start,
kernel_area->end);
/* Initialize zombie pager list */
init_ktcb_list(&kres->zombie_list);
/* TODO:
* Add all virtual memory areas used by the kernel
@@ -529,7 +527,8 @@ void setup_kernel_resources(struct boot_resources *bootres,
* See how many containers we have. Assign next
* unused container id for kernel resources
*/
kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
//kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
kres->cid = id_get(&kres->container_ids, 0);
/* First initialize the list of non-memory capabilities */
cap = boot_capability_create();

View File

@@ -43,7 +43,7 @@ void tcb_init(struct ktcb *new)
waitqueue_head_init(&new->wqh_pager);
}
struct ktcb *tcb_alloc_init(void)
struct ktcb *tcb_alloc_init(l4id_t cid)
{
struct ktcb *tcb;
struct task_ids ids;
@@ -52,6 +52,7 @@ struct ktcb *tcb_alloc_init(void)
return 0;
ids.tid = id_new(&kernel_resources.ktcb_ids);
ids.tid |= TASK_CID_MASK & (cid << TASK_CID_SHIFT);
ids.tgid = L4_NILTHREAD;
ids.spid = L4_NILTHREAD;
@@ -92,6 +93,9 @@ void tcb_delete(struct ktcb *tcb)
mutex_unlock(&curcont->space_list.lock);
}
/* Clear container id part */
tcb->tid &= ~TASK_CID_MASK;
/* Deallocate tcb ids */
id_del(&kernel_resources.ktcb_ids, tcb->tid);
@@ -114,22 +118,44 @@ struct ktcb *tcb_find_by_space(l4id_t spid)
return 0;
}
/*
 * Search the given container's ktcb list for a thread with the
 * given thread id. Returns the ktcb, or 0 if no such thread
 * exists in that container.
 *
 * The list spinlock is held only for the duration of the scan.
 */
struct ktcb *container_find_tcb(struct container *c, l4id_t tid)
{
	struct ktcb *task;

	spin_lock(&c->ktcb_list.list_lock);
	list_foreach_struct(task, &c->ktcb_list.list, task_list) {
		if (task->tid == tid) {
			spin_unlock(&c->ktcb_list.list_lock);
			return task;
		}
	}
	spin_unlock(&c->ktcb_list.list_lock);
	return 0;
}
/*
* Threads are the only resource where inter-container searches are
* allowed. This is because on other containers, only threads can be
* targeted for operations. E.g. ipc, sharing memory. Currently you
can't reach a space, a mutex, or any other resource on another
* container.
*/
/*
 * Find a thread by its globally unique thread id.
 *
 * The container id is embedded in the top nibbles of the tid
 * (see tid_to_cid). If it refers to the current container, the
 * local ktcb list is searched directly; otherwise the target
 * container is looked up first and its list searched. Returns
 * the ktcb, or 0 if the container or thread does not exist.
 */
struct ktcb *tcb_find(l4id_t tid)
{
	struct container *c;

	/* Fast path: the caller is looking up itself */
	if (current->tid == tid)
		return current;

	/* Intra-container search */
	if (tid_to_cid(tid) == curcont->cid)
		return container_find_tcb(curcont, tid);

	/* Inter-container search: resolve the target container first */
	if (!(c = container_find(&kernel_resources, tid_to_cid(tid))))
		return 0;

	return container_find_tcb(c, tid);
}
void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)