Capability checking added as compiling code.

Capability checking for thread_control, exregs, mutex, cap_control,
ipc, and map system calls.

The envisioned model is implemented in code that compiles, but the
actual functionality hasn't been tested.

Need to add:
- Dynamic assignment of initial resources matching what's
defined in the configuration.
- A paged-thread-group, since that would be a logical unit of
separation from a capability point-of-view.
- Resource ids for various tasks. E.g.
  - Memory capabilities don't have target resources.
  - Thread capability assumes current container for THREAD_CREATE.
  - Mutex syscall assumes current thread (this one may not need
    any changing)
  - cap_control syscall assumes current thread. It may happen to
    be that another thread's capability list is manipulated.

Last but not least:
- A simple and easy-to-use userspace library for dynamic expansion
  of resource domains as new resources are created such as threads.
This commit is contained in:
Bahadir Balban
2009-10-25 23:57:17 +02:00
parent 83ce4280b0
commit 88e3706474
18 changed files with 511 additions and 320 deletions

View File

@@ -10,6 +10,8 @@
#define CAP_CONTROL_NCAPS 0x00
#define CAP_CONTROL_READ 0x01
#define CAP_CONTROL_SHARE 0x02
#define CAP_CONTROL_GRANT 0x03
#define CAP_CONTROL_MODIFY 0x05
#define CAP_SHARE_MASK 0x1F
#define CAP_SHARE_SPACE 0x01

View File

@@ -13,6 +13,27 @@
#define EXREGS_SET_PAGER 1
#define EXREGS_SET_UTCB 2
#define EXREGS_VALID_REGULAR_REGS \
(FIELD_TO_BIT(exregs_context_t, r0) | \
FIELD_TO_BIT(exregs_context_t, r1) | \
FIELD_TO_BIT(exregs_context_t, r2) | \
FIELD_TO_BIT(exregs_context_t, r3) | \
FIELD_TO_BIT(exregs_context_t, r4) | \
FIELD_TO_BIT(exregs_context_t, r5) | \
FIELD_TO_BIT(exregs_context_t, r6) | \
FIELD_TO_BIT(exregs_context_t, r7) | \
FIELD_TO_BIT(exregs_context_t, r8) | \
FIELD_TO_BIT(exregs_context_t, r9) | \
FIELD_TO_BIT(exregs_context_t, r10) | \
FIELD_TO_BIT(exregs_context_t, r11) | \
FIELD_TO_BIT(exregs_context_t, r12) | \
FIELD_TO_BIT(exregs_context_t, lr)) \
#define EXREGS_VALID_SP \
FIELD_TO_BIT(exregs_context_t, sp) \
#define EXREGS_VALID_PC \
FIELD_TO_BIT(exregs_context_t, pc) \
/* Structure passed by userspace pagers for exchanging registers */
struct exregs_data {

View File

@@ -12,6 +12,17 @@
#if defined (__KERNEL__)
/*
* ipc syscall uses an ipc_type variable and send/recv
* details are embedded in this variable.
*/
enum IPC_TYPE {
IPC_INVALID = 0,
IPC_SEND = 1,
IPC_RECV = 2,
IPC_SENDRECV = 3,
};
/* These are for internally created ipc paths. */
int ipc_send(l4id_t to, unsigned int flags);
int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags);

View File

@@ -40,7 +40,7 @@ int sys_unmap(unsigned long virtual, unsigned long npages, unsigned int tid);
int sys_space_control(void);
int sys_ipc_control(void);
int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
unsigned long flags, unsigned int tid);
unsigned int flags, l4id_t tid);
int sys_getid(struct task_ids *ids);
int sys_capability_control(unsigned int req, unsigned int flags, void *addr);
int sys_container_control(unsigned int req, unsigned int flags, void *addr);

View File

@@ -17,7 +17,7 @@
#define CAP_TYPE_SCHED (1 << 4)
#define CAP_TYPE_UMUTEX (1 << 5)
#define CAP_TYPE_QUANTITY (1 << 6)
#define CAP_TYPE_CAP (1 << 7)
#define cap_type(c) ((c)->type & CAP_TYPE_MASK)
/*
@@ -57,7 +57,7 @@
#define CAP_EXREGS_RW_UTCB (1 << 1)
#define CAP_EXREGS_RW_SP (1 << 2)
#define CAP_EXREGS_RW_PC (1 << 3)
#define CAP_EXREGS_RW_REGS (1 << 4)
#define CAP_EXREGS_RW_REGS (1 << 4) /* Other regular regs */
#define CAP_EXREGS_RW_CPU (1 << 5)
#define CAP_EXREGS_RW_CPUTIME (1 << 6)
@@ -90,12 +90,12 @@
/* Userspace mutex capability */
#define CAP_UMUTEX_LOCK (1 << 0)
#define CAP_UMUTEX_UNLOCK (1 << 1)
/* Capability control capability */
#define CAP_CAP_SPLIT (1 << 0)
#define CAP_CAP_SPLICE (1 << 1)
#define CAP_CAP_REDUCE (1 << 2)
#define CAP_CAP_REVOKE (1 << 3)
#define CAP_CAP_GRANT (1 << 4)
#define CAP_CAP_MODIFY (1 << 0)
#define CAP_CAP_GRANT (1 << 1)
#define CAP_CAP_READ (1 << 2)
#define CAP_CAP_SHARE (1 << 3)
#endif /* __CAP_TYPES_H__ */

View File

@@ -7,6 +7,7 @@
#define __GENERIC_CAPABILITY_H__
#include <l4/lib/list.h>
#include <l4/api/exregs.h>
/*
* Some resources that capabilities possess don't
@@ -126,7 +127,10 @@ static inline void cap_list_move(struct cap_list *to,
cap_list_attach(cap_head, to);
}
/* Have to have these as tcb.h includes this file */
struct ktcb;
struct task_ids;
/* Capability checking for quantitative capabilities */
int capability_consume(struct capability *cap, int quantity);
int capability_free(struct capability *cap, int quantity);
@@ -135,6 +139,18 @@ struct capability *capability_find_by_rtype(struct ktcb *task,
struct capability *cap_list_find_by_rtype(struct cap_list *clist,
unsigned int rtype);
/* Capability checking on system calls */
int cap_map_check(struct ktcb *task, unsigned long phys, unsigned long virt,
unsigned long npages, unsigned int flags, l4id_t tid);
int cap_thread_check(struct ktcb *task, unsigned int flags,
struct task_ids *ids);
int cap_exregs_check(struct ktcb *task, struct exregs_data *exregs);
int cap_ipc_check(l4id_t to, l4id_t from,
unsigned int flags, unsigned int ipc_type);
int cap_cap_check(struct ktcb *task, unsigned int req, unsigned int flags);
int cap_mutex_check(unsigned long mutex_address, int mutex_op);
#if 0
/* Virtual memory space allocated to container */
struct capability cap_virtmap = {

View File

@@ -9,6 +9,7 @@
/* Number of containers defined at compile-time */
#include <l4/generic/capability.h>
#include <l4/lib/list.h>
#include <l4/lib/idpool.h>
#include INC_SUBARCH(mm.h)

View File

@@ -10,7 +10,7 @@
#define MAP_USR_RW_FLAGS 0 /* CB as one would expect */
#define MAP_USR_RO_FLAGS 1 /* CB as one would expect */
#define MAP_SVC_RW_FLAGS 2 /* CB as one would expect */
#define MAP_USR_IO_FLAGS 3 /* Non-CB, RW */
#define MAP_USR_IO_FLAGS 3 /* Non-CB, RW TODO: How about RO one? */
#define MAP_SVC_IO_FLAGS 4 /* Non-CB, RW */
/* Some default aliases */

View File

@@ -11,7 +11,7 @@ void thread_id_pool_init(void);
int thread_id_new(void);
int thread_id_del(int tid);
void task_destroy_current(void);
void thread_destroy_current(void);
void task_make_zombie(struct ktcb *task);
#endif /* __GENERIC_THREAD_H__ */

View File

@@ -110,6 +110,13 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
{
int err;
/*
* Check capability to do a capability operation.
* Supported only on current's caps for time being.
*/
if ((err = cap_cap_check(current, req, flags)) < 0)
return err;
switch(req) {
/* Return number of capabilities the thread has */
case CAP_CONTROL_NCAPS:

View File

@@ -137,14 +137,8 @@ int sys_exchange_registers(struct exregs_data *exregs, l4id_t tid)
goto out;
}
/*
* FIXME:
* Capability Check.
* Whose clist are we ought to check? Pager's or threads?
* Need to check exregs capability
* Need to check utcb capability if present.
* if ((exregs->flags & EXREGS_SET_UTCB) &&
*/
if ((err = cap_exregs_check(task, exregs)) < 0)
return -ENOCAP;
/* Copy registers */
do_exchange_registers(task, exregs);

View File

@@ -15,17 +15,6 @@
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
/*
* ipc syscall uses an ipc_type variable and send/recv
* details are embedded in this variable.
*/
enum IPC_TYPE {
IPC_INVALID = 0,
IPC_SEND = 1,
IPC_RECV = 2,
IPC_SENDRECV = 3,
};
int ipc_short_copy(struct ktcb *to, struct ktcb *from)
{
unsigned int *mr0_src = KTCB_REF_MR0(from);
@@ -373,7 +362,7 @@ int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags)
int ipc_sendrecv_extended(l4id_t to, l4id_t from, unsigned int flags)
{
return 0;
return -ENOSYS;
}
/*
@@ -577,8 +566,8 @@ int sys_ipc(l4id_t to, l4id_t from, unsigned int flags)
}
/* Everything in place, now check capability */
if ((err = cap_ipc_check(to, from, flags, ipc_type)) < 0)
return -ENOCAP;
if ((ret = cap_ipc_check(to, from, flags, ipc_type)) < 0)
return ret;
/* Encode ipc type in task flags */
tcb_set_ipc_flags(current, flags);

View File

@@ -15,12 +15,12 @@ int sys_map(unsigned long phys, unsigned long virt, unsigned long npages,
struct ktcb *target;
int err;
if ((err = cap_map_check(phys, virt, npages, flags, tid)) < 0)
return err;
if (!(target = tcb_find(tid)))
return -ESRCH;
if ((err = cap_map_check(target, phys, virt, npages, flags, tid)) < 0)
return err;
add_mapping_pgd(phys, virt, npages << PAGE_BITS, flags, TASK_PGD(target));
return 0;

View File

@@ -239,7 +239,17 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
return -EINVAL;
}
/* Find and check physical address for virtual mutex address */
if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0)
return ret;
/*
* Find and check physical address for virtual mutex address
*
* FIXME: Could we check this as a capability? Perhaps not
* since not always the caller but its pager possesses
* relevant memory capability. Maybe check on behalf of
* its pager?
*/
if (!(mutex_physical =
virt_to_phys_by_pgd(mutex_address,
TASK_PGD(current))))

View File

@@ -13,6 +13,7 @@
#include <l4/lib/mutex.h>
#include <l4/lib/wait.h>
#include <l4/generic/resource.h>
#include <l4/generic/capability.h>
#include INC_ARCH(asm.h)
#include INC_SUBARCH(mm.h)
@@ -41,7 +42,7 @@ int sys_thread_switch(void)
* already gone, the state is already TASK_INACTIVE so the pager
* won't sleep at all.
*/
int task_suspend(struct ktcb *task, unsigned int flags)
int thread_suspend(struct ktcb *task, unsigned int flags)
{
int ret = 0;
@@ -94,15 +95,11 @@ int arch_clear_thread(struct ktcb *tcb)
return 0;
}
int thread_recycle(struct task_ids *ids)
int thread_recycle(struct ktcb *task)
{
struct ktcb *task;
int ret;
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
if ((ret = task_suspend(task, 0)) < 0)
if ((ret = thread_suspend(task, 0)) < 0)
return ret;
/*
@@ -124,9 +121,9 @@ int thread_recycle(struct task_ids *ids)
return 0;
}
void task_destroy_current();
void thread_destroy_current();
int task_destroy(struct ktcb *task)
int thread_destroy(struct ktcb *task)
{
int ret;
@@ -134,13 +131,13 @@ int task_destroy(struct ktcb *task)
* Pager destroying itself
*/
if (task == current) {
task_destroy_current();
thread_destroy_current();
/* It should not return */
BUG();
}
if ((ret = task_suspend(task, 0)) < 0)
if ((ret = thread_suspend(task, 0)) < 0)
return ret;
/* Remove tcb from global list so any callers will get -ESRCH */
@@ -182,7 +179,7 @@ void task_make_zombie(struct ktcb *task)
* address or voluntarily. All threads managed also get
* destroyed.
*/
void task_destroy_current(void)
void thread_destroy_current(void)
{
struct ktcb *task, *n;
@@ -195,7 +192,7 @@ void task_destroy_current(void)
task->pagerid != current->tid)
continue;
spin_unlock(&curcont->ktcb_list.list_lock);
task_suspend(task, TASK_EXITING);
thread_suspend(task, TASK_EXITING);
spin_lock(&curcont->ktcb_list.list_lock);
}
spin_unlock(&curcont->ktcb_list.list_lock);
@@ -206,7 +203,7 @@ void task_destroy_current(void)
sched_suspend_sync();
}
int task_resume(struct ktcb *task)
int thread_resume(struct ktcb *task)
{
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
@@ -221,13 +218,8 @@ int task_resume(struct ktcb *task)
}
/* Runs a thread for the first time */
int thread_start(struct task_ids *ids)
int thread_start(struct ktcb *task)
{
struct ktcb *task;
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
if (!mutex_trylock(&task->thread_control_lock))
return -EAGAIN;
@@ -420,37 +412,6 @@ out_err:
return err;
}
static inline int thread_resume(struct task_ids *ids)
{
struct ktcb *task;
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
return task_resume(task);
}
static inline int thread_suspend(struct task_ids *ids)
{
struct ktcb *task;
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
return task_suspend(task, 0);
}
static inline int thread_destroy(struct task_ids *ids)
{
struct ktcb *task;
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
return task_destroy(task);
}
/*
* Creates, destroys and modifies threads. Also implicitly creates an address
* space for a thread that doesn't already have one, or destroys it if the last
@@ -458,13 +419,18 @@ static inline int thread_destroy(struct task_ids *ids)
*/
int sys_thread_control(unsigned int flags, struct task_ids *ids)
{
struct ktcb *task = 0;
int err, ret = 0;
if ((err = check_access((unsigned long)ids, sizeof(*ids),
MAP_USR_RW_FLAGS, 1)) < 0)
return err;
if ((err = cap_thread_check(flags, ids)) < 0)
if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE)
if (!(task = tcb_find(ids->tid)))
return -ESRCH;
if ((err = cap_thread_check(task, flags, ids)) < 0)
return err;
switch (flags & THREAD_ACTION_MASK) {
@@ -472,19 +438,19 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
ret = thread_create(ids, flags);
break;
case THREAD_RUN:
ret = thread_start(ids);
ret = thread_start(task);
break;
case THREAD_SUSPEND:
ret = thread_suspend(ids);
ret = thread_suspend(task, flags);
break;
case THREAD_RESUME:
ret = thread_resume(ids);
ret = thread_resume(task);
break;
case THREAD_DESTROY:
ret = thread_destroy(ids);
ret = thread_destroy(task);
break;
case THREAD_RECYCLE:
ret = thread_recycle(ids);
ret = thread_recycle(task);
break;
default:
ret = -EINVAL;

View File

@@ -113,7 +113,7 @@ void fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far)
if (current->tid == current->pagerid) {
printk("Pager (%d) faulted on itself. FAR: 0x%x, PC: 0x%x Exiting.\n",
current->tid, fault->far, fault->faulty_pc);
task_destroy_current();
thread_destroy_current();
}
/* Send ipc to the task's pager */

View File

@@ -8,8 +8,13 @@
#include <l4/generic/container.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/tcb.h>
#include <l4/api/capability.h>
#include <l4/api/errno.h>
#include <l4/lib/printk.h>
#include <l4/api/thread.h>
#include <l4/api/exregs.h>
#include <l4/api/ipc.h>
#include INC_GLUE(message.h)
void capability_init(struct capability *cap)
{
@@ -76,12 +81,16 @@ struct capability *cap_list_find_by_rtype(struct cap_list *cap_list,
* mutexes, if a mutex is freed, it needs to be accounted to private
* pool first if that is not full, because freeing it into shared
* pool may lose the mutex right to another task.
*
* In conclusion freeing of pool-type capabilities need to be done
* in order of privacy. -> It may get confusing as a space, thread
* group id or paged-thread group is not necessarily in a different
* privacy ring.
*/
struct capability *capability_find_by_rtype(struct ktcb *task,
unsigned int rtype)
{
struct capability *cap;
struct ktcb *tgleader, *pager;
/* Search task's own list */
list_foreach_struct(cap, &task->cap_list.caps, list)
@@ -101,280 +110,316 @@ struct capability *capability_find_by_rtype(struct ktcb *task,
return 0;
}
typedef struct capability *(cap_match_func *)
(struct capability *cap, void *match_args)
cap_match_func_t;
typedef struct capability *(*cap_match_func_t) \
(struct capability *cap, void *match_args);
struct capability *cap_find(struct ktcb *task, cap_match_func_t cap_match_func,
void *match_args, unsigned int val)
void *match_args, unsigned int cap_type)
{
struct capability *cap;
struct ktcb *tgleader, *pager;
/* Search task's own list */
list_foreach_struct(cap, &task->cap_list.caps, list)
if ((cap = cap_match_func(cap, match_args, val)))
if (cap_type(cap) == cap_type &&
((cap = cap_match_func(cap, match_args))))
return cap;
/* Search space list */
list_foreach_struct(cap, &task->space->cap_list.caps, list)
if ((cap = cap_match_func(cap, match_args, val)))
if (cap_type(cap) == cap_type &&
((cap = cap_match_func(cap, match_args))))
return cap;
/* Search container list */
list_foreach_struct(cap, &task->container->cap_list.caps, list)
if ((cap = cap_match_func(cap, match_args, val)))
if (cap_type(cap) == cap_type &&
((cap = cap_match_func(cap, match_args))))
return cap;
return 0;
}
struct sys_mutex_args {
unsigned long address;
unsigned int op;
};
struct capability *
cap_match_mem(struct capability *cap,
void *match_args, unsigned int valid)
cap_match_mutex(struct capability *cap, void *args)
{
struct capability *match = match_args;
/* Unconditionally expect these flags */
unsigned int perms = CAP_UMUTEX_LOCK | CAP_UMUTEX_UNLOCK;
/* Equality-check these fields based on valid vector */
if (valid & FIELD_TO_BIT(struct capability, capid))
if (cap->capid != match->capid)
return 0;
if (valid & FIELD_TO_BIT(struct capability, resid))
if (cap->resid != match->resid)
return 0;
if (valid & FIELD_TO_BIT(struct capability, owner))
if (cap->owner != match->owner)
return 0;
if (valid & FIELD_TO_BIT(struct capability, type))
if (cap->type != match->type)
return 0;
if (valid & FIELD_TO_BIT(struct capability, access))
if ((cap->access & match->access)
!= match->access)
return 0;
if ((cap->access & perms) != perms)
return 0;
/* Checked these together as a range */
if (valid & FIELD_TO_BIT(struct capability, start) ||
valid & FIELD_TO_BIT(struct capability, end))
if (!(match->start >= cap->start &&
match->end <= cap->end &&
match->start < match->end))
/* Now check the usual restype/resid pair */
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (current->tid != cap->resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (current->space->spid != cap->resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (current->container->cid != cap->resid)
return 0;
break;
default:
BUG(); /* Unknown cap type is a bug */
}
/* It is a match */
return cap;
}
struct ipc_match = {
l4id_t tid;
l4id_t tgid;
l4id_t spid;
l4id_t cid;
struct capability *cap;
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
{
struct sys_mutex_args args = {
.address = mutex_address,
.op = mutex_op,
};
if (!(cap_find(current, cap_match_mutex,
&args, CAP_TYPE_UMUTEX)))
return -ENOCAP;
return 0;
}
struct sys_capctrl_args {
unsigned int req;
unsigned int flags;
struct ktcb *task;
};
struct capability *
cap_match_capctrl(struct capability *cap, void *args_ptr)
{
struct sys_capctrl_args *args = args_ptr;
unsigned int req = args->req;
struct ktcb *target = args->task;
/* Check operation privileges */
if (req == CAP_CONTROL_NCAPS ||
req == CAP_CONTROL_READ)
if (!(cap->access & CAP_CAP_READ))
return 0;
if (req == CAP_CONTROL_SHARE)
if (!(cap->access & CAP_CAP_SHARE))
return 0;
if (req == CAP_CONTROL_GRANT)
if (!(cap->access & CAP_CAP_GRANT))
return 0;
if (req == CAP_CONTROL_MODIFY)
if (!(cap->access & CAP_CAP_MODIFY))
return 0;
/* Now check the usual restype/resid pair */
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (target->tid != cap->resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (target->space->spid != cap->resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (target->container->cid != cap->resid)
return 0;
break;
default:
BUG(); /* Unknown cap type is a bug */
}
return cap;
}
int cap_cap_check(struct ktcb *task, unsigned int req, unsigned int flags)
{
struct sys_capctrl_args args = {
.req = req,
.flags = flags,
.task = task,
};
if (!(cap_find(task, cap_match_capctrl,
&args, CAP_TYPE_CAP)))
return -ENOCAP;
return 0;
}
struct sys_ipc_args {
struct ktcb *task;
unsigned int ipc_type;
unsigned int flags;
};
/*
* In an ipc, we could look for access bits, resource type and target id
*/
struct capability *
cap_match_ipc(struct capability *cap, void *match_args, unsigned int valid)
cap_match_ipc(struct capability *cap, void *args_ptr)
{
struct ipc_match *ipc_match = match_args;
struct capability *cap = ipc_match->cap;
struct sys_ipc_args *args = args_ptr;
struct ktcb *target = args->task;
/*
* Check these for basic equality.
*/
if (valid & FIELD_TO_BIT(struct capability, capid))
if (cap->capid != match->capid)
/* Check operation privileges */
if (args->flags & IPC_FLAGS_SHORT)
if (!(cap->access & CAP_IPC_SHORT))
return 0;
if (valid & FIELD_TO_BIT(struct capability, owner))
if (cap->owner != match->owner)
if (args->flags & IPC_FLAGS_FULL)
if (!(cap->access & CAP_IPC_FULL))
return 0;
if (valid & FIELD_TO_BIT(struct capability, access))
if ((cap->access & match->access)
!= match->access)
if (args->flags & IPC_FLAGS_EXTENDED)
if (!(cap->access & CAP_IPC_EXTENDED))
return 0;
/* Assume we have both send and receive unconditionally */
if (!((cap->access & CAP_IPC_SEND) &&
(cap->access & CAP_IPC_RECV)))
return 0;
/*
* Check these optimised/specially.
* rtype and target are checked against each
* other all at once
* We have a target thread, check if capability match
* any resource fields in target
*/
if (valid & FIELD_TO_BIT(struct capability, resid)) {
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (ipc_matcher->tid != resid)
return 0;
break;
case CAP_RTYPE_TGROUP:
if (ipc_matcher->tgid != resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (ipc_matcher->spid != resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (ipc_matcher->cid != resid)
return 0;
break;
/*
* It's simply a bug to
* get an unknown resource here
*/
default:
BUG();
}
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (target->tid != cap->resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (target->space->spid != cap->resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (target->container->cid != cap->resid)
return 0;
break;
default:
BUG(); /* Unknown cap type is a bug */
}
return cap;
}
int cap_ipc_check(struct ktcb *target, l4id_t from,
/*
* Limitation: We currently only check from sender's
* perspective. Sender always targets a real thread.
* Does sender have the right to do this ipc?
*/
int cap_ipc_check(l4id_t to, l4id_t from,
unsigned int flags, unsigned int ipc_type)
{
unsigned int valid = 0;
struct capability ipccap = {
.access = ipc_flags_type_to_access(flags, ipc_type);
};
struct ktcb *target;
struct sys_ipc_args args;
/* Receivers can get away from us (for now) */
if (ipc_type != IPC_SEND && ipc_type != IPC_SENDRECV)
return 0;
/*
* All these ids will get checked at once,
* depending on the encountered capability's
* rtype field
* We're the sender, meaning we have
* a real target
*/
struct ipc_matcher ipc_matcher = {
.tid = target->tid,
.tgid = target->tgid,
.spid = target->space->spid,
.cid = target->container->cid,
.ipccap = ipccap,
};
if (!(target = tcb_find(to)))
return -ESRCH;
valid |= FIELD_TO_BIT(struct capability, access);
valid |= FIELD_TO_BIT(struct capability, resid);
/* Set up other args */
args.flags = flags;
args.ipc_type = ipc_type;
args.task = target;
if (!(cap_find(task, cap_match_ipc,
&ipc_matcher, valid)))
if (!(cap_find(target, cap_match_ipc,
&args, CAP_TYPE_IPC)))
return -ENOCAP;
return 0;
}
struct exregs_match = {
l4id_t tid
l4id_t pagerid;
l4id_t tgid;
l4id_t spid;
l4id_t cid;
struct capability *cap;
struct sys_exregs_args {
struct exregs_data *exregs;
struct ktcb *task;
};
/*
* CAP_TYPE_EXREGS already matched upon entry
*/
struct capability *
cap_match_thread(struct capability *cap, void *match_args, unsigned int valid)
cap_match_exregs(struct capability *cap, void *args_ptr)
{
struct thread_match *match = match_args;
struct capability *cap = match->cap;
struct sys_exregs_args *args = args_ptr;
struct exregs_data *exregs = args->exregs;
struct ktcb *target = args->task;
/*
* Check these for basic equality.
*/
if (valid & FIELD_TO_BIT(struct capability, capid))
if (cap->capid != match->capid)
/* Check operation privileges */
if (exregs->valid_vect & EXREGS_VALID_REGULAR_REGS)
if (!(cap->access & CAP_EXREGS_RW_REGS))
return 0;
if (valid & FIELD_TO_BIT(struct capability, owner))
if (cap->owner != match->owner)
if (exregs->valid_vect & EXREGS_VALID_SP)
if (!(cap->access & CAP_EXREGS_RW_SP))
return 0;
if (valid & FIELD_TO_BIT(struct capability, access))
if ((cap->access & match->access)
!= match->access)
if (exregs->valid_vect & EXREGS_VALID_PC)
if (!(cap->access & CAP_EXREGS_RW_PC))
return 0;
if (args->exregs->valid_vect & EXREGS_SET_UTCB)
if (!(cap->access & CAP_EXREGS_RW_UTCB))
return 0;
if (args->exregs->valid_vect & EXREGS_SET_PAGER)
if (!(cap->access & CAP_EXREGS_RW_PAGER))
return 0;
/*
* Check these optimised/specially.
* rtype and target are checked against each
* other all at once
* We have a target thread, check if capability
* match any resource fields in target.
*/
if (valid & FIELD_TO_BIT(struct capability, resid)) {
switch (cap_rtype(cap)) {
/* Ability to thread_control over a paged group */
case CAP_RTYPE_PGGROUP:
if (match->pagerid != resid)
return 0;
break;
case CAP_RTYPE_TGROUP:
if (match->tgid != resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (match->spid != resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (match->cid != resid)
return 0;
break;
/*
* It's simply a bug to
* get an unknown resource here
*/
default:
BUG();
}
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (target->tid != cap->resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (target->space->spid != cap->resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (target->container->cid != cap->resid)
return 0;
break;
default:
BUG(); /* Unknown cap type is a bug */
}
return cap;
}
/*
* TODO: We are here!!!
*/
int cap_exregs_check(unsigned int flags, struct task_ids *ids)
int cap_exregs_check(struct ktcb *task, struct exregs_data *exregs)
{
struct capability cap = {
.access = thread_control_flags_to_access(flags);
/* all resid's checked all at once by comparing against rtype */
struct sys_exregs_args args = {
.exregs = exregs,
.task = task,
};
struct thread_match match = {
.pagerid = current->tid
.tgid = current->tgid,
.spid = current->spid,
.cid = current->cid,
.cap = threadmatch,
};
unsigned int valid = 0;
valid |= FIELD_TO_BIT(struct capability, access);
valid |= FIELD_TO_BIT(struct capability, resid);
if (!(cap_find(task, cap_match_thread,
&thread_match, valid)))
if (!(cap_find(task, cap_match_exregs,
&args, CAP_TYPE_EXREGS)))
return -ENOCAP;
return 0;
}
/*
* FIXME: As new pagers, thread groups,
* FIXME: Issues on capabilities:
*
* As new pagers, thread groups,
* thread ids, spaces are created, we need to
* give them thread_control capabilities dynamically,
* based on those ids!!! How do we get to do that, so that
* in userspace it looks not so difficult ???
*/
struct thread_match = {
l4id_t tgid;
l4id_t tid;
l4id_t pagerid;
l4id_t spid;
l4id_t cid;
unsigned int thread_control_flags;
};
def thread_create():
new_space, same_tgid, same_pager,
check_existing_ids(tid, same_tgid, same_pager, thread_create)
thread_create(new_space)
thread_add_cap(new_space, all other caps)
/*
*
* What do you match here?
*
* THREAD_CREATE:
@@ -396,59 +441,187 @@ def thread_create():
* New thread -> New set of caps for that thread!
* New space -> New set of caps for that space! So many capabilities!
*/
itn cap_thread_check(struct ktcb *task, unsigned int flags, struct task_ids *ids)
{
struct thread_matcher = {
.pagerid = current->tid
.tgid = current->tgid,
.spid = current->spid,
.cid = current->cid,
.thread_control_flags = flags;
};
unsigned int valid = 0;
valid |= FIELD_TO_BIT(struct capability, access);
valid |= FIELD_TO_BIT(struct capability, resid);
struct sys_tctrl_args {
struct ktcb *task;
unsigned int flags;
struct task_ids *ids;
};
/*
* CAP_TYPE_TCTRL matched upon entry
*/
struct capability *cap_match_thread(struct capability *cap,
void *args_ptr)
{
struct sys_tctrl_args *args = args_ptr;
struct ktcb *target = args->task;
/* Check operation privileges */
if (args->flags & THREAD_CREATE)
if (!(cap->access & CAP_TCTRL_CREATE))
return 0;
if (args->flags & THREAD_DESTROY)
if (!(cap->access & CAP_TCTRL_DESTROY))
return 0;
if (args->flags & THREAD_SUSPEND)
if (!(cap->access & CAP_TCTRL_SUSPEND))
return 0;
if (args->flags & THREAD_RESUME)
if (!(cap->access & CAP_TCTRL_RESUME))
return 0;
/* If no target and create, or vice versa, it really is a bug */
BUG_ON(!target && !(args->flags & THREAD_CREATE));
BUG_ON(target && (args->flags & THREAD_CREATE));
if (args->flags & THREAD_CREATE) {
/*
* FIXME: Add cid to task_ids arg.
*
* Its a thread create and we have no knowledge of
* thread id, space id, or any other id.
*
* We _assume_ target is the largest group,
* e.g. same container as current. We check
* for `container' as target in capability
*/
if (cap_rtype(cap) != CAP_RTYPE_CONTAINER)
return 0;
if (cap->resid != current->container->cid)
return 0;
/* Resource type and it match, success */
return cap;
}
/*
* We have a target thread, check if capability match
* any resource fields in target
*/
switch (cap_rtype(cap)) {
case CAP_RTYPE_THREAD:
if (target->tid != cap->resid)
return 0;
break;
case CAP_RTYPE_SPACE:
if (target->space->spid != cap->resid)
return 0;
break;
case CAP_RTYPE_CONTAINER:
if (target->container->cid != cap->resid)
return 0;
break;
default:
BUG(); /* Unknown cap type is a bug */
}
return cap;
}
int cap_thread_check(struct ktcb *task,
unsigned int flags,
struct task_ids *ids)
{
struct sys_tctrl_args args = {
.task = task,
.flags = flags,
.ids = ids,
};
if (!(cap_find(task, cap_match_thread,
&thread_matcher, valid)))
&args, CAP_TYPE_TCTRL)))
return -ENOCAP;
return 0;
}
struct sys_map_args {
struct ktcb *task;
unsigned long phys;
unsigned long virt;
unsigned long npages;
unsigned int flags;
unsigned int rtype;
l4id_t tid;
};
/*
* CAP_TYPE_MAP already matched upon entry
*/
struct capability *cap_match_mem(struct capability *cap,
void *args_ptr)
{
struct sys_map_args *args = args_ptr;
unsigned long pfn;
unsigned int perms;
/* Set base according to what type of mem type we're matching */
if (args->rtype == CAP_RTYPE_PHYSMEM)
pfn = __pfn(args->phys);
else
pfn = __pfn(args->virt);
/* Check range */
if (cap->start > pfn || cap->end < pfn + args->npages)
return 0;
/* Check permissions */
switch (args->flags) {
case MAP_USR_RW_FLAGS:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_RO_FLAGS:
perms = CAP_MAP_READ | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_IO_FLAGS:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_UNCACHED;
if ((cap->access & perms) != perms)
return 0;
break;
default:
/* Anything else is an invalid/unrecognised argument */
return 0;
}
return cap;
/*
* TODO: Does it make sense to have a meaningful resid field
* in a memory resource? E.g. Which resources may I map it to?
* It might, as I can map an arbitrary mapping to an arbitrary
* thread in my container and break it's memory integrity.
*
* It seems it would be reasonable for a pager to have memory
* capabilities with a resid of its own id, and rtype of
* CAP_RTYPE_PGGROUP, effectively allowing it to do map
* operations on itself and its group of paged children.
*/
}
int cap_map_check(struct ktcb *task, unsigned long phys, unsigned long virt,
unsigned long npages, unsigned int flags, l4id_t tid)
{
struct capability *physmem, *virtmem;
struct capability physmatch = {
.start = __pfn(phys),
.end = __pfn(phys) + npages,
.type = CAP_TYPE_PHYSMEM,
.flags = map_flags_to_cap_flags(flags),
}
struct capability virtmatch = {
.start = __pfn(virt),
.end = __pfn(virt) + npages,
.type = CAP_TYPE_VIRTMEM,
.flags = map_flags_to_cap_flags(flags),
}
unsigned int virt_valid = 0;
unsigned int phys_valid = 0;
virt_valid |= FIELD_TO_BIT(struct capability, access);
virt_valid |= FIELD_TO_BIT(struct capability, start);
virt_valid |= FIELD_TO_BIT(struct capability, end);
phys_valid |= FIELD_TO_BIT(struct capability, access);
phys_valid |= FIELD_TO_BIT(struct capability, start);
phys_valid |= FIELD_TO_BIT(struct capability, end);
struct sys_map_args args = {
.task = task,
.phys = phys,
.virt = virt,
.npages = npages,
.flags = flags,
.tid = tid,
};
args.rtype = CAP_RTYPE_PHYSMEM;
if (!(physmem = cap_find(task, cap_match_mem,
&physmatch, phys_valid)))
&args, CAP_TYPE_MAP)))
return -ENOCAP;
args.rtype = CAP_RTYPE_VIRTMEM;
if (!(virtmem = cap_find(task, cap_match_mem,
&virtmatch, virt_valid)))
&args, CAP_TYPE_MAP)))
return -ENOCAP;
return 0;

View File

@@ -0,0 +1 @@
sloccount src/api/ src/arch/arm/v5 src/arch/arm/*.[cS] src/glue/arm src/platform/pb926/* src/drivers/* src/generic/* src/lib/* include/l4/api include/l4/arch/arm/*.h include/l4/arch/arm/v5 include/l4/glue/arm/* include/l4/drivers/* include/l4/generic/* include/l4/lib/* include/l4/platform/pb926/*