mirror of https://github.com/drasko/codezero.git (synced 2026-01-13 11:23:16 +01:00)

Changes since April

Clean up of build directories. Simplifications to capability model.
@@ -1,10 +1,10 @@

# Inherit global environment

Import('env')
Import('symbols')

# The set of source files associated with this SConscript file.
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c', 'mutex.c', 'cap.c', 'exregs.c', 'irq.c', 'cache.c']
src_local = ['kip.c', 'syscall.c', 'thread.c', 'ipc.c', 'map.c',
             'mutex.c', 'cap.c', 'exregs.c', 'irq.c', 'cache.c']

obj = env.Object(src_local)

Return('obj')
src/api/cap.c (655 lines changed)

@@ -25,12 +25,6 @@ int cap_read_all(struct capability *caparray)
    struct capability *cap;
    int capidx = 0;

    /* Copy all capabilities from lists to buffer */
    list_foreach_struct(cap, &current->cap_list.caps, list) {
        memcpy(&caparray[capidx], cap, sizeof(*cap));
        capidx++;
    }

    list_foreach_struct(cap, &current->space->cap_list.caps, list) {
        memcpy(&caparray[capidx], cap, sizeof(*cap));
        capidx++;
@@ -44,616 +38,6 @@ int cap_read_all(struct capability *caparray)
    return 0;
}

/*
 * Shares single cap. If you are sharing, there is
 * only one target that makes sense, that is your
 * own container.
 */
int cap_share_single(struct capability *user)
{
    struct capability *cap;
    struct cap_list *clist;

    if (!(cap = cap_find_by_capid(user->capid, &clist)))
        return -EEXIST;

    if (cap->owner != current->tid)
        return -EPERM;

    /* First remove it from its list */
    cap_list_remove(cap, clist);

    /* Place it where it is shared */
    cap_list_insert(cap, &curcont->cap_list);

    return 0;
}

/*
 * Shares the whole capability list.
 *
 * FIXME: Make sure each and every capability has its
 * share right set!
 */
int cap_share_all(unsigned int flags)
{
    if (flags == CAP_SHARE_ALL_CONTAINER) {

        /* Move all private caps to container */
        cap_list_move(&curcont->cap_list,
                      &current->cap_list);

        /*
         * Move all space caps to container, also.
         *
         * FIXME: Make sure all space capabilities
         * are owned by the sharer!!!
         */
        cap_list_move(&curcont->cap_list,
                      &current->space->cap_list);
    } else if (flags == CAP_SHARE_ALL_SPACE) {

        /* Move all private caps to space */
        cap_list_move(&current->space->cap_list,
                      &current->cap_list);
    }
    return 0;
}

int cap_share(struct capability *cap, unsigned int flags)
{
    if (flags == CAP_SHARE_SINGLE)
        return cap_share_single(cap);
    else
        return cap_share_all(flags);
}
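
A minimal usage sketch (illustrative only, not part of this commit): userspace reaches cap_share() through the capability-control syscall dispatched in sys_capability_control() further below. CAP_CONTROL_SHARE and CAP_SHARE_SINGLE are taken from this diff; the l4_capability_control() wrapper name and its header are assumptions.

    #include <string.h>
    #include <l4lib/syscalls.h>    /* assumed userspace wrapper header */

    /* Publish one owned capability to the whole container */
    static int share_cap_with_container(l4id_t capid)
    {
        struct capability cap;

        memset(&cap, 0, sizeof(cap));
        cap.capid = capid;    /* kernel looks the cap up by this id */

        return l4_capability_control(CAP_CONTROL_SHARE,
                                     CAP_SHARE_SINGLE, &cap);
    }

On success the capability moves from the caller's private list onto curcont->cap_list, where any task in the container can find it.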

#if 0

/*
 * Currently unused. API hasn't settled.
 */
/* Grants all caps */
int cap_grant_all(struct capability *req, unsigned int flags)
{
    struct ktcb *target;
    struct capability *cap_head, *cap;
    int err;

    /* Owners are always threads, for simplicity */
    if (!(target = tcb_find(req->owner)))
        return -ESRCH;

    /* Detach all caps */
    cap_head = cap_list_detach(&current->space->cap_list);

    list_foreach_struct(cap, &cap_head->list, list) {
        /* Change ownership */
        cap->owner = target->tid;
        BUG_ON(target->tid != req->owner);

        /* Make immutable if GRANT_IMMUTABLE given */
        if (flags & CAP_GRANT_IMMUTABLE) {
            cap->access &= ~CAP_GENERIC_MASK;
            cap->access |= CAP_IMMUTABLE;
        }

        /*
         * Sanity check: granted cap cannot have used
         * quantity. Otherwise how else the original
         * users of the cap free them?
         */
        if (cap->used) {
            err = -EPERM;
            goto out_err;
        }
    }

    /* Attach all to target */
    cap_list_attach(cap_head, &target->space->cap_list);
    return 0;

out_err:
    /* Attach it back to original */
    cap_list_attach(cap_head, &current->space->cap_list);
    return err;
}

#endif

int cap_grant_single(struct capability *req, unsigned int flags)
{
    struct capability *cap;
    struct cap_list *clist;
    struct ktcb *target;

    if (!(cap = cap_find_by_capid(req->capid, &clist)))
        return -EEXIST;

    if (!(target = tcb_find(req->owner)))
        return -ESRCH;

    if (cap->owner != current->tid)
        return -EPERM;

    /* Granted cap cannot have used quantity */
    if (cap->used)
        return -EPERM;

    /* First remove it from its list */
    cap_list_remove(cap, clist);

    /* Change ownership */
    cap->owner = target->tid;
    BUG_ON(cap->owner != req->owner);

    /* Make immutable if GRANT_IMMUTABLE given */
    if (flags & CAP_GRANT_IMMUTABLE) {
        cap->access &= ~CAP_GENERIC_MASK;
        cap->access |= CAP_IMMUTABLE;
    }

    /* Place it where it is granted */
    cap_list_insert(cap, &target->space->cap_list);

    return 0;
}

int cap_grant(struct capability *cap, unsigned int flags)
{
    if (flags & CAP_GRANT_SINGLE)
        cap_grant_single(cap, flags);
    else
        return -EINVAL;
    return 0;
}
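
By the same pattern, a grant re-homes a capability in another thread's space list and changes its owner. A hedged sketch under the same assumed wrapper as above; CAP_CONTROL_GRANT, CAP_GRANT_SINGLE and CAP_GRANT_IMMUTABLE are from this diff:

    /* Give a cap to a child thread; the immutable flag strips the
     * generic rights so the new owner cannot modify the cap further.
     * The cap must have no used quantity, per cap_grant_single(). */
    static int grant_cap_to_child(l4id_t capid, l4id_t child_tid)
    {
        struct capability cap;

        memset(&cap, 0, sizeof(cap));
        cap.capid = capid;
        cap.owner = child_tid;    /* new owner, must be a valid tcb */

        return l4_capability_control(CAP_CONTROL_GRANT,
                                     CAP_GRANT_SINGLE | CAP_GRANT_IMMUTABLE,
                                     &cap);
    }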

int cap_deduce_rtype(struct capability *orig, struct capability *new)
{
    struct ktcb *target;
    struct address_space *sp;

    /* An rtype deduction can only be to a space or thread */
    switch (cap_rtype(new)) {
    case CAP_RTYPE_SPACE:
        /* Check containment right */
        if (cap_rtype(orig) != CAP_RTYPE_CONTAINER)
            return -ENOCAP;

        /*
         * Find out if this space exists in this
         * container.
         *
         * Note address space search is local only.
         * Only thread searches are global.
         */
        if (!(sp = address_space_find(new->resid)))
            return -ENOCAP;

        /* Success. Assign new type to original cap */
        cap_set_rtype(orig, cap_rtype(new));

        /* Assign the space id to orig cap */
        orig->resid = sp->spid;
        break;
    case CAP_RTYPE_THREAD:
        /* Find the thread */
        if (!(target = tcb_find(new->resid)))
            return -ENOCAP;

        /* Check containment */
        if (cap_rtype(orig) == CAP_RTYPE_SPACE) {
            if (orig->resid != target->space->spid)
                return -ENOCAP;
        } else if (cap_rtype(orig) == CAP_RTYPE_CONTAINER) {
            if (orig->resid != target->container->cid)
                return -ENOCAP;
        } else
            return -ENOCAP;

        /* Success. Assign new type to original cap */
        cap_set_rtype(orig, cap_rtype(new));

        /* Assign the space id to orig cap */
        orig->resid = target->tid;
        break;
    default:
        return -ENOCAP;
    }
    return 0;
}

/*
 * Deduction can be by access permissions, start, end, size
 * fields, or the target resource type. Inter-container
 * deduction is not allowed.
 *
 * Target resource deduction denotes reducing the applicable
 * space of the target, e.g. from a container to a space in
 * that container.
 *
 * NOTE: If there is no target deduction, you cannot change
 * resid, as this is forbidden.
 *
 * Imagine a space cap, it cannot be deduced to become applicable
 * to another space, i.e a space is in same privilege level.
 * But a container-wide cap can be reduced to be applied on
 * a space in that container (thus changing the resid to that
 * space's id)
 *
 * capid: Id of original capability
 * new: Userspace pointer to new state of capability
 * that is desired.
 *
 * orig = deduced;
 */
int cap_deduce(struct capability *new)
{
    struct capability *orig;
    struct cap_list *clist;
    int ret;

    /* Find original capability */
    if (!(orig = cap_find_by_capid(new->capid, &clist)))
        return -EEXIST;

    /* Check that caller is owner */
    if (orig->owner != current->tid)
        return -ENOCAP;

    /* Check that it is deducable */
    if (!(orig->access & CAP_CHANGEABLE))
        return -ENOCAP;

    /* Check target resource deduction */
    if (cap_rtype(new) != cap_rtype(orig))
        if ((ret = cap_deduce_rtype(orig, new)) < 0)
            return ret;

    /* Check owners are same for request validity */
    if (orig->owner != new->owner)
        return -EINVAL;

    /* Check permissions for deduction */
    if (orig->access) {
        /* New cannot have more bits than original */
        if ((orig->access & new->access) != new->access)
            return -EINVAL;
        /* New cannot make original redundant */
        if (new->access == 0)
            return -EINVAL;

        /* Deduce bits of orig */
        orig->access &= new->access;
    } else if (new->access)
        return -EINVAL;

    /* Check size for deduction */
    if (orig->size) {
        /* New can't have more, or make original redundant */
        if (new->size >= orig->size)
            return -EINVAL;

        /*
         * Can't make reduction on used ones, so there
         * must be enough available ones
         */
        if (new->size < orig->used)
            return -EPERM;
        orig->size = new->size;
    } else if (new->size)
        return -EINVAL;

    /* Range-like permissions can't be deduced */
    if (orig->start || orig->end) {
        if (orig->start != new->start ||
            orig->end != new->end)
            return -EPERM;
    } else if (new->start || new->end)
        return -EINVAL;

    /* Ensure orig and new are the same */
    BUG_ON(orig->capid != new->capid);
    BUG_ON(orig->resid != new->resid);
    BUG_ON(orig->owner != new->owner);
    BUG_ON(orig->type != new->type);
    BUG_ON(orig->access != new->access);
    BUG_ON(orig->start != new->start);
    BUG_ON(orig->end != new->end);
    BUG_ON(orig->size != new->size);
    BUG_ON(orig->used != new->used);

    return 0;
}
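
For illustration, a deduction passes the desired weaker state in and the kernel narrows the original in place. A sketch under the same assumed wrapper, for a non-quantity capability (size == 0, otherwise the size rules above apply); the keep_bits mask is caller-chosen:

    /* Narrow an owned, changeable cap down to a subset of its rights */
    static int deduce_access(const struct capability *cur,
                             unsigned int keep_bits)
    {
        struct capability new = *cur;    /* same capid/resid/owner/type */

        new.access = cur->access & keep_bits;   /* must stay a subset */

        /* Kernel applies: orig->access &= new.access */
        return l4_capability_control(CAP_CONTROL_DEDUCE, 0, &new);
    }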

/*
 * Destroys a capability
 */
int cap_destroy(struct capability *cap)
{
    struct capability *orig;
    struct cap_list *clist;

    /* Find original capability */
    if (!(orig = cap_find_by_capid(cap->capid, &clist)))
        return -EEXIST;

    /* Check that caller is owner */
    if (orig->owner != current->tid)
        return -ENOCAP;

    /* Check that it is destroyable */
    if (!(cap_generic_perms(orig) & CAP_CHANGEABLE))
        return -ENOCAP;

    /*
     * Check that it is not a device.
     *
     * We don't allow devices for now. To do this
     * correctly, we need to check if device irq
     * is not currently registered.
     */
    if (cap_is_devmem(orig))
        return -ENOCAP;

    cap_list_remove(orig, clist);
    free_capability(orig);
    return 0;
}

static inline int cap_has_size(struct capability *c)
{
    return c->size;
}

static inline int cap_has_range(struct capability *c)
{
    return c->start && c->end;
}

/*
 * Splits a capability
 *
 * Pools of typed memory objects can't be replicated, and
 * deduced that way, as replication would temporarily double
 * their size. So they are split in place.
 *
 * Splitting occurs by diff'ing resources possessed between
 * capabilities.
 *
 * capid: Original capability that is valid.
 * diff: New capability that we want to split out.
 *
 * orig = orig - diff;
 * new = diff;
 */
int cap_split(struct capability *diff, unsigned int flags)
{
    struct capability *orig, *new;
    struct cap_list *clist;
    int ret;

    /* Find original capability */
    if (!(orig = cap_find_by_capid(diff->capid, &clist)))
        return -EEXIST;

    /* Check target type/resid/owner is the same */
    if (orig->type != diff->type ||
        orig->resid != diff->resid ||
        orig->owner != diff->owner)
        return -EINVAL;

    /* Check that caller is owner */
    if (orig->owner != current->tid)
        return -ENOCAP;

    /* Check owners are same */
    if (orig->owner != diff->owner)
        return -EINVAL;

    /* Check that it is splitable */
    if (!(orig->access & CAP_CHANGEABLE))
        return -ENOCAP;

    /* Create new */
    if (!(new = capability_create()))
        return -ENOCAP;

    /* Check access bits usage and split */
    if (flags & CAP_SPLIT_ACCESS) {
        /* Access bits must never be redundant */
        BUG_ON(!orig->access);

        /* Split one can't have more bits than original */
        if ((orig->access & diff->access) != diff->access) {
            ret = -EINVAL;
            goto out_err;
        }

        /* Split one cannot make original redundant */
        if ((orig->access & ~diff->access) == 0) {
            ret = -EINVAL;
            goto out_err;
        }

        /* Split one cannot be redundant itself */
        if (!diff->access) {
            ret = -EINVAL;
            goto out_err;
        }

        /* Subtract given access permissions */
        orig->access &= ~diff->access;

        /* Assign given perms to new capability */
        new->access = diff->access;
    } else {
        /* Can't split only by access bits alone */
        if (!cap_has_size(orig) &&
            !cap_has_range(orig)) {
            ret = -EINVAL;
            goto out_err;
        }
        /* If no split, then they are identical */
        new->access = orig->access;

        /* Diff must also reflect orig by convention */
        if (diff->access != orig->access) {
            ret = -EINVAL;
            goto out_err;
        }
    }

    /* If cap has size, split by size is compulsory */
    if (cap_type(orig) == CAP_TYPE_QUANTITY) {
        BUG_ON(!cap_has_size(orig));

        /*
         * Split one can't have more,
         * or make original redundant
         */
        if (diff->size >= orig->size) {
            ret = -EINVAL;
            goto out_err;
        }

        /* Split one can't be redundant itself */
        if (!diff->size) {
            ret = -EINVAL;
            goto out_err;
        }

        /* Split one must be clean i.e. all unused */
        if (orig->size - orig->used < diff->size) {
            ret = -EPERM;
            goto out_err;
        }

        orig->size -= diff->size;
        new->size = diff->size;
        new->used = 0;
    } else {

        /* Diff must also reflect orig by convention */
        if (diff->size != orig->size) {
            ret = -EINVAL;
            goto out_err;
        }

        /* If no split, then they are identical */
        new->size = orig->size;
        new->used = orig->used;

    }

    if (flags & CAP_SPLIT_RANGE) {
        /* They must either be both one or both zero */
        BUG_ON(!!orig->start ^ !!orig->end);

        /* If orig doesn't have a range, return invalid */
        if (!orig->start && !orig->end) {
            ret = -EINVAL;
            goto out_err;
        } else {
            /* Orig has a range but diff doesn't */
            if (!diff->start || !diff->end) {
                ret = -EINVAL;
                goto out_err;
            }
            /* Both valid, but we don't permit range split */
            ret = -EPERM;
            goto out_err;
        }
    /* If no split, then they are identical */
    } else {
        new->start = orig->start;
        new->end = orig->end;
    }

    /* Copy other fields */
    new->type = orig->type;
    new->resid = orig->resid;
    new->owner = orig->owner;

    /* Add the new capability to the most private list */
    cap_list_insert(new, &current->space->cap_list);

    /* Check fields that must be identical */
    BUG_ON(new->resid != diff->resid);
    BUG_ON(new->owner != diff->owner);
    BUG_ON(new->type != diff->type);
    BUG_ON(new->access != diff->access);
    BUG_ON(new->start != diff->start);
    BUG_ON(new->end != diff->end);
    BUG_ON(new->size != diff->size);

    /* Copy capid, and used field that may not be the same */
    diff->capid = new->capid;
    diff->used = new->used;
    return 0;

out_err:
    free_capability(new);
    return ret;
}
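
A quantity split in practice carves a clean piece out of a larger pool, and the kernel writes the new cap's id back into the request. A sketch under the same assumptions as the earlier examples:

    /* Split 'units' out of a quantity cap (e.g. a memory pool).
     * The request must mirror orig's type/resid/owner/access/range. */
    static int split_units(const struct capability *orig, int units,
                           struct capability *out)
    {
        int err;

        *out = *orig;          /* copy identifying fields */
        out->size = units;     /* the piece we want carved out */
        out->used = 0;         /* split piece must be clean */

        if ((err = l4_capability_control(CAP_CONTROL_SPLIT, 0, out)) < 0)
            return err;

        /* out->capid now identifies the newly created capability */
        return 0;
    }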

/*
 * Replicates an existing capability. This is for expanding
 * capabilities to managed children.
 *
 * After replication, a duplicate capability exists in the
 * system, but as it is not a quantity, this does not increase
 * the capabilities of the caller in any way.
 */
int cap_replicate(struct capability *dupl)
{
    struct capability *new, *orig;
    struct cap_list *clist;

    /* Find original capability */
    if (!(orig = cap_find_by_capid(dupl->capid, &clist)))
        return -EEXIST;

    /* Check that caller is owner */
    if (orig->owner != current->tid)
        return -ENOCAP;

    /* Check that it is replicable */
    if (!(orig->access & CAP_REPLICABLE))
        return -ENOCAP;

    /* Quantitative types must not be replicable */
    if (cap_type(orig) == CAP_TYPE_QUANTITY) {
        printk("Cont %d: FATAL: Capability (%d) "
               "is quantitative but also replicable\n",
               curcont->cid, orig->capid);
        /* FIXME: Should rule this out as a CML2 requirement */
        BUG();
    }

    /* Replicate it */
    if (!(new = capability_create()))
        return -ENOCAP;

    /* Copy all except capid & listptrs */
    dupl->resid = new->resid = orig->resid;
    dupl->owner = new->owner = orig->owner;
    dupl->type = new->type = orig->type;
    dupl->access = new->access = orig->access;
    dupl->start = new->start = orig->start;
    dupl->end = new->end = orig->end;
    dupl->size = new->size = orig->size;
    dupl->used = new->used = orig->used;

    /* Copy new fields */
    dupl->capid = new->capid;

    /* Add it to most private list */
    cap_list_insert(new, &current->space->cap_list);

    return 0;
}

/*
 * Read, manipulate capabilities.
 */
@@ -661,13 +45,6 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
{
    int err = 0;

    /*
     * Check capability to do a capability operation.
     * Supported only on current's caps for time being.
     */
    if ((err = cap_cap_check(current, req, flags)) < 0)
        return err;

    /* Check access for each request */
    switch(req) {
    case CAP_CONTROL_NCAPS:
@@ -683,20 +60,6 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
                MAP_USR_RW, 1)) < 0)
            return err;
        break;
    case CAP_CONTROL_SHARE:
        if (flags == CAP_SHARE_ALL_CONTAINER ||
            flags == CAP_SHARE_ALL_SPACE)
            break;
    case CAP_CONTROL_GRANT:
    case CAP_CONTROL_SPLIT:
    case CAP_CONTROL_REPLICATE:
    case CAP_CONTROL_DEDUCE:
    case CAP_CONTROL_DESTROY:
        if ((err = check_access((unsigned long)userbuf,
                                sizeof(struct capability),
                                MAP_USR_RW, 1)) < 0)
            return err;
        break;
    default:
        return -EINVAL;
    }
@@ -709,24 +72,6 @@ int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
    case CAP_CONTROL_READ:
        err = cap_read_all((struct capability *)userbuf);
        break;
    case CAP_CONTROL_SHARE:
        err = cap_share((struct capability *)userbuf, flags);
        break;
    case CAP_CONTROL_GRANT:
        err = cap_grant((struct capability *)userbuf, flags);
        break;
    case CAP_CONTROL_SPLIT:
        err = cap_split((struct capability *)userbuf, flags);
        break;
    case CAP_CONTROL_REPLICATE:
        err = cap_replicate((struct capability *)userbuf);
        break;
    case CAP_CONTROL_DEDUCE:
        err = cap_deduce((struct capability *)userbuf);
        break;
    case CAP_CONTROL_DESTROY:
        err = cap_destroy((struct capability *)userbuf);
        break;
    default:
        return -EINVAL;
    }

@@ -61,10 +61,6 @@ void exregs_write_registers(struct ktcb *task, struct exregs_data *exregs)
    context->pc = exregs->context.pc;

flags:
    /* Set thread's pager if one is supplied */
    if (exregs->flags & EXREGS_SET_PAGER)
        task->pagerid = exregs->pagerid;

    /* Set thread's utcb if supplied */
    if (exregs->flags & EXREGS_SET_UTCB) {
        task->utcb_address = exregs->utcb_address;
@@ -122,7 +118,7 @@ void exregs_read_registers(struct ktcb *task, struct exregs_data *exregs)
flags:
    /* Read thread's pager if pager flag supplied */
    if (exregs->flags & EXREGS_SET_PAGER)
        exregs->pagerid = task->pagerid;
        exregs->pagerid = tcb_pagerid(task);

    /* Read thread's utcb if utcb flag supplied */
    if (exregs->flags & EXREGS_SET_UTCB)
@@ -183,7 +179,7 @@ int sys_exchange_registers(struct exregs_data *exregs, l4id_t tid)
     * be the pagers making the call on themselves.
     */
    if (task->state != TASK_INACTIVE && exregs->valid_vect &&
        current != task && task->pagerid != current->tid) {
        current != task && tcb_pagerid(task) != current->tid) {
        err = -EACTIVE;
        goto out;
    }

@@ -48,10 +48,8 @@ int sys_map(unsigned long phys, unsigned long virt,
    if ((err = cap_map_check(target, phys, virt, npages, flags)) < 0)
        return err;

    add_mapping_pgd(phys, virt, npages << PAGE_BITS,
                    flags, TASK_PGD(target));

    return 0;
    return add_mapping_space(phys, virt, npages << PAGE_BITS,
                             flags, target->space);
}

/*
@@ -74,8 +72,8 @@ int sys_unmap(unsigned long virtual, unsigned long npages, unsigned int tid)
        return ret;

    for (int i = 0; i < npages; i++) {
        ret = remove_mapping_pgd(TASK_PGD(target),
                                 virtual + i * PAGE_SIZE);
        ret = remove_mapping_space(target->space,
                                   virtual + i * PAGE_SIZE);
        if (ret)
            retval = ret;
    }

@@ -79,7 +79,7 @@ struct mutex_queue *mutex_control_create(unsigned long mutex_physical)
    struct mutex_queue *mutex_queue;

    /* Allocate the mutex queue structure */
    if (!(mutex_queue = alloc_user_mutex()))
    if (!(mutex_queue = mutex_cap_alloc()))
        return 0;

    /* Init and return */
@@ -98,7 +98,7 @@ void mutex_control_delete(struct mutex_queue *mq)
    BUG_ON(!list_empty(&mq->wqh_contenders.task_list));
    BUG_ON(!list_empty(&mq->wqh_holders.task_list));

    free_user_mutex(mq);
    mutex_cap_free(mq);
}

/*
@@ -310,9 +310,6 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_flags)
        mutex_op != MUTEX_CONTROL_UNLOCK)
        return -EPERM;

    if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0)
        return ret;

    /*
     * Find and check physical address for virtual mutex address
     *
@@ -62,8 +62,7 @@ int thread_exit(struct ktcb *task)

static inline int task_is_child(struct ktcb *task)
{
    return (((task) != current) &&
            ((task)->pagerid == current->tid));
    return ((task != current) && task->pager == current);
}

int thread_destroy_child(struct ktcb *task)
@@ -91,9 +90,11 @@ int thread_destroy_child(struct ktcb *task)
    return 0;
}

/* Must be called from pager thread only */
int thread_destroy_children(void)
{
    struct ktcb *task, *n;
    int ret;

    spin_lock(&curcont->ktcb_list.list_lock);
    list_foreach_removable_struct(task, n,
@@ -106,17 +107,21 @@ int thread_destroy_children(void)
        }
    }
    spin_unlock(&curcont->ktcb_list.list_lock);
    return 0;

    /* Wait till all children are gone */
    WAIT_EVENT(&current->wqh_pager, current->nchild == 0, ret);

    return ret;
}

void thread_destroy_self(unsigned int exit_code)
{
    /* Destroy all children first */
    thread_destroy_children();

    /* If self-paged, finish everything except deletion */
    if (current->tid == current->pagerid) {
    if (thread_is_pager(current)) {

        /* Destroy all children first */
        BUG_ON(thread_destroy_children() < 0);

        /* Remove self safe against ipc */
        tcb_remove(current);

@@ -124,9 +129,9 @@ void thread_destroy_self(unsigned int exit_code)
        wake_up_all(&current->wqh_send, WAKEUP_INTERRUPT);
        wake_up_all(&current->wqh_recv, WAKEUP_INTERRUPT);

        /* Move capabilities to current cpu idle task */
        cap_list_move(&per_cpu(scheduler).idle_task->cap_list,
                      &current->cap_list);
        /* Move capabilities to struct pager */
        cap_list_move(&curcont->pager->cap_list,
                      &current->space->cap_list);

        /* Place self on the per-cpu zombie queue */
        ktcb_list_add(current, &per_cpu(kernel_resources.zombie_list));
@@ -219,7 +224,7 @@ int arch_clear_thread(struct ktcb *tcb)
    tcb->context.spsr = ARM_MODE_USR;

    /* Clear the page tables */
    remove_mapping_pgd_all_user(TASK_PGD(tcb));
    remove_mapping_pgd_all_user(tcb->space, &current->space->cap_list);

    /* Reinitialize all other fields */
    tcb_init(tcb);
@@ -358,36 +363,36 @@ int thread_setup_space(struct ktcb *tcb, struct task_ids *ids, unsigned int flag
    int ret = 0;

    if (flags & TC_SHARE_SPACE) {
        mutex_lock(&curcont->space_list.lock);
        spin_lock(&curcont->space_list.lock);
        if (!(space = address_space_find(ids->spid))) {
            mutex_unlock(&curcont->space_list.lock);
            spin_unlock(&curcont->space_list.lock);
            ret = -ESRCH;
            goto out;
        }
        mutex_lock(&space->lock);
        mutex_unlock(&curcont->space_list.lock);
        spin_lock(&space->lock);
        spin_unlock(&curcont->space_list.lock);
        address_space_attach(tcb, space);
        mutex_unlock(&space->lock);
        spin_unlock(&space->lock);
    }
    else if (flags & TC_COPY_SPACE) {
        mutex_lock(&curcont->space_list.lock);
        spin_lock(&curcont->space_list.lock);
        if (!(space = address_space_find(ids->spid))) {
            mutex_unlock(&curcont->space_list.lock);
            spin_unlock(&curcont->space_list.lock);
            ret = -ESRCH;
            goto out;
        }
        mutex_lock(&space->lock);
        spin_lock(&space->lock);
        if (IS_ERR(new = address_space_create(space))) {
            mutex_unlock(&curcont->space_list.lock);
            mutex_unlock(&space->lock);
            spin_unlock(&curcont->space_list.lock);
            spin_unlock(&space->lock);
            ret = (int)new;
            goto out;
        }
        mutex_unlock(&space->lock);
        spin_unlock(&space->lock);
        ids->spid = new->spid; /* Return newid to caller */
        address_space_attach(tcb, new);
        address_space_add(new);
        mutex_unlock(&curcont->space_list.lock);
        spin_unlock(&curcont->space_list.lock);
    }
    else if (flags & TC_NEW_SPACE) {
        if (IS_ERR(new = address_space_create(0))) {
@@ -397,9 +402,9 @@ int thread_setup_space(struct ktcb *tcb, struct task_ids *ids, unsigned int flag
        /* New space id to be returned back to caller */
        ids->spid = new->spid;
        address_space_attach(tcb, new);
        mutex_lock(&curcont->space_list.lock);
        spin_lock(&curcont->space_list.lock);
        address_space_add(new);
        mutex_unlock(&curcont->space_list.lock);
        spin_unlock(&curcont->space_list.lock);
    }

out:
@@ -448,7 +453,12 @@ int thread_create(struct task_ids *ids, unsigned int flags)
    }

    /* Set creator as pager */
    new->pagerid = current->tid;
    new->pager = current;

    /* Update pager child count */
    spin_lock(&current->thread_lock);
    current->nchild++;
    spin_unlock(&current->thread_lock);

    /* Setup container-generic fields from current task */
    new->container = current->container;
@@ -476,7 +486,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)

out_err:
    /* Pre-mature tcb needs freeing by free_ktcb */
    free_ktcb(new, current);
    ktcb_cap_free(new, &current->space->cap_list);
    return err;
}

@@ -487,7 +497,7 @@ out_err:
 */
int sys_thread_control(unsigned int flags, struct task_ids *ids)
{
    struct ktcb *task = 0;
    struct ktcb *task = 0, *pager = 0;
    int err, ret = 0;

    if ((err = check_access((unsigned long)ids, sizeof(*ids),
@@ -498,15 +508,13 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
    if (!(task = tcb_find(ids->tid)))
        return -ESRCH;

    pager = task->pager;

    /*
     * Tasks may only operate on their children. They may
     * also destroy themselves or any children.
     * Caller may operate on a thread if it shares
     * the same address space with that thread's pager
     */
    if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
        !task_is_child(task) && task != current)
        return -EPERM;
    if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY
        && !task_is_child(task))
        if (!space_is_pager(current))
            return -EPERM;
    }
@@ -1,23 +1,25 @@

# Inherit global environment
import os, sys, glob
import os, sys

PROJRELROOT = '../../'

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

Import('env', 'symbols')
Import('env')

config = configuration_retrieve()
symbols = config.all

# The set of source files associated with this SConscript file.
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception-common.c', 'mapping-common.c', 'memset.S', 'memcpy.S']
src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception-common.c',
             'mapping-common.c', 'memset.S', 'memcpy.S']

for name, val in symbols:
    if 'CONFIG_SMP' == name:
        src_local += ['head-smp.S']
    if 'CONFIG_SMP_' == name:
        src_local += ['head-smp.S']

obj = env.Object(src_local)

Return('obj')
@@ -117,7 +117,7 @@ fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag)
    tcb_set_ipc_flags(current, IPC_FLAGS_SHORT);

    /* Detect if a pager is self-faulting */
    if (current->tid == current->pagerid) {
    if (current == current->pager) {
        printk("Pager (%d) faulted on itself. "
               "FSR: 0x%x, FAR: 0x%x, PC: 0x%x pte: 0x%x CPU%d Exiting.\n",
               current->tid, fault->fsr, fault->far,
@@ -126,8 +126,8 @@ fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag)
    }

    /* Send ipc to the task's pager */
    if ((err = ipc_sendrecv(current->pagerid,
                            current->pagerid, 0)) < 0) {
    if ((err = ipc_sendrecv(tcb_pagerid(current),
                            tcb_pagerid(current), 0)) < 0) {
        BUG_ON(current->nlocks);

        /* Return on interrupt */
@@ -96,7 +96,8 @@ virt_to_phys_by_task(struct ktcb *task, unsigned long vaddr)
 * Attaches a pmd to either a task or the global pgd
 * depending on the virtual address passed.
 */
void attach_pmd(pgd_table_t *task_pgd, pmd_table_t *pmd_table,
void attach_pmd(struct address_space *space,
                pmd_table_t *pmd_table,
                unsigned long vaddr)
{
    u32 pmd_phys = virt_to_phys(pmd_table);
@@ -108,15 +109,23 @@ void attach_pmd(pgd_table_t *task_pgd, pmd_table_t *pmd_table,
     * Pick the right pmd from the right pgd.
     * It makes a difference if split tables are used.
     */
    pmd = arch_pick_pmd(task_pgd, vaddr);
    pmd = arch_pick_pmd(space->pgd, vaddr);

    /* Write the pmd into hardware pgd */
    arch_write_pmd(pmd, pmd_phys, vaddr);
    arch_write_pmd(pmd, pmd_phys, vaddr, space->spid);
}

void add_mapping_pgd(unsigned long physical, unsigned long virtual,
                     unsigned int sz_bytes, unsigned int flags,
                     pgd_table_t *task_pgd)
/*
 * Maps a new address to given space, but charges another
 * capability owner task for the pmd, if any used.
 *
 * This is useful for when irqs force mapping of UTCBs of
 * other tasks to the preempted tasks for handling.
 */
int add_mapping_use_cap(unsigned long physical, unsigned long virtual,
                        unsigned int sz_bytes, unsigned int flags,
                        struct address_space *space,
                        struct cap_list *clist)
{
    unsigned long npages = (sz_bytes >> PFN_SHIFT);
    pmd_table_t *pmd_table;
@@ -136,27 +145,80 @@ void add_mapping_pgd(unsigned long physical, unsigned long virtual,
    /* Map all pages that cover given size */
    for (int i = 0; i < npages; i++) {
        /* Check if a pmd was attached previously */
        if (!(pmd_table = pmd_exists(task_pgd, virtual))) {
        if (!(pmd_table = pmd_exists(space->pgd, virtual))) {

            /* First mapping in pmd, allocate it */
            pmd_table = alloc_pmd();
            if (!(pmd_table = pmd_cap_alloc(clist)))
                return -ENOMEM;

            /* Prepare the pte but don't sync */
            arch_prepare_pte(physical, virtual, flags,
                             &pmd_table->entry[PMD_INDEX(virtual)]);

            /* Attach pmd to its pgd and sync it */
            attach_pmd(task_pgd, pmd_table, virtual);
            attach_pmd(space, pmd_table, virtual);
        } else {
            /* Prepare, write the pte and sync */
            arch_prepare_write_pte(physical, virtual,
                                   flags, &pmd_table->entry[PMD_INDEX(virtual)]);
            arch_prepare_write_pte(space, physical, virtual,
                                   flags,
                                   &pmd_table->entry[PMD_INDEX(virtual)]);
        }

        /* Move on to the next page */
        physical += PAGE_SIZE;
        virtual += PAGE_SIZE;
    }

    return 0;
}

int add_mapping_space(unsigned long physical, unsigned long virtual,
                      unsigned int sz_bytes, unsigned int flags,
                      struct address_space *space)
{
    unsigned long npages = (sz_bytes >> PFN_SHIFT);
    pmd_table_t *pmd_table;

    if (sz_bytes < PAGE_SIZE) {
        print_early("Error: Mapping size less than PAGE_SIZE. "
                    "Mapping size is in bytes not pages.\n");
        BUG();
    }

    if (sz_bytes & PAGE_MASK)
        npages++;

    /* Convert generic map flags to arch specific flags */
    BUG_ON(!(flags = space_flags_to_ptflags(flags)));

    /* Map all pages that cover given size */
    for (int i = 0; i < npages; i++) {
        /* Check if a pmd was attached previously */
        if (!(pmd_table = pmd_exists(space->pgd, virtual))) {

            /* First mapping in pmd, allocate it */
            if (!(pmd_table = pmd_cap_alloc(&current->space->cap_list)))
                return -ENOMEM;

            /* Prepare the pte but don't sync */
            arch_prepare_pte(physical, virtual, flags,
                             &pmd_table->entry[PMD_INDEX(virtual)]);

            /* Attach pmd to its pgd and sync it */
            attach_pmd(space, pmd_table, virtual);
        } else {
            /* Prepare, write the pte and sync */
            arch_prepare_write_pte(space, physical, virtual,
                                   flags,
                                   &pmd_table->entry[PMD_INDEX(virtual)]);
        }

        /* Move on to the next page */
        physical += PAGE_SIZE;
        virtual += PAGE_SIZE;
    }

    return 0;
}

void add_boot_mapping(unsigned long physical, unsigned long virtual,
@@ -191,11 +253,12 @@ void add_boot_mapping(unsigned long physical, unsigned long virtual,
                             &pmd_table->entry[PMD_INDEX(virtual)]);

            /* Attach pmd to its pgd and sync it */
            attach_pmd(&init_pgd, pmd_table, virtual);
            attach_pmd(current->space, pmd_table, virtual);
        } else {
            /* Prepare, write the pte and sync */
            arch_prepare_write_pte(physical, virtual,
                                   flags, &pmd_table->entry[PMD_INDEX(virtual)]);
            arch_prepare_write_pte(current->space, physical,
                                   virtual, flags,
                                   &pmd_table->entry[PMD_INDEX(virtual)]);
        }

        /* Move on to the next page */
@@ -204,10 +267,10 @@ void add_boot_mapping(unsigned long physical, unsigned long virtual,
    }
}

void add_mapping(unsigned long paddr, unsigned long vaddr,
                 unsigned int size, unsigned int flags)
int add_mapping(unsigned long paddr, unsigned long vaddr,
                unsigned int size, unsigned int flags)
{
    add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
    return add_mapping_space(paddr, vaddr, size, flags, current->space);
}
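
An illustrative sketch (assumed, kernel-internal call site) of the "charge another owner" path that the comment above add_mapping_use_cap() describes: when an irq forces another task's UTCB into the current space, the pmd allocation is billed to the UTCB owner's capability list rather than to the interrupted task.

    /* Map one page of 'owner's UTCB into the current space,
     * charging 'owner' for any pmd that has to be allocated. */
    static int map_owner_utcb(struct ktcb *owner, unsigned long utcb_phys,
                              unsigned long utcb_virt)
    {
        return add_mapping_use_cap(utcb_phys, utcb_virt, PAGE_SIZE,
                                   MAP_USR_RW, current->space,
                                   &owner->space->cap_list);
    }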

/*
@@ -247,7 +310,7 @@ int check_mapping(unsigned long vaddr, unsigned long size,
 * This can be made common for v5/v7, keeping split/page table
 * and cache flush parts in arch-specific files.
 */
int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
int remove_mapping_space(struct address_space *space, unsigned long vaddr)
{
    pmd_table_t *pmd_table;
    int pgd_i, pmd_i;
@@ -262,7 +325,7 @@ int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
     * Get the right pgd's pmd according to whether
     * the address is global or task-specific.
     */
    pmd = arch_pick_pmd(task_pgd, vaddr);
    pmd = arch_pick_pmd(space->pgd, vaddr);

    pmd_type = *pmd & PMD_TYPE_MASK;

@@ -288,7 +351,7 @@ int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
        BUG();

    /* Write to pte, also syncing it as required by arch */
    arch_prepare_write_pte(0, vaddr,
    arch_prepare_write_pte(space, 0, vaddr,
                           space_flags_to_ptflags(MAP_FAULT),
                           (pte_t *)&pmd_table->entry[pmd_i]);
    return 0;
@@ -296,14 +359,14 @@ int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)

int remove_mapping(unsigned long vaddr)
{
    return remove_mapping_pgd(TASK_PGD(current), vaddr);
    return remove_mapping_space(current->space, vaddr);
}

int delete_page_tables(struct address_space *space)
int delete_page_tables(struct address_space *space, struct cap_list *clist)
{
    remove_mapping_pgd_all_user(space->pgd);
    free_pgd(space->pgd);
    remove_mapping_pgd_all_user(space, clist);
    pgd_free(space->pgd);
    return 0;
}

@@ -325,7 +388,7 @@ int copy_user_tables(struct address_space *new,
    ((from->entry[i] & PMD_TYPE_MASK)
     == PMD_TYPE_PMD)) {
        /* Allocate new pmd */
        if (!(pmd = alloc_pmd()))
        if (!(pmd = pmd_cap_alloc(&current->space->cap_list)))
            goto out_error;

        /* Find original pmd */
@@ -359,7 +422,7 @@ out_error:
            phys_to_virt((to->entry[i] &
                          PMD_ALIGN_MASK));
            /* Free pmd */
            free_pmd(pmd);
            pmd_cap_free(pmd, &current->space->cap_list);
        }
    }
    return -ENOMEM;
@@ -385,7 +448,6 @@ void remap_as_pages(void *vstart, void *vend)
    unsigned long paddr = pstart;
    unsigned long vaddr = (unsigned long)vstart;
    int pmd_i = PMD_INDEX(vstart);
    pgd_table_t *pgd = &init_pgd;
    pmd_table_t *pmd = alloc_boot_pmd();
    int npages = __pfn(pend - pstart);
    int map_flags;
@@ -408,7 +470,7 @@ void remap_as_pages(void *vstart, void *vend)
        vaddr += PAGE_SIZE;
    }

    attach_pmd(pgd, pmd, (unsigned long)vstart);
    attach_pmd(current->space, pmd, (unsigned long)vstart);

    printk("%s: Kernel area 0x%lx - 0x%lx "
           "remapped as %d pages\n", __KERNELNAME__,
@@ -116,7 +116,7 @@ void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
    *ptep = paddr | flags | PTE_TYPE_SMALL;
}

void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr, u32 asid)
{
    /* FIXME:
     * Clean the dcache and invalidate the icache
@@ -143,7 +143,8 @@ void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
}


void arch_prepare_write_pte(u32 paddr, u32 vaddr,
void arch_prepare_write_pte(struct address_space *space,
                            u32 paddr, u32 vaddr,
                            unsigned int flags, pte_t *ptep)
{
    pte_t pte = 0;
@@ -154,7 +155,7 @@ void arch_prepare_write_pte(u32 paddr, u32 vaddr,

    arch_prepare_pte(paddr, vaddr, flags, &pte);

    arch_write_pte(ptep, pte, vaddr);
    arch_write_pte(ptep, pte, vaddr, space->spid);
}

pmd_t *
@@ -166,7 +167,7 @@ arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
/*
 * v5 pmd writes
 */
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr, u32 asid)
{
    /* FIXME: Clean the dcache if there was a valid entry */
    *pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
@@ -205,8 +206,10 @@ int is_global_pgdi(int i)

extern pmd_table_t *pmd_array;

void remove_mapping_pgd_all_user(pgd_table_t *pgd)
void remove_mapping_pgd_all_user(struct address_space *space,
                                 struct cap_list *clist)
{
    pgd_table_t *pgd = space->pgd;
    pmd_table_t *pmd;

    /* Traverse through all pgd entries. */
@@ -221,34 +224,22 @@ void remove_mapping_pgd_all_user(pgd_table_t *pgd)
            phys_to_virt((pgd->entry[i] &
                          PMD_ALIGN_MASK));
            /* Free it */
            free_pmd(pmd);
            pmd_cap_free(pmd, clist);
        }

        /* Clear the pgd entry */
        pgd->entry[i] = PMD_TYPE_FAULT;
    }
}
    /* FIXME: Flush tlbs here */
}

int pgd_count_boot_pmds()
{
    int npmd = 0;
    pgd_table_t *pgd = &init_pgd;

    for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
        if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
            npmd++;
    return npmd;
}

/*
 * Jumps from boot pmd/pgd page tables to tables allocated from the cache.
 */
pgd_table_t *arch_realloc_page_tables(void)
{
    pgd_table_t *pgd_new = alloc_pgd();
    pgd_table_t *pgd_new = pgd_alloc();
    pgd_table_t *pgd_old = &init_pgd;
    pmd_table_t *orig, *pmd;

@@ -260,7 +251,7 @@ pgd_table_t *arch_realloc_page_tables(void)
    /* Detect a pmd entry */
    if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
        /* Allocate new pmd */
        if (!(pmd = alloc_pmd())) {
        if (!(pmd = pmd_cap_alloc(&current->space->cap_list))) {
            printk("FATAL: PMD allocation "
                   "failed during system initialization\n");
            BUG();
@@ -375,6 +366,9 @@ void idle_task(void)
    /* Do maintenance */
    tcb_delete_zombies();

    /* Clear idle runnable flag */
    per_cpu(scheduler).flags &= ~SCHED_RUN_IDLE;

    schedule();
}
}
@@ -28,7 +28,7 @@ void cpu_startup(void)
    //arm_set_cp15_cr(val);


#if defined (CONFIG_SMP)
#if defined (CONFIG_SMP_)
    /* Enable SCU*/
    /* Enable SMP bit in CP15 */
#endif

@@ -26,7 +26,7 @@ void __spin_lock(unsigned int *s)
    "teq %0, #0\n"
    "strexeq %0, %1, [%2]\n"
    "teq %0, #0\n"
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP_
    "wfene\n"
#endif
    "bne 1b\n"
@@ -47,7 +47,7 @@ void __spin_unlock(unsigned int *s)
    : "memory"
    );

#ifdef CONFIG_SMP
#ifdef CONFIG_SMP_
    dsb();
    __asm__ __volatile__ ("sev\n");
#endif

@@ -565,7 +565,7 @@ current_irq_nest_count:
    .word 0
    .word 0

#if defined (CONFIG_SMP)
#if defined (CONFIG_SMP_)
    @ Rx contains the address of per cpu variable
    .macro per_cpu adr, temp, varname
    get_cpuid \temp
@@ -1,32 +1,31 @@
import os, sys, glob

PROJRELROOT = '../../'
import os
from os.path import join

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *

Import("env", "symbols", "platform", "bdir")
Import('env', 'bdir')

src_local = []
objs = []

for name, val in symbols:
    if "CONFIG_DRIVER_UART_PL011" == name:
        objs += SConscript("uart/pl011/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'pl011')
    if "CONFIG_DRIVER_TIMER_SP804" == name:
        objs += SConscript("timer/sp804/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'timer')
    if "CONFIG_DRIVER_IRQ_PL190" == name:
        objs += SConscript("irq/pl190/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'vic')
    if "CONFIG_DRIVER_IRQ_GIC" == name:
        objs += SConscript("irq/gic/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + 'gic')
    if "CONFIG_DRIVER_INTC_OMAP" == name:
        objs += SConscript("irq/omap3/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/intc')
    if "CONFIG_DRIVER_UART_OMAP" == name:
        objs += SConscript("uart/omap/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/uart')
    if "CONFIG_DRIVER_TIMER_OMAP" == name:
        objs += SConscript("timer/omap/SConscript", exports = {'env' : env}, duplicate=0, build_dir=bdir + '/omap/timer')
objs += SConscript("uart/pl011/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'pl011'))

objs += SConscript("timer/sp804/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'timer'))

objs += SConscript("irq/pl190/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'vic'))

objs += SConscript("irq/gic/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'gic'))

objs += SConscript("irq/omap3/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'omap/intc'))

objs += SConscript("uart/omap/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'omap/uart'))

objs += SConscript("timer/omap/SConscript", exports = { 'env' : env },
                   duplicate=0, build_dir = join(bdir, 'omap/timer'))

Return('objs')
@@ -1,17 +1,21 @@
# Inherit global environment

#import os, sys

#PROJRELROOT = '../../../'

#sys.path.append(PROJRELROOT)

#from config.projpaths import *
#from configure import *

Import('env')
# The set of source files associated with this SConscript file.
src_local = ['gic.c']
obj = env.Object(src_local)

from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# Platforms using GIC
plat_list = ('eb', 'pba9')

# The set of source files associated with this SConscript file.
src_local = []

for plat_supported in plat_list:
    if plat_supported == platform:
        src_local += ['gic.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,7 +1,20 @@

Import('env')

# The set of source files associated with this SConscript file.
src_local = Glob('*.c')
obj = env.Object(src_local)
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

#Platforms using omap_intc
plat_list = 'beagle'

# The set of source files associated with this SConscript file.
src_local = []

if plat_list == platform:
    src_local += Glob('*.c')

obj = env.Object(src_local)
Return('obj')
@@ -1,8 +1,20 @@
# Inherit global environment

Import('env')
# The set of source files associated with this SConscript file.
src_local = ['pl190_vic.c']
obj = env.Object(src_local)

from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# The set of source files associated with this SConscript file.
src_local = []

# Platforms using pl190
plat_list = 'pb926'

if plat_list == platform:
    src_local += ['pl190_vic.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,7 +1,20 @@

Import('env')

# The set of source files associated with this SConscript file.
src_local = ['timer.c']
obj = env.Object(src_local)
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# Platforms using omap_timer
plat_list = 'beagle'

# The set of source files associated with this SConscript file.
src_local = []

if plat_list == platform:
    src_local += ['timer.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,7 +1,21 @@

Import('env')

# The set of source files associated with this SConscript file.
src_local = ['timer.c']
obj = env.Object(src_local)
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# Platforms using sp804 timer
plat_list = ('eb', 'pba9', 'pb926')

# The set of source files associated with this SConscript file.
src_local = []

for plat_supported in plat_list:
    if plat_supported == platform:
        src_local += ['timer.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,7 +1,20 @@

Import('env')

# The set of source files associated with this SConscript file.
src_local = ['uart.c']
obj = env.Object(src_local)
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# Platforms using omap_uart
plat_list = 'beagle'

# The set of source files associated with this SConscript file.
src_local = []

if plat_list == platform:
    src_local += ['uart.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,7 +1,21 @@

Import('env')

# The set of source files associated with this SConscript file.
src_local = ['uart.c']
obj = env.Object(src_local)
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

config = configuration_retrieve()
platform = config.platform

# Platforms using pl011 uart
plat_list = ('eb', 'pba9', 'pb926')

# The set of source files associated with this SConscript file.
src_local = []

for plat_supported in plat_list:
    if plat_supported == platform:
        src_local += ['uart.c']

obj = env.Object(src_local)
Return('obj')
@@ -1,10 +1,28 @@

import sys

# Inherit global environment
Import('env')

PROJROOT = '../..'
sys.path.append(PROJROOT)

from scripts.kernel.generate_kernel_cinfo import *

# The set of source files associated with this SConscript file.
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c', 'bootmem.c', 'resource.c', 'container.c', 'capability.c', 'cinfo.c', 'debug.c']
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c',
             'bootmem.c', 'resource.c', 'container.c', 'capability.c',
             'cinfo.c', 'debug.c', 'idle.c']

# Generate kernel cinfo structure for container definitions
def generate_cinfo(target, source, env):
    generate_kernel_cinfo(target[0])
    return None

cinfo_generator = Builder(action = generate_cinfo)
env.Append(BUILDERS = {'CINFO_GENERATOR' : cinfo_generator})
env.CINFO_GENERATOR(KERNEL_CINFO_PATH, CONFIG_H)

obj = env.Object(src_local)
Depends(obj, KERNEL_CINFO_PATH)
Return('obj')
@@ -16,15 +16,9 @@
 */
#define BOOTMEM_SIZE (SZ_4K * 4)
SECTION(".init.bootmem") char bootmem[BOOTMEM_SIZE];
struct address_space init_space;

static unsigned long cursor = (unsigned long)&bootmem;

unsigned long bootmem_free_pages(void)
{
    return BOOTMEM_SIZE - (page_align_up(cursor) - (unsigned long)&bootmem);
}

void *alloc_bootmem(int size, int alignment)
{
    void *ptr;
@@ -46,6 +40,9 @@ void *alloc_bootmem(int size, int alignment)
    /* Allocate from cursor */
    ptr = (void *)cursor;

    /* Zero initialize */
    memset(ptr, 0, size);

    /* Update cursor */
    cursor += size;
@@ -19,6 +19,7 @@
#include <l4/api/cache.h>
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
#include INC_PLAT(irq.h)

void capability_init(struct capability *cap)
{
@@ -26,32 +27,13 @@ void capability_init(struct capability *cap)
    link_init(&cap->list);
}

/*
 * Boot-time function to create capability without
 * capability checking
 */
struct capability *boot_capability_create(void)
{
    struct capability *cap = boot_alloc_capability();

    capability_init(cap);

    return cap;
}

struct capability *capability_create(void)
{
    struct capability *cap;

    if (!(cap = alloc_capability()))
        return 0;

    capability_init(cap);

    return cap;
}

#if defined(CONFIG_CAPABILITIES)

/*
 * FIXME: These need locking. A child can quit without
 * pager's call.
 */
int capability_consume(struct capability *cap, int quantity)
{
    if (cap->size < cap->used + quantity)
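
Only the exhaustion check of capability_consume() is visible in this hunk. As an illustrative sketch, the quantity accounting the FIXME above refers to likely pairs the consume with a matching release; the increment and the capability_free() counterpart below are assumptions:

    int capability_consume(struct capability *cap, int quantity)
    {
        if (cap->size < cap->used + quantity)
            return -ENOCAP;             /* pool exhausted */
        cap->used += quantity;
        return 0;
    }

    int capability_free(struct capability *cap, int quantity)
    {
        BUG_ON(cap->used < quantity);   /* releasing more than consumed */
        cap->used -= quantity;
        return 0;
    }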
|
||||
@@ -107,16 +89,11 @@ struct capability *cap_list_find_by_rtype(struct cap_list *cap_list,
 * In conclusion, freeing of pool-type capabilities needs to be done
 * in order of privacy.
 */
struct capability *capability_find_by_rtype(struct ktcb *task,
					    unsigned int rtype)
struct capability *cap_find_by_rtype(struct ktcb *task,
				     unsigned int rtype)
{
	struct capability *cap;

	/* Search task's own list */
	list_foreach_struct(cap, &task->cap_list.caps, list)
		if (cap_rtype(cap) == rtype)
			return cap;

	/* Search space list */
	list_foreach_struct(cap, &task->space->cap_list.caps, list)
		if (cap_rtype(cap) == rtype)
@@ -130,39 +107,9 @@ struct capability *capability_find_by_rtype(struct ktcb *task,
	return 0;
}

struct capability *cap_find_by_capid(l4id_t capid, struct cap_list **cap_list)
{
	struct capability *cap;
	struct ktcb *task = current;

	/* Search task's own list */
	list_foreach_struct(cap, &task->cap_list.caps, list)
		if (cap->capid == capid) {
			*cap_list = &task->cap_list;
			return cap;
		}

	/* Search space list */
	list_foreach_struct(cap, &task->space->cap_list.caps, list)
		if (cap->capid == capid) {
			*cap_list = &task->space->cap_list;
			return cap;
		}

	/* Search container list */
	list_foreach_struct(cap, &task->container->cap_list.caps, list)
		if (cap->capid == capid) {
			*cap_list = &task->container->cap_list;
			return cap;
		}

	return 0;
}

int cap_count(struct ktcb *task)
{
	return task->cap_list.ncaps +
	       task->space->cap_list.ncaps +
	return task->space->cap_list.ncaps +
	       task->container->cap_list.ncaps;
}

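cap_find_by_capid() resolves an id by walking the thread, space and container lists in that order and reports which list the hit came from. A hedged sketch of using that second out-parameter (the helper below is invented for illustration):

/* Illustrative only: report which list level holds a given capid. */
static const char *cap_locate_level(l4id_t capid)
{
	struct capability *cap;
	struct cap_list *clist;

	if (!(cap = cap_find_by_capid(capid, &clist)))
		return "none";

	if (clist == &current->space->cap_list)
		return "space";
	if (clist == &current->container->cap_list)
		return "container";

	return "thread";	/* private list is searched first */
}
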
@@ -174,16 +121,10 @@ typedef struct capability *(*cap_match_func_t) \
 * operation with a capability in a syscall-specific way.
 */
struct capability *cap_find(struct ktcb *task, cap_match_func_t cap_match_func,
			    void *match_args, unsigned int cap_type)
			    void *match_args, unsigned int cap_type)
{
	struct capability *cap, *found;

	/* Search task's own list */
	list_foreach_struct(cap, &task->cap_list.caps, list)
		if (cap_type(cap) == cap_type &&
		    ((found = cap_match_func(cap, match_args))))
			return found;

	/* Search space list */
	list_foreach_struct(cap, &task->space->cap_list.caps, list)
		if (cap_type(cap) == cap_type &&
@@ -199,129 +140,6 @@ struct capability *cap_find(struct ktcb *task, cap_match_func_t cap_match_func,
	return 0;
}

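Each syscall supplies its own predicate to cap_find(); the matcher below is a minimal hypothetical example (names invented) showing the expected shape: return the capability on a match, 0 otherwise.

/* Hypothetical matcher for cap_find(); accepts caps by resource id. */
struct sys_demo_args {
	l4id_t resid;
};

static struct capability *cap_match_demo(struct capability *cap,
					 void *args_ptr)
{
	struct sys_demo_args *args = args_ptr;

	if (cap->resid != args->resid)
		return 0;

	return cap;
}

/* Usage sketch:
 *	struct sys_demo_args args = { .resid = some_id };
 *	cap = cap_find(current, cap_match_demo, &args, CAP_TYPE_CAP);
 */
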

struct sys_mutex_args {
	unsigned long address;
	unsigned int op;
};

/*
 * Check broadly the ability to do mutex ops. Check it by
 * the thread, space or container (i.e. the group that can
 * do this operation broadly).
 *
 * Note that we check mutex_address elsewhere as a quick,
 * per-task virt_to_phys translation that would not be
 * satisfied easily or quickly by memory capability checking.
 *
 * While this is not 100% right from a capability checking
 * point of view, it is a shortcut that works and makes sense.
 *
 * For the sake of completeness, the right way to do it would be
 * to add MUTEX_LOCKABLE, MUTEX_UNLOCKABLE attributes to both
 * the virtual and physical memory caps of a task and search those
 * to validate the address. But we would have to translate
 * from the page tables either way.
 */
struct capability *
cap_match_mutex(struct capability *cap, void *args)
{
	/* Unconditionally expect these flags */
	unsigned int perms = CAP_UMUTEX_LOCK | CAP_UMUTEX_UNLOCK;

	if ((cap->access & perms) != perms)
		return 0;

	/* Now check the usual restype/resid pair */
	switch (cap_rtype(cap)) {
	case CAP_RTYPE_THREAD:
		if (current->tid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_SPACE:
		if (current->space->spid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_CONTAINER:
		if (current->container->cid != cap->resid)
			return 0;
		break;
	default:
		BUG(); /* Unknown cap type is a bug */
	}

	return cap;
}

struct sys_capctrl_args {
	unsigned int req;
	unsigned int flags;
	struct ktcb *task;
};

struct capability *
cap_match_capctrl(struct capability *cap, void *args_ptr)
{
	struct sys_capctrl_args *args = args_ptr;
	unsigned int req = args->req;
	struct ktcb *target = args->task;

	/* Check operation privileges */
	switch (req) {
	case CAP_CONTROL_NCAPS:
	case CAP_CONTROL_READ:
		if (!(cap->access & CAP_CAP_READ))
			return 0;
		break;
	case CAP_CONTROL_SHARE:
		if (!(cap->access & CAP_CAP_SHARE))
			return 0;
		break;
	case CAP_CONTROL_GRANT:
		if (!(cap->access & CAP_CAP_GRANT))
			return 0;
		break;
	case CAP_CONTROL_REPLICATE:
		if (!(cap->access & CAP_CAP_REPLICATE))
			return 0;
		break;
	case CAP_CONTROL_SPLIT:
		if (!(cap->access & CAP_CAP_SPLIT))
			return 0;
		break;
	case CAP_CONTROL_DEDUCE:
		if (!(cap->access & CAP_CAP_DEDUCE))
			return 0;
		break;
	case CAP_CONTROL_DESTROY:
		if (!(cap->access & CAP_CAP_DESTROY))
			return 0;
		break;
	default:
		/* We refuse to accept anything else */
		return 0;
	}

	/* Now check the usual restype/resid pair */
	switch (cap_rtype(cap)) {
	case CAP_RTYPE_THREAD:
		if (target->tid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_SPACE:
		if (target->space->spid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_CONTAINER:
		if (target->container->cid != cap->resid)
			return 0;
		break;
	default:
		BUG(); /* Unknown cap type is a bug */
	}

	return cap;
}

struct sys_ipc_args {
	struct ktcb *task;
	unsigned int ipc_type;
@@ -664,73 +482,12 @@ struct capability *cap_match_mem(struct capability *cap,
}

struct sys_irqctrl_args {
	struct ktcb *registrant;
	struct ktcb *task;
	unsigned int req;
	unsigned int flags;
	l4id_t irq;
};

/*
 * CAP_TYPE_MAP already matched upon entry.
 *
 * Match only device-specific details, e.g. the irq
 * registration capability.
 */
struct capability *cap_match_devmem(struct capability *cap,
				    void *args_ptr)
{
	struct sys_irqctrl_args *args = args_ptr;
	struct ktcb *target = args->registrant;
	unsigned int perms;

	/* It must be a physmem type */
	if (cap_type(cap) != CAP_TYPE_MAP_PHYSMEM)
		return 0;

	/* It must be a device */
	if (!cap_is_devmem(cap))
		return 0;

	/* Irq numbers should match */
	if (cap->irq != args->irq)
		return 0;

	/* Check permissions; we only check irq-specific ones */
	switch (args->req) {
	case IRQ_CONTROL_REGISTER:
		perms = CAP_IRQCTRL_REGISTER;
		if ((cap->access & perms) != perms)
			return 0;
		break;
	default:
		/* Anything else is an invalid/unrecognised argument */
		return 0;
	}

	/*
	 * Check that irq registration to target is covered
	 * by the capability containment rules.
	 */
	switch (cap_rtype(cap)) {
	case CAP_RTYPE_THREAD:
		if (target->tid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_SPACE:
		if (target->space->spid != cap->resid)
			return 0;
		break;
	case CAP_RTYPE_CONTAINER:
		if (target->container->cid != cap->resid)
			return 0;
		break;
	default:
		BUG(); /* Unknown cap type is a bug */
	}

	return cap;
}

/*
 * CAP_TYPE_IRQCTRL already matched
 */
@@ -738,7 +495,7 @@ struct capability *cap_match_irqctrl(struct capability *cap,
				     void *args_ptr)
{
	struct sys_irqctrl_args *args = args_ptr;
	struct ktcb *target = args->registrant;
	struct ktcb *target = args->task;

	/* Check operation privileges */
	switch (args->req) {
@@ -755,6 +512,11 @@ struct capability *cap_match_irqctrl(struct capability *cap,
		return 0;
	}

	/* Irq number should fall within the capability's range */
	if (args->irq < cap->start ||
	    args->irq > cap->end)
		return 0;

	/*
	 * Target thread is the thread that is going to
	 * handle the irqs. Check if the capability matches
@@ -828,35 +590,6 @@ struct capability *cap_match_cache(struct capability *cap, void *args_ptr)
}

#if defined(CONFIG_CAPABILITIES)
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
{
	struct sys_mutex_args args = {
		.address = mutex_address,
		.op = mutex_op,
	};

	if (!(cap_find(current, cap_match_mutex,
		       &args, CAP_TYPE_UMUTEX)))
		return -ENOCAP;

	return 0;
}

int cap_cap_check(struct ktcb *task, unsigned int req, unsigned int flags)
{
	struct sys_capctrl_args args = {
		.req = req,
		.flags = flags,
		.task = task,
	};

	if (!(cap_find(current, cap_match_capctrl,
		       &args, CAP_TYPE_CAP)))
		return -ENOCAP;

	return 0;
}

int cap_map_check(struct ktcb *target, unsigned long phys, unsigned long virt,
		  unsigned long npages, unsigned int flags)
{
@@ -967,11 +700,11 @@ int cap_thread_check(struct ktcb *task,
}


int cap_irq_check(struct ktcb *registrant, unsigned int req,
int cap_irq_check(struct ktcb *task, unsigned int req,
		  unsigned int flags, l4id_t irq)
{
	struct sys_irqctrl_args args = {
		.registrant = registrant,
		.task = task,
		.req = req,
		.flags = flags,
		.irq = irq,
@@ -981,15 +714,6 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
	if (!(cap_find(current, cap_match_irqctrl,
		       &args, CAP_TYPE_IRQCTRL)))
		return -ENOCAP;

	/*
	 * If it is an irq registration, find the device
	 * capability and check that it allows irq registration.
	 */
	if (req == IRQ_CONTROL_REGISTER)
		if (!cap_find(current, cap_match_devmem,
			      &args, CAP_TYPE_MAP_PHYSMEM))
			return -ENOCAP;
	return 0;
}
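So registration is gated by two independent capabilities: an IRQCTRL capability whose range covers the irq, and a device-memory capability owning that irq with the register right. A hedged call-site sketch (the wrapper name is invented):

/* Sketch: a pager registering an irq for a handler thread. */
static int register_device_irq(struct ktcb *handler, l4id_t irq)
{
	int err;

	/* Fails with -ENOCAP unless both the irqctrl check and the
	 * devmem check inside cap_irq_check() succeed. */
	if ((err = cap_irq_check(handler, IRQ_CONTROL_REGISTER,
				 0, irq)) < 0)
		return err;

	/* ... proceed with actual irq registration ... */
	return 0;
}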

@@ -1019,16 +743,6 @@ int cap_cache_check(unsigned long start, unsigned long end, unsigned int flags)
}

#else /* Meaning !CONFIG_CAPABILITIES */
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
{
	return 0;
}

int cap_cap_check(struct ktcb *task, unsigned int req, unsigned int flags)
{
	return 0;
}

int cap_ipc_check(l4id_t to, l4id_t from,
		  unsigned int flags, unsigned int ipc_type)
{

@@ -15,8 +15,11 @@
#include INC_SUBARCH(mm.h)
#include INC_ARCH(linker.h)

int container_init(struct container *c)
struct container *container_alloc_init()
{
	/* Allocate container */
	struct container *c = alloc_bootmem(sizeof(*c), 0);

	/* Allocate new container id */
	c->cid = id_new(&kernel_resources.container_ids);

@@ -34,15 +37,6 @@ int container_init(struct container *c)
		cap_list_init(&c->pager[i].cap_list);


	return 0;
}

struct container *container_create(void)
{
	struct container *c = alloc_container();

	container_init(c);

	return c;
}

@@ -70,6 +64,63 @@ struct container *container_find(struct kernel_resources *kres, l4id_t cid)
	return 0;
}

/*
 * Map pagers based on section flags
 */
void map_pager_sections(struct pager *pager, struct container *cont,
			struct ktcb *task)
{
	unsigned long size_rx = 0;
	unsigned long size_rw = 0;

	if ((size_rx = page_align_up(pager->rx_pheader_end) -
		       pager->rx_pheader_start) >= PAGE_SIZE) {
		printk("%s: Mapping 0x%lx bytes as RX "
		       "from 0x%lx physical to 0x%lx virtual for %s\n",
		       __KERNELNAME__, size_rx,
		       (pager->rx_pheader_start -
			pager->start_vma + pager->start_lma),
		       pager->rx_pheader_start, cont->name);

		add_mapping_space((pager->rx_pheader_start - pager->start_vma +
				   pager->start_lma),
				  pager->rx_pheader_start, size_rx,
				  MAP_USR_RX, task->space);
	}

	if ((size_rw = page_align_up(pager->rw_pheader_end) -
		       pager->rw_pheader_start) >= PAGE_SIZE) {
		printk("%s: Mapping 0x%lx bytes as RW "
		       "from 0x%lx physical to 0x%lx virtual for %s\n",
		       __KERNELNAME__, size_rw,
		       (pager->rw_pheader_start -
			pager->start_vma + pager->start_lma),
		       pager->rw_pheader_start, cont->name);

		add_mapping_space((pager->rw_pheader_start - pager->start_vma +
				   pager->start_lma),
				  pager->rw_pheader_start, size_rw,
				  MAP_USR_RW, task->space);
	}

	/*
	 * If there are no RX or RW sections, map the full image as RWX.
	 * TODO: This does not look like the best way.
	 */
	if (!size_rx && !size_rw) {
		printk("%s: Mapping 0x%lx bytes (%lu pages) "
		       "from 0x%lx to 0x%lx for %s\n",
		       __KERNELNAME__, pager->memsize,
		       __pfn(page_align_up(pager->memsize)),
		       pager->start_lma, pager->start_vma, cont->name);

		/* Map the task's space */
		add_mapping_space(pager->start_lma, pager->start_vma,
				  page_align_up(pager->memsize),
				  MAP_USR_RWX, task->space);
	}
}
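Every mapping above derives its physical source by rebasing a link-time virtual address onto the load address, i.e. phys = vaddr - start_vma + start_lma. A standalone sketch with invented numbers:

/* Sketch of the vma-to-lma rebasing used by map_pager_sections(). */
static unsigned long section_phys(unsigned long vaddr,
				  unsigned long start_vma,
				  unsigned long start_lma)
{
	/* e.g. section_phys(0x104000, 0x100000, 0x800000) == 0x804000 */
	return vaddr - start_vma + start_lma;
}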

/*
 * TODO:
 *
@@ -96,7 +147,7 @@ int init_pager(struct pager *pager, struct container *cont)
 * can be done to this pager. Note that we're still on
 * the idle task stack.
 */
	cap_list_move(&current->cap_list, &pager->cap_list);
	cap_list_move(&current->space->cap_list, &pager->cap_list);

	/* Setup dummy container pointer so that curcont works */
	current->container = cont;
@@ -111,7 +162,7 @@ int init_pager(struct pager *pager, struct container *cont)
	task_init_registers(task, pager->start_address);

	/* Initialize container/pager relationships */
	task->pagerid = task->tid;
	task->pager = task;
	task->tgid = task->tid;
	task->container = cont;

@@ -121,72 +172,11 @@ int init_pager(struct pager *pager, struct container *cont)
	/* Add the address space to container space list */
	address_space_add(task->space);

#if 0
	printk("%s: Mapping 0x%lx bytes (%lu pages) "
	       "from 0x%lx to 0x%lx for %s\n",
	       __KERNELNAME__, pager->memsize,
	       __pfn(page_align_up(pager->memsize)),
	       pager->start_lma, pager->start_vma, cont->name);

	/* Map the task's space */
	add_mapping_pgd(pager->start_lma, pager->start_vma,
			page_align_up(pager->memsize),
			MAP_USR_RWX, TASK_PGD(task));
#else
	/*
	 * Map the pager with appropriate section flags.
	 * We do page_align_down() to page-align the various kinds
	 * of sections; this automatically takes care of the case
	 * where different kinds of data lie on the same page,
	 * e.g. RX, RO etc.
	 * One assumption made here is that the start of the first
	 * RW section is already page aligned; if this is not true,
	 * we have to take special care of it.
	 */
	if (pager->rx_sections_end >= pager->rw_sections_start) {
		pager->rx_sections_end = page_align(pager->rx_sections_end);
		pager->rw_sections_start = page_align(pager->rw_sections_start);
	}

	unsigned long size = 0;
	if ((size = page_align_up(pager->rx_sections_end) -
		    page_align_up(pager->rx_sections_start))) {
		add_mapping_pgd(page_align_up(pager->rx_sections_start -
					      pager->start_vma +
					      pager->start_lma),
				page_align_up(pager->rx_sections_start),
				size, MAP_USR_RX, TASK_PGD(task));

		printk("%s: Mapping 0x%lx bytes as RX "
		       "from 0x%lx to 0x%lx for %s\n",
		       __KERNELNAME__, size,
		       page_align_up(pager->rx_sections_start -
				     pager->start_vma + pager->start_lma),
		       page_align_up(pager->rx_sections_start),
		       cont->name);
	}

	if ((size = page_align_up(pager->rw_sections_end) -
		    page_align_up(pager->rw_sections_start))) {
		add_mapping_pgd(page_align_up(pager->rw_sections_start -
					      pager->start_vma +
					      pager->start_lma),
				page_align_up(pager->rw_sections_start),
				size, MAP_USR_RW, TASK_PGD(task));

		printk("%s: Mapping 0x%lx bytes as RW "
		       "from 0x%lx to 0x%lx for %s\n",
		       __KERNELNAME__, size,
		       page_align_up(pager->rw_sections_start -
				     pager->start_vma + pager->start_lma),
		       page_align_up(pager->rw_sections_start),
		       cont->name);
	}

#endif
	/* Map various pager sections based on section flags */
	map_pager_sections(pager, cont, task);

	/* Move capability list from dummy to task's space cap list */
	cap_list_move(&task->space->cap_list, &current->cap_list);
	cap_list_move(&task->space->cap_list, &current->space->cap_list);

	/* Initialize task scheduler parameters */
	sched_init_task(task, TASK_PRIO_PAGER);

72
src/generic/idle.c
Normal file
@@ -0,0 +1,72 @@
/*
 * Idle task initialization and maintenance
 *
 * Copyright (C) 2010 B Labs Ltd.
 * Author: Bahadir Balban
 */

#include <l4/generic/cap-types.h>
#include <l4/api/capability.h>
#include <l4/generic/capability.h>
#include <l4/generic/smp.h>
#include <l4/generic/tcb.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/container.h>
#include INC_GLUE(mapping.h)

/*
 * Set up the beginning of the current stack and the initial page
 * tables as a valid task environment for the current cpu's idle task.
 *
 * Note, the container pointer is initialized later, when
 * containers are in shape.
 */
void setup_idle_task()
{
	memset(current, 0, sizeof(struct ktcb));

	current->space = &kernel_resources.init_space;
	TASK_PGD(current) = &init_pgd;

	/* Initialize space caps list */
	cap_list_init(&current->space->cap_list);

	/* Init scheduler structs */
	sched_init_task(current, TASK_PRIO_NORMAL);

	/* Set up never-to-be-used fields as invalid for precaution */
	current->pager = 0;
	current->tgid = -1;

	/*
	 * Set pointer to global kernel page tables.
	 * This is required early for early-stage pgd mappings.
	 */
#if defined(CONFIG_SUBARCH_V7)
	kernel_resources.pgd_global = &init_global_pgd;
#endif

}

void secondary_idle_task_init(void)
{
	/* This also has its spid allocated by the primary */
	current->space = &kernel_resources.init_space;
	TASK_PGD(current) = &init_pgd;

	/* Need to assign a thread id */
	current->tid = id_new(&kernel_resources.ktcb_ids);

	/* Set affinity */
	current->affinity = smp_get_cpuid();

	/* Set up never-to-be-used fields as invalid for precaution */
	current->pager = 0;
	current->tgid = -1;

	/* Init scheduler structs */
	sched_init_task(current, TASK_PRIO_NORMAL);

	sched_resume_async(current);
}

@@ -16,20 +16,21 @@
#include INC_ARCH(linker.h)
#include INC_PLAT(platform.h)
#include <l4/api/errno.h>
#include <l4/generic/idle.h>

struct kernel_resources kernel_resources;

pgd_table_t *alloc_pgd(void)
pgd_table_t *pgd_alloc(void)
{
	return mem_cache_zalloc(kernel_resources.pgd_cache);
}

pmd_table_t *alloc_pmd(void)
pmd_table_t *pmd_cap_alloc(struct cap_list *clist)
{
	struct capability *cap;

	if (!(cap = capability_find_by_rtype(current,
					     CAP_RTYPE_MAPPOOL)))
	if (!(cap = cap_list_find_by_rtype(clist,
					   CAP_RTYPE_MAPPOOL)))
		return 0;

	if (capability_consume(cap, 1) < 0)
@@ -38,12 +39,12 @@ pmd_table_t *alloc_pmd(void)
	return mem_cache_zalloc(kernel_resources.pmd_cache);
}

struct address_space *alloc_space(void)
struct address_space *space_cap_alloc(struct cap_list *clist)
{
	struct capability *cap;

	if (!(cap = capability_find_by_rtype(current,
					     CAP_RTYPE_SPACEPOOL)))
	if (!(cap = cap_list_find_by_rtype(clist,
					   CAP_RTYPE_SPACEPOOL)))
		return 0;

	if (capability_consume(cap, 1) < 0)
@@ -52,20 +53,12 @@ struct address_space *alloc_space(void)
	return mem_cache_zalloc(kernel_resources.space_cache);
}

struct ktcb *alloc_ktcb_use_capability(struct capability *cap)
{
	if (capability_consume(cap, 1) < 0)
		return 0;

	return mem_cache_zalloc(kernel_resources.ktcb_cache);
}

struct ktcb *alloc_ktcb(void)
struct ktcb *ktcb_cap_alloc(struct cap_list *clist)
{
	struct capability *cap;

	if (!(cap = capability_find_by_rtype(current,
					     CAP_RTYPE_THREADPOOL)))
	if (!(cap = cap_list_find_by_rtype(clist,
					   CAP_RTYPE_THREADPOOL)))
		return 0;

	if (capability_consume(cap, 1) < 0)
@@ -74,42 +67,12 @@ struct ktcb *alloc_ktcb(void)
	return mem_cache_zalloc(kernel_resources.ktcb_cache);
}

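All of the *_cap_alloc() variants above follow one pattern: find the matching pool capability in the supplied list, consume one unit, then allocate from the corresponding memcache. A generic sketch of that shared pattern (the rtype and cache are the only variation points):

/* Sketch of the shared find/consume/allocate pattern. */
static void *cap_pool_alloc(struct cap_list *clist, unsigned int rtype,
			    struct mem_cache *cache)
{
	struct capability *cap;

	if (!(cap = cap_list_find_by_rtype(clist, rtype)))
		return 0;		/* no pool capability at all */

	if (capability_consume(cap, 1) < 0)
		return 0;		/* pool quota exhausted */

	return mem_cache_zalloc(cache);
}
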
/*
 * This version is boot-time only and has no
 * capability checking. Imagine the case where the
 * initial capabilities are created and there is no
 * capability to check this allocation against.
 */
struct capability *boot_alloc_capability(void)
{
	return mem_cache_zalloc(kernel_resources.cap_cache);
}

struct capability *alloc_capability(void)
struct mutex_queue *mutex_cap_alloc()
{
	struct capability *cap;

	if (!(cap = capability_find_by_rtype(current,
					     CAP_RTYPE_CAPPOOL)))
		return 0;

	if (capability_consume(cap, 1) < 0)
		return 0;

	return mem_cache_zalloc(kernel_resources.cap_cache);
}

struct container *alloc_container(void)
{
	return mem_cache_zalloc(kernel_resources.cont_cache);
}

struct mutex_queue *alloc_user_mutex(void)
{
	struct capability *cap;

	if (!(cap = capability_find_by_rtype(current,
					     CAP_RTYPE_MUTEXPOOL)))
	if (!(cap = cap_find_by_rtype(current,
				      CAP_RTYPE_MUTEXPOOL)))
		return 0;

	if (capability_consume(cap, 1) < 0)
@@ -118,28 +81,28 @@ struct mutex_queue *alloc_user_mutex(void)
	return mem_cache_zalloc(kernel_resources.mutex_cache);
}

void free_pgd(void *addr)
void pgd_free(void *addr)
{
	BUG_ON(mem_cache_free(kernel_resources.pgd_cache, addr) < 0);
}

void free_pmd(void *addr)
void pmd_cap_free(void *addr, struct cap_list *clist)
{
	struct capability *cap;

	BUG_ON(!(cap = capability_find_by_rtype(current,
						CAP_RTYPE_MAPPOOL)));
	BUG_ON(!(cap = cap_list_find_by_rtype(clist,
					      CAP_RTYPE_MAPPOOL)));
	capability_free(cap, 1);

	BUG_ON(mem_cache_free(kernel_resources.pmd_cache, addr) < 0);
}

void free_space(void *addr, struct ktcb *task)
void space_cap_free(void *addr, struct cap_list *clist)
{
	struct capability *cap;

	BUG_ON(!(cap = capability_find_by_rtype(task,
						CAP_RTYPE_SPACEPOOL)));
	BUG_ON(!(cap = cap_list_find_by_rtype(clist,
					      CAP_RTYPE_SPACEPOOL)));
	capability_free(cap, 1);

	BUG_ON(mem_cache_free(kernel_resources.space_cache, addr) < 0);
@@ -150,40 +113,24 @@ void free_space(void *addr, struct ktcb *task)
 * Account it to the pager, but if it doesn't exist,
 * to the current idle task.
 */
void free_ktcb(void *addr, struct ktcb *acc_task)
void ktcb_cap_free(void *addr, struct cap_list *clist)
{
	struct capability *cap;

	/* Account it to the task's pager if it exists */
	BUG_ON(!(cap = capability_find_by_rtype(acc_task,
						CAP_RTYPE_THREADPOOL)));
	BUG_ON(!(cap = cap_list_find_by_rtype(clist,
					      CAP_RTYPE_THREADPOOL)));
	capability_free(cap, 1);

	BUG_ON(mem_cache_free(kernel_resources.ktcb_cache, addr) < 0);
}

void free_capability(void *addr)
void mutex_cap_free(void *addr)
{
	struct capability *cap;

	BUG_ON(!(cap = capability_find_by_rtype(current,
						CAP_RTYPE_CAPPOOL)));
	capability_free(cap, 1);

	BUG_ON(mem_cache_free(kernel_resources.cap_cache, addr) < 0);
}

void free_container(void *addr)
{
	BUG_ON(mem_cache_free(kernel_resources.cont_cache, addr) < 0);
}

void free_user_mutex(void *addr)
{
	struct capability *cap;

	BUG_ON(!(cap = capability_find_by_rtype(current,
						CAP_RTYPE_MUTEXPOOL)));
	BUG_ON(!(cap = cap_find_by_rtype(current,
					 CAP_RTYPE_MUTEXPOOL)));
	capability_free(cap, 1);

	BUG_ON(mem_cache_free(kernel_resources.mutex_cache, addr) < 0);
@@ -307,7 +254,7 @@ int memcap_unmap(struct cap_list *used_list,
			return 0;
		}
	}
	ret = -EEXIST;
	return -EEXIST;

out_err:
	if (ret == -ENOMEM)
@@ -321,232 +268,48 @@ out_err:
		       "Virtual" : "Physical",
		       __pfn_to_addr(cap->start),
		       __pfn_to_addr(cap->end));
	else if (ret == -EEXIST)
		printk("%s: FATAL: %s memory capability range "
		       "does not match with any available free range. "
		       "start=0x%lx, end=0x%lx\n", __KERNELNAME__,
		       cap_type(cap) == CAP_TYPE_MAP_VIRTMEM ?
		       "Virtual" : "Physical",
		       __pfn_to_addr(cap->start),
		       __pfn_to_addr(cap->end));
	BUG();
}

/*
 * Finds a device memory capability and deletes it from
 * the available device capabilities list
 * Copies cap_info structures to real capabilities in a list
 */
int memcap_request_device(struct cap_list *cap_list,
			  struct cap_info *devcap)
void copy_cap_info(struct cap_list *clist, struct cap_info *cap_info, int ncaps)
{
	struct capability *cap, *n;
	struct capability *cap;
	struct cap_info *cinfo;

	list_foreach_removable_struct(cap, n, &cap_list->caps, list) {
		if (cap->start == devcap->start &&
		    cap->end == devcap->end &&
		    cap_is_devmem(cap)) {
			/* Unlink only. This is boot memory */
			list_remove(&cap->list);
			return 0;
		}
	for (int i = 0; i < ncaps; i++) {
		cinfo = &cap_info[i];
		cap = alloc_bootmem(sizeof(*cap), 0);

		cap->resid = cinfo->target;
		cap->type = cinfo->type;
		cap->access = cinfo->access;
		cap->start = cinfo->start;
		cap->end = cinfo->end;
		cap->size = cinfo->size;

		cap_list_insert(cap, clist);
	}
	printk("%s: FATAL: Device memory requested "
	       "does not match any available device "
	       "capabilities start=0x%lx, end=0x%lx "
	       "attr=0x%x\n", __KERNELNAME__,
	       __pfn_to_addr(devcap->start),
	       __pfn_to_addr(devcap->end), devcap->attr);
	BUG();
}
/*
 * TODO: Evaluate if access bits are needed and add new cap ranges
 * only if their access bits match.
 *
 * Maps a memory range as a capability to a list of capabilities, either by
 * merging the given range into an existing capability or by creating a new one.
 */
int memcap_map(struct cap_list *cap_list,
	       const unsigned long map_start,
	       const unsigned long map_end)
{
	struct capability *cap, *n;

	list_foreach_removable_struct(cap, n, &cap_list->caps, list) {
		if (cap->start == map_end) {
			cap->start = map_start;
			return 0;
		} else if (cap->end == map_start) {
			cap->end = map_end;
			return 0;
		}
	}

	/* No capability could be extended, so we create a new one */
	cap = alloc_capability();
	cap->start = map_start;
	cap->end = map_end;
	link_init(&cap->list);
	cap_list_insert(cap, cap_list);

	return 0;
}

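memcap_map() only coalesces a range that is exactly adjacent to an existing capability; anything else becomes a fresh capability. A hedged walkthrough with invented pfn values, assuming the list initially holds one cap covering pfns 0x100-0x200:

/* Illustration of memcap_map() coalescing; values invented. */
memcap_map(list, 0x200, 0x300);	/* cap->end == 0x200: grows to 0x100-0x300 */
memcap_map(list, 0x080, 0x100);	/* cap->start == 0x100: grows to 0x080-0x300 */
memcap_map(list, 0x400, 0x500);	/* not adjacent: a new capability is created */
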
/* Delete all boot memory and add it to the physical memory pool. */
int free_boot_memory(struct kernel_resources *kres)
{
	struct container *c;
	unsigned long pfn_start =
		__pfn(virt_to_phys(_start_init));
	unsigned long pfn_end =
		__pfn(page_align_up(virt_to_phys(_end_init)));
	unsigned long init_pfns = pfn_end - pfn_start;

	/* Trim kernel used memory cap */
	memcap_unmap(0, &kres->physmem_used, pfn_start, pfn_end);

	/* Add it to unused physical memory */
	memcap_map(&kres->physmem_free, pfn_start, pfn_end);

	/* Remove the init memory from the page tables */
	for (unsigned long i = pfn_start; i < pfn_end; i++)
		remove_mapping(phys_to_virt(__pfn_to_addr(i)));

	/* Reset pointers that will remain in the system, as a precaution */
	list_foreach_struct(c, &kres->containers.list, list)
		c->pager = 0;

	printk("%s: Freed %lu KB init memory, "
	       "of which %lu KB was used.\n",
	       __KERNELNAME__, init_pfns * 4,
	       (init_pfns -
		__pfn(page_align_up(bootmem_free_pages()))) * 4);

	return 0;
}

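The `* 4` factors in the printk convert page counts to kilobytes under the 4 KB page size assumed here. A worked example:

/* Worked example of the KB accounting above (4 KB pages assumed):
 *   init_pfns = 16, unused bootmem pages = 4
 *   freed = 16 * 4       = 64 KB
 *   used  = (16 - 4) * 4 = 48 KB
 */
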
/*
 * Initializes kernel caplists, and sets up the total of physical
 * and virtual memory as single capabilities of the kernel.
 * They will then get split into caps of different lengths
 * during the traversal of container capabilities and memcache
 * allocations.
 */
void init_kernel_resources(struct kernel_resources *kres)
{
	struct capability *physmem, *virtmem, *kernel_area;

	/* Initialize system id pools */
	kres->space_ids.nwords = SYSTEM_IDS_MAX;
	kres->ktcb_ids.nwords = SYSTEM_IDS_MAX;
	kres->resource_ids.nwords = SYSTEM_IDS_MAX;
	kres->container_ids.nwords = SYSTEM_IDS_MAX;
	kres->mutex_ids.nwords = SYSTEM_IDS_MAX;
	kres->capability_ids.nwords = SYSTEM_IDS_MAX;

	/* Initialize container head */
	container_head_init(&kres->containers);

	/* Initialize kernel capability lists */
	cap_list_init(&kres->physmem_used);
	cap_list_init(&kres->physmem_free);
	cap_list_init(&kres->virtmem_used);
	cap_list_init(&kres->virtmem_free);
	cap_list_init(&kres->devmem_used);
	cap_list_init(&kres->devmem_free);
	cap_list_init(&kres->non_memory_caps);

	/* Set up total physical memory as single capability */
	physmem = alloc_bootmem(sizeof(*physmem), 0);
	physmem->start = __pfn(PLATFORM_PHYS_MEM_START);
	physmem->end = __pfn(PLATFORM_PHYS_MEM_END);
	link_init(&physmem->list);
	cap_list_insert(physmem, &kres->physmem_free);

	/* Set up total virtual memory as single capability */
	virtmem = alloc_bootmem(sizeof(*virtmem), 0);
	virtmem->start = __pfn(VIRT_MEM_START);
	virtmem->end = __pfn(VIRT_MEM_END);
	link_init(&virtmem->list);
	cap_list_insert(virtmem, &kres->virtmem_free);

	/* Set up kernel used area as a single capability */
	kernel_area = alloc_bootmem(sizeof(*kernel_area), 0);
	kernel_area->start = __pfn(virt_to_phys(_start_kernel));
	kernel_area->end = __pfn(virt_to_phys(_end_kernel));
	link_init(&kernel_area->list);
	cap_list_insert(kernel_area, &kres->physmem_used);

	/* Unmap kernel used area from free physical memory capabilities */
	memcap_unmap(0, &kres->physmem_free, kernel_area->start,
		     kernel_area->end);

	/* Set up platform-specific device capabilities */
	platform_setup_device_caps(kres);

	/* TODO:
	 * Add all virtual memory areas used by the kernel,
	 * e.g. kernel virtual area, syscall page, kip page,
	 * vectors page, timer, sysctl and uart device pages
	 */
}


/*
 * Copies cinfo structures to real capabilities for each pager.
 */
int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
{
	struct capability *cap;
	struct cap_info *cap_info;

	pager->start_address = pinfo->start_address;
	pager->start_lma = __pfn_to_addr(pinfo->pager_lma);
	pager->start_vma = __pfn_to_addr(pinfo->pager_vma);
	pager->memsize = __pfn_to_addr(pinfo->pager_size);
	pager->rw_sections_start = pinfo->rw_sections_start;
	pager->rw_sections_end = pinfo->rw_sections_end;
	pager->rx_sections_start = pinfo->rx_sections_start;
	pager->rx_sections_end = pinfo->rx_sections_end;
	pager->rw_pheader_start = pinfo->rw_pheader_start;
	pager->rw_pheader_end = pinfo->rw_pheader_end;
	pager->rx_pheader_start = pinfo->rx_pheader_start;
	pager->rx_pheader_end = pinfo->rx_pheader_end;

	/* Copy all cinfo structures into real capabilities */
	for (int i = 0; i < pinfo->ncaps; i++) {
		cap = boot_capability_create();

		cap_info = &pinfo->caps[i];

		cap->resid = cap_info->target;
		cap->type = cap_info->type;
		cap->access = cap_info->access;
		cap->start = cap_info->start;
		cap->end = cap_info->end;
		cap->size = cap_info->size;
		cap->attr = cap_info->attr;
		cap->irq = cap_info->irq;

		cap_list_insert(cap, &pager->cap_list);
	}

	/*
	 * Check whether the pager has enough resources to create its caps:
	 *
	 * Find the pager's capability pool capability, check its
	 * current use count and initialize it.
	 */
	cap = cap_list_find_by_rtype(&pager->cap_list,
				     CAP_RTYPE_CAPPOOL);

	/* Verify that we did not exceed what was allocated */
	if (!cap || cap->size < pinfo->ncaps) {
		printk("FATAL: Pager needs more capabilities "
		       "than allocated for initialization.\n");
		BUG();
	}

	/*
	 * Initialize the used count. The rest of the spending
	 * checks on this cap will be done in the cap syscall.
	 */
	cap->used = pinfo->ncaps;
	copy_cap_info(&pager->cap_list, pinfo->caps, pinfo->ncaps);

	return 0;
}
@@ -560,144 +323,16 @@ int copy_container_info(struct container *c, struct container_info *cinfo)
	strncpy(c->name, cinfo->name, CONFIG_CONTAINER_NAMESIZE);
	c->npagers = cinfo->npagers;

	/* Copy capabilities */
	/* Copy container capabilities */
	copy_cap_info(&c->cap_list, cinfo->caps, cinfo->ncaps);

	/* Copy pager capabilities and boot info */
	for (int i = 0; i < c->npagers; i++)
		copy_pager_info(&c->pager[i], &cinfo->pager[i]);

	return 0;
}

/*
 * Copy boot-time allocated kernel capabilities to ones that
 * are allocated from the capability memcache.
 */
void copy_boot_capabilities(struct cap_list *caplist)
{
	struct capability *bootcap, *n, *realcap;

	/* For every bootmem-allocated capability */
	list_foreach_removable_struct(bootcap, n,
				      &caplist->caps,
				      list) {
		/* Create a new one from the capability cache */
		realcap = capability_create();

		/* Copy all fields except the id to the real one */
		realcap->owner = bootcap->owner;
		realcap->resid = bootcap->resid;
		realcap->type = bootcap->type;
		realcap->access = bootcap->access;
		realcap->start = bootcap->start;
		realcap->end = bootcap->end;
		realcap->size = bootcap->size;
		realcap->attr = bootcap->attr;
		realcap->irq = bootcap->irq;

		/* Unlink the boot one */
		list_remove(&bootcap->list);

		/* Add the real one to the head */
		list_insert(&realcap->list,
			    &caplist->caps);
	}
}

/*
 * Creates capabilities allocated with a real id, and from the
 * capability cache, in place of ones allocated at boot-time.
 */
void setup_kernel_resources(struct boot_resources *bootres,
			    struct kernel_resources *kres)
{
	struct capability *cap;
	struct container *container;
	//pgd_table_t *current_pgd;

	/* First initialize the list of non-memory capabilities */
	cap = boot_capability_create();
	cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_MAPPOOL;
	cap->size = bootres->nkpmds;
	cap->owner = kres->cid;
	cap_list_insert(cap, &kres->non_memory_caps);

	cap = boot_capability_create();
	cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_SPACEPOOL;
	cap->size = bootres->nkpgds;
	cap->owner = kres->cid;
	cap_list_insert(cap, &kres->non_memory_caps);

	cap = boot_capability_create();
	cap->type = CAP_TYPE_QUANTITY | CAP_RTYPE_CAPPOOL;
	cap->size = bootres->nkcaps;
	cap->owner = kres->cid;
	cap->used = 3;
	cap_list_insert(cap, &kres->non_memory_caps);

	/* Set up a dummy current cap-list for the functions below to use */
	cap_list_move(&current->cap_list, &kres->non_memory_caps);

	copy_boot_capabilities(&kres->physmem_used);
	copy_boot_capabilities(&kres->physmem_free);
	copy_boot_capabilities(&kres->virtmem_used);
	copy_boot_capabilities(&kres->virtmem_free);
	copy_boot_capabilities(&kres->devmem_used);
	copy_boot_capabilities(&kres->devmem_free);

	/*
	 * Move to real page tables, accounted by
	 * pgds and pmds provided from the caches.
	 *
	 * We do not want to delay this too much,
	 * since we want to avoid allocating an uncertain
	 * amount of memory from the boot allocators.
	 */
	// current_pgd = arch_realloc_page_tables();

	/* Move it back */
	cap_list_move(&kres->non_memory_caps, &current->cap_list);


	/*
	 * Setting up ids used internally.
	 *
	 * See how many containers we have. Assign the next
	 * unused container id for kernel resources.
	 */
	kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
	// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0

	/*
	 * Assign thread and space ids to current, which will later
	 * become the idle task.
	 */
	current->tid = id_new(&kres->ktcb_ids);
	current->space->spid = id_new(&kres->space_ids);

	/*
	 * Init per-cpu zombie lists
	 */
	for (int i = 0; i < CONFIG_NCPU; i++)
		init_ktcb_list(&per_cpu_byid(kres->zombie_list, i));

	/*
	 * Create real containers from compile-time created
	 * cinfo structures.
	 */
	for (int i = 0; i < bootres->nconts; i++) {
		/* Allocate & init container */
		container = container_create();

		/* Fill in its information */
		copy_container_info(container, &cinfo[i]);

		/* Add it to kernel resources list */
		kres_insert_container(container, kres);
	}

	/* Initialize pagers */
	container_init_pagers(kres);
}

/*
 * Given a structure size and count, initializes a memory cache
 * using free memory available from free kernel memory capabilities.
@@ -732,9 +367,9 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
					  page_align_up(bufsize),
					  MAP_KERN_RW);
	} else {
		add_mapping_pgd(__pfn_to_addr(cap->start),
				virtual, page_align_up(bufsize),
				MAP_KERN_RW, &init_pgd);
		add_mapping_space(__pfn_to_addr(cap->start),
				  virtual, page_align_up(bufsize),
				  MAP_KERN_RW, current->space);
	}
	/* Unmap area from memcap */
	memcap_unmap_range(cap, &kres->physmem_free,
@@ -761,19 +396,12 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
void init_resource_allocators(struct boot_resources *bootres,
			      struct kernel_resources *kres)
{
	/*
	 * An extra space is reserved for the kernel
	 * in case all containers quit.
	 */
	bootres->nspaces++;
	bootres->nkpgds++;

	/* Initialise PGD cache */
	kres->pgd_cache =
		init_resource_cache(bootres->nspaces,
				    PGD_SIZE, kres, 1);

	/* Initialise struct address_space cache */
	/* Initialise address space cache */
	kres->space_cache =
		init_resource_cache(bootres->nspaces,
				    sizeof(struct address_space),
@@ -789,48 +417,6 @@ void init_resource_allocators(struct boot_resources *bootres,
		init_resource_cache(bootres->nmutex,
				    sizeof(struct mutex_queue),
				    kres, 0);
	/* Initialise container cache */
	kres->cont_cache =
		init_resource_cache(bootres->nconts,
				    sizeof(struct container),
				    kres, 0);

	/*
	 * Add all caps used by the kernel.
	 * Two extra in case more memcaps get split after
	 * the cap cache init below. Three extra for the quantitative
	 * kernel caps for pmds, pgds and caps.
	 */
	bootres->nkcaps += kres->virtmem_used.ncaps +
			   kres->virtmem_free.ncaps +
			   kres->physmem_used.ncaps +
			   kres->physmem_free.ncaps +
			   kres->devmem_free.ncaps +
			   kres->devmem_used.ncaps + 2 + 3;

	/* Add that to the total cap count */
	bootres->ncaps += bootres->nkcaps;

	/* Initialise capability cache */
	kres->cap_cache =
		init_resource_cache(bootres->ncaps,
				    sizeof(struct capability),
				    kres, 0);

	/* Count boot pmds used so far and add them */
	bootres->nkpmds += pgd_count_boot_pmds();

	/*
	 * Calculate the maximum possible number of pmds that may be
	 * used during this pmd cache initialization and add them.
	 */
	bootres->nkpmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
	if (!is_aligned(bootres->npmds * PMD_SIZE,
			PMD_MAP_SIZE))
		bootres->nkpmds++;

	/* Add kernel pmds to the total pmd count */
	bootres->npmds += bootres->nkpmds;

	/* Initialise PMD cache */
	kres->pmd_cache =
@@ -867,11 +453,6 @@ int process_cap_info(struct cap_info *cap,
		/* Specifies how many pmds can be mapped */
		bootres->npmds += cap->size;
		break;

	case CAP_RTYPE_CAPPOOL:
		/* Specifies how many new caps can be created */
		bootres->ncaps += cap->size;
		break;
	}

	if (cap_type(cap) == CAP_TYPE_MAP_VIRTMEM) {
@@ -879,50 +460,57 @@ int process_cap_info(struct cap_info *cap,
				  &kres->virtmem_free,
				  cap->start, cap->end);
	} else if (cap_type(cap) == CAP_TYPE_MAP_PHYSMEM) {
		if (!cap_is_devmem(cap))
			memcap_unmap(&kres->physmem_used,
				     &kres->physmem_free,
				     cap->start, cap->end);
		else /* Delete device from free list */
			memcap_request_device(&kres->devmem_free, cap);
		/* Try physical ram ranges */
		if ((ret = memcap_unmap(&kres->physmem_used,
					&kres->physmem_free,
					cap->start, cap->end))
		    == -EEXIST) {
			/* Try physical device ranges */
			if ((ret = memcap_unmap(&kres->devmem_used,
						&kres->devmem_free,
						cap->start, cap->end))
			    == -EEXIST) {
				/* Neither is a match */
				printk("%s: FATAL: Physical memory "
				       "capability range does not match "
				       "with any available physmem or devmem "
				       "free range. start=0x%lx, end=0x%lx\n",
				       __KERNELNAME__,
				       __pfn_to_addr(cap->start),
				       __pfn_to_addr(cap->end));
				BUG();
			}
		}
	}

	return ret;
}

/*
 * Initializes the kernel resources by describing both virtual
 * and physical memory. Then traverses cap_info structures
 * to figure out the resource requirements of containers.
 * Initialize kernel-managed physical memory capabilities
 * based on the multiple physical memory and device regions
 * defined by the specific platform.
 */
int setup_boot_resources(struct boot_resources *bootres,
			 struct kernel_resources *kres)
void kernel_setup_physmem(struct kernel_resources *kres,
			  struct platform_mem_regions *mem_regions)
{
	struct cap_info *cap;
	struct capability *physmem;

	init_kernel_resources(kres);
	for (int i = 0; i < mem_regions->nregions; i++) {
		/* Allocate new physical memory capability */
		physmem = alloc_bootmem(sizeof(*physmem), 0);

	/* Number of containers known at compile-time */
	bootres->nconts = CONFIG_CONTAINERS;
		/* Assign its range from the platform range at this index */
		physmem->start = __pfn(mem_regions->mem_range[i].start);
		physmem->end = __pfn(mem_regions->mem_range[i].end);

	/* Traverse all containers */
	for (int i = 0; i < bootres->nconts; i++) {
		/* Traverse all pagers */
		for (int j = 0; j < cinfo[i].npagers; j++) {
			int ncaps = cinfo[i].pager[j].ncaps;

			/* Count all capabilities */
			bootres->ncaps += ncaps;

			/* Count all resources */
			for (int k = 0; k < ncaps; k++) {
				cap = &cinfo[i].pager[j].caps[k];
				process_cap_info(cap, bootres, kres);
			}
		}
		/* Init and insert the capability to keep permanently */
		link_init(&physmem->list);
		if (mem_regions->mem_range[i].type == MEM_TYPE_RAM)
			cap_list_insert(physmem, &kres->physmem_free);
		else if (mem_regions->mem_range[i].type == MEM_TYPE_DEV)
			cap_list_insert(physmem, &kres->devmem_free);
		else BUG();
	}

	return 0;
}

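kernel_setup_physmem() expects the platform to publish its memory map through platform_mem_regions. The exact struct layout is not visible in this hunk, so the definition below is a hypothetical sketch using only the field and type names referenced above, with invented values:

/* Hypothetical platform memory map; layout assumed, values invented. */
struct platform_mem_regions platform_mem_regions = {
	.nregions = 2,
	.mem_range = {
		{ .start = 0x00000000, .end = 0x08000000,
		  .type = MEM_TYPE_RAM },	/* 128 MB SDRAM */
		{ .start = 0x10000000, .end = 0x10200000,
		  .type = MEM_TYPE_DEV },	/* device registers */
	},
};
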
/*
@@ -933,15 +521,145 @@ int setup_boot_resources(struct boot_resources *bootres,
 */
int init_system_resources(struct kernel_resources *kres)
{
	struct cap_info *cap_info;
	// struct capability *cap;
	struct container *container;
	struct capability *virtmem, *kernel_area;
	struct boot_resources bootres;

	/* Initialize system id pools */
	kres->space_ids.nwords = SYSTEM_IDS_MAX;
	kres->ktcb_ids.nwords = SYSTEM_IDS_MAX;
	kres->resource_ids.nwords = SYSTEM_IDS_MAX;
	kres->container_ids.nwords = SYSTEM_IDS_MAX;
	kres->mutex_ids.nwords = SYSTEM_IDS_MAX;
	kres->capability_ids.nwords = SYSTEM_IDS_MAX;

	/* Initialize container head */
	container_head_init(&kres->containers);

	/* Initialize kernel capability lists */
	cap_list_init(&kres->physmem_used);
	cap_list_init(&kres->physmem_free);
	cap_list_init(&kres->devmem_used);
	cap_list_init(&kres->devmem_free);
	cap_list_init(&kres->virtmem_used);
	cap_list_init(&kres->virtmem_free);

	/* Initialize kernel address space */
	link_init(&kres->init_space.list);
	cap_list_init(&kres->init_space.cap_list);
	spin_lock_init(&kres->init_space.lock);

	// Shall we have this?
	// space->spid = id_new(&kernel_resources.space_ids);

	/*
	 * Set up as many total physical memory capabilities
	 * as there are platform-defined physical memory regions.
	 */
	kernel_setup_physmem(kres, &platform_mem_regions);

	/* Set up total virtual memory as single capability */
	virtmem = alloc_bootmem(sizeof(*virtmem), 0);
	virtmem->start = __pfn(VIRT_MEM_START);
	virtmem->end = __pfn(VIRT_MEM_END);
	link_init(&virtmem->list);
	cap_list_insert(virtmem, &kres->virtmem_free);

	/* Set up kernel used area as a single capability */
	kernel_area = alloc_bootmem(sizeof(*kernel_area), 0);
	kernel_area->start = __pfn(virt_to_phys(_start_kernel));
	kernel_area->end = __pfn(virt_to_phys(_end_kernel));
	link_init(&kernel_area->list);
	cap_list_insert(kernel_area, &kres->physmem_used);

	/* Unmap kernel used area from free physical memory capabilities */
	memcap_unmap(0, &kres->physmem_free, kernel_area->start,
		     kernel_area->end);

	/* TODO:
	 * Add all virtual memory areas used by the kernel,
	 * e.g. kernel virtual area, syscall page, kip page.
	 */

	memset(&bootres, 0, sizeof(bootres));

	setup_boot_resources(&bootres, kres);
	/* Number of containers known at compile-time */
	bootres.nconts = CONFIG_CONTAINERS;

	/* Traverse all containers */
	for (int i = 0; i < bootres.nconts; i++) {

		/* Process container-wide capabilities */
		bootres.ncaps += cinfo[i].ncaps;
		for (int g = 0; g < cinfo[i].ncaps; g++) {
			cap_info = &cinfo[i].caps[g];
			process_cap_info(cap_info, &bootres, kres);
		}

		/* Traverse all pagers */
		for (int j = 0; j < cinfo[i].npagers; j++) {
			int ncaps = cinfo[i].pager[j].ncaps;

			/* Count all capabilities */
			bootres.ncaps += ncaps;

			/*
			 * Count all resources.
			 *
			 * Remove all container memory resources
			 * from the global pool.
			 */
			for (int k = 0; k < ncaps; k++) {
				cap_info = &cinfo[i].pager[j].caps[k];
				process_cap_info(cap_info, &bootres, kres);
			}
		}
	}

	init_resource_allocators(&bootres, kres);

	setup_kernel_resources(&bootres, kres);
	/*
	 * Setting up ids used internally.
	 *
	 * See how many containers we have. Assign the next
	 * unused container id for kernel resources.
	 */
	kres->cid = id_get(&kres->container_ids, bootres.nconts + 1);
	// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0

	/*
	 * Assign thread and space ids to current, which will later
	 * become the idle task.
	 * TODO: Move this to idle task initialization?
	 */
	current->tid = id_new(&kres->ktcb_ids);
	current->space->spid = id_new(&kres->space_ids);

	/*
	 * Init per-cpu zombie lists
	 */
	for (int i = 0; i < CONFIG_NCPU; i++)
		init_ktcb_list(&per_cpu_byid(kres->zombie_list, i));

	/*
	 * Create real containers from compile-time created
	 * cinfo structures.
	 */
	for (int i = 0; i < bootres.nconts; i++) {
		/* Allocate & init container */
		container = container_alloc_init();

		/* Fill in its information */
		copy_container_info(container, &cinfo[i]);

		/* Add it to kernel resources list */
		kres_insert_container(container, kres);
	}

	/* Initialize pagers */
	container_init_pagers(kres);

	return 0;
}

@@ -27,6 +27,8 @@
#include INC_ARCH(exception.h)
#include INC_SUBARCH(irq.h)

#define is_idle_task(task) (task == per_cpu(scheduler).idle_task)

DECLARE_PERCPU(struct scheduler, scheduler);

/* This is incremented on each irq or voluntarily by preempt_disable() */
@@ -119,8 +121,6 @@ void sched_init()

	sched->rq_runnable = &sched->sched_rq[0];
	sched->rq_expired = &sched->sched_rq[1];
	sched->rq_rt_runnable = &sched->sched_rq[2];
	sched->rq_rt_expired = &sched->sched_rq[3];
	sched->prio_total = TASK_PRIO_TOTAL;
	sched->idle_task = current;
}
@@ -138,18 +138,6 @@ static void sched_rq_swap_queues(void)
	per_cpu(scheduler).rq_expired = temp;
}

static void sched_rq_swap_rtqueues(void)
{
	struct runqueue *temp;

	BUG_ON(list_empty(&per_cpu(scheduler).rq_rt_expired->task_list));

	/* Queues are swapped and the expired list becomes runnable */
	temp = per_cpu(scheduler).rq_rt_runnable;
	per_cpu(scheduler).rq_rt_runnable = per_cpu(scheduler).rq_rt_expired;
	per_cpu(scheduler).rq_rt_expired = temp;
}

/* Set policy on where to add tasks in the runqueue */
#define RQ_ADD_BEHIND 0
#define RQ_ADD_FRONT 1
@@ -199,29 +187,6 @@ static inline void sched_rq_remove_task(struct ktcb *task)
	sched_unlock_runqueues(sched, irqflags);
}

static inline void
sched_run_task(struct ktcb *task, struct scheduler *sched)
{
	if (task->flags & TASK_REALTIME)
		sched_rq_add_task(task, sched->rq_rt_runnable,
				  RQ_ADD_BEHIND);
	else
		sched_rq_add_task(task, sched->rq_runnable,
				  RQ_ADD_BEHIND);
}

static inline void
sched_expire_task(struct ktcb *task, struct scheduler *sched)
{
	if (task->flags & TASK_REALTIME)
		sched_rq_add_task(task, sched->rq_rt_expired,
				  RQ_ADD_BEHIND);
	else
		sched_rq_add_task(task, sched->rq_expired,
				  RQ_ADD_BEHIND);
}

void sched_init_task(struct ktcb *task, int prio)
{
	link_init(&task->rq_list);
@@ -237,7 +202,9 @@ void sched_resume_sync(struct ktcb *task)
{
	BUG_ON(task == current);
	task->state = TASK_RUNNABLE;
	sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
	sched_rq_add_task(task, per_cpu_byid(scheduler,
					     task->affinity).rq_runnable,
			  1);
	schedule();
}

@@ -250,7 +217,9 @@ void sched_resume_sync(struct ktcb *task)
void sched_resume_async(struct ktcb *task)
{
	task->state = TASK_RUNNABLE;
	sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
	sched_rq_add_task(task, per_cpu_byid(scheduler,
					     task->affinity).rq_runnable,
			  1);
}

/*
@@ -279,7 +248,7 @@ void sched_suspend_sync(void)
	current->state = TASK_INACTIVE;
	current->flags &= ~TASK_SUSPENDING;

	if (current->pagerid != current->tid)
	if (current->pager != current)
		wake_up(&current->wqh_pager, 0);
	preempt_enable();

@@ -293,7 +262,7 @@ void sched_suspend_async(void)
	current->state = TASK_INACTIVE;
	current->flags &= ~TASK_SUSPENDING;

	if (current->pagerid != current->tid)
	if (current->pager != current)
		wake_up(&current->wqh_pager, 0);
	preempt_enable();

@@ -341,18 +310,6 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
		CONFIG_SCHED_TICKS * task->priority / prio_total;
}

/*
 * Decide whether this selection should prefer a real-time task;
 * a few counter slots out of every 16 are reserved for
 * non-real-time tasks.
 */
static inline int sched_select_rt(struct scheduler *sched)
{
	int ctr = sched->task_select_ctr++ & 0xF;

	if (ctr == 0 || ctr == 8 || ctr == 15)
		return 0;
	else
		return 1;
}

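Counting the branch above over one full counter period shows the actual duty cycle: the non-real-time slots are ctr values 0, 8 and 15, so a real-time task is preferred on 13 out of every 16 selections. A standalone check:

/* Standalone check of sched_select_rt()'s duty cycle. */
#include <stdio.h>

int main(void)
{
	int rt = 0;

	for (int ctr = 0; ctr < 16; ctr++)
		if (!(ctr == 0 || ctr == 8 || ctr == 15))
			rt++;

	printf("rt preferred on %d/16 selections\n", rt);	/* 13/16 */
	return 0;
}
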
/*
 * Selection happens as follows:
@@ -370,28 +327,13 @@ static inline int sched_select_rt(struct scheduler *sched)
struct ktcb *sched_select_next(void)
{
	struct scheduler *sched = &per_cpu(scheduler);
	int realtime = sched_select_rt(sched);
	struct ktcb *next = 0;
	struct ktcb *next = NULL;

	for (;;) {

		/* Decision to run an RT task? */
		if (realtime && sched->rq_rt_runnable->total > 0) {
			/* Get a real-time task, if available */
			next = link_to_struct(sched->rq_rt_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		} else if (realtime && sched->rq_rt_expired->total > 0) {
			/* Swap real-time queues */
			sched_rq_swap_rtqueues();
			/* Get a real-time task */
			next = link_to_struct(sched->rq_rt_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		/* Idle flagged for run? */
		} else if (sched->flags & SCHED_RUN_IDLE) {
			/* Clear idle flag */
			sched->flags &= ~SCHED_RUN_IDLE;
		if (sched->flags & SCHED_RUN_IDLE) {
			/* Select and add to runqueue */
			next = sched->idle_task;
			break;
		} else if (sched->rq_runnable->total > 0) {
@@ -421,6 +363,7 @@ struct ktcb *sched_select_next(void)
	return next;
}


/* Prepare next runnable task right before switching to it */
void sched_prepare_next(struct ktcb *next)
{
@@ -439,6 +382,10 @@ void sched_prepare_next(struct ktcb *next)
                next->ticks_left = next->ticks_assigned;
        }

        /* Idle task needs adding to the queue so it's schedulable */
        if (is_idle_task(next))
                sched_rq_add_task(next, per_cpu(scheduler).rq_runnable, 1);

        /* Reinitialise task's schedule granularity boundary */
        next->sched_granule = SCHED_GRANULARITY;
}

@@ -491,13 +438,20 @@ void schedule()
        /* Reset schedule flag */
        need_resched = 0;

        /* Remove from runnable and put into appropriate runqueue */
        /* Remove runnable task from queue */
        if (current->state == TASK_RUNNABLE) {
                sched_rq_remove_task(current);
                if (current->ticks_left)
                        sched_run_task(current, &per_cpu(scheduler));
                else
                        sched_expire_task(current, &per_cpu(scheduler));
                /* Non-idle tasks go back to a runqueue */
                if (!is_idle_task(current)) {
                        if (current->ticks_left)
                                sched_rq_add_task(current,
                                                  per_cpu(scheduler).rq_runnable,
                                                  0);
                        else
                                sched_rq_add_task(current,
                                                  per_cpu(scheduler).rq_expired,
                                                  0);
                }
        }
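
Note: the requeue decision above is what the sched_run_task()/sched_expire_task() helpers shown earlier factor out. A minimal sketch of the policy they implement, with stand-in types (illustrative, not the kernel's):

#include <stddef.h>

struct rq_stub;                         /* stand-in runqueue */

struct task_stub {
        int ticks_left;
};

/* A task with timeslice ticks left stays runnable; an exhausted
 * one parks on the expired queue until the queues are swapped. */
static struct rq_stub *pick_queue(struct task_stub *t,
                                  struct rq_stub *runnable,
                                  struct rq_stub *expired)
{
        return t->ticks_left ? runnable : expired;
}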
/*

@@ -21,7 +21,7 @@ void init_address_space_list(struct address_space_list *space_list)
        memset(space_list, 0, sizeof(*space_list));

        link_init(&space_list->list);
        mutex_init(&space_list->lock);
        spin_lock_init(&space_list->lock);
}

void address_space_attach(struct ktcb *tcb, struct address_space *space)
@@ -54,20 +54,22 @@ void address_space_remove(struct address_space *space, struct container *cont)
        list_remove_init(&space->list);
}

/* Assumes address space reflock is already held */
void address_space_delete(struct address_space *space,
                          struct ktcb *task_accounted)
void address_space_delete(struct address_space *space, struct cap_list *clist)
{
        BUG_ON(space->ktcb_refs);
        BUG_ON(!list_empty(&space->cap_list.caps));
        BUG_ON(space->cap_list.ncaps);

        /* Traverse the page tables and delete private pmds */
        delete_page_tables(space);
        delete_page_tables(space, clist);

        /* Return the space id */
        id_del(&kernel_resources.space_ids, space->spid);

        /* Deallocate the space structure */
        free_space(space, task_accounted);
        space_cap_free(space, clist);
}
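
Note: address_space_delete() now threads a struct cap_list through to the free routines, so freed resources are credited back to a capability rather than to an accounting task. A hypothetical sketch of the quantity-capability charge/refund pattern this implies (budgeted_alloc/budgeted_free are illustrative names, not the kernel's helpers):

#include <stdlib.h>

/* Illustrative quantity capability: a budget of objects */
struct quantity_cap {
        int used;
        int size;               /* total budget */
};

static void *budgeted_alloc(struct quantity_cap *cap, size_t bytes)
{
        if (cap->used >= cap->size)
                return NULL;    /* budget exhausted */
        cap->used++;
        return malloc(bytes);
}

static void budgeted_free(struct quantity_cap *cap, void *obj)
{
        free(obj);
        cap->used--;            /* refund the budget */
}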
struct address_space *address_space_create(struct address_space *orig)
@@ -77,19 +79,19 @@ struct address_space *address_space_create(struct address_space *orig)
        int err;

        /* Allocate space structure */
        if (!(space = alloc_space()))
        if (!(space = space_cap_alloc(&current->space->cap_list)))
                return PTR_ERR(-ENOMEM);

        /* Allocate pgd */
        if (!(pgd = alloc_pgd())) {
                free_space(space, current);
        if (!(pgd = pgd_alloc())) {
                space_cap_free(space, &current->space->cap_list);
                return PTR_ERR(-ENOMEM);
        }

        /* Initialize space structure */
        link_init(&space->list);
        cap_list_init(&space->cap_list);
        mutex_init(&space->lock);
        spin_lock_init(&space->lock);
        space->pgd = pgd;

        /* Copy all kernel entries */
@@ -106,8 +108,8 @@ struct address_space *address_space_create(struct address_space *orig)
        if (orig) {
                /* Copy its user entries/tables */
                if ((err = copy_user_tables(space, orig)) < 0) {
                        free_pgd(pgd);
                        free_space(space, current);
                        pgd_free(pgd);
                        space_cap_free(space, &current->space->cap_list);
                        return PTR_ERR(err);
                }
        }
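
Note: the error paths above release resources in reverse order of acquisition, and the capability variants must refund the same cap_list they charged. A standalone sketch of that unwind shape (illustrative names, plain malloc standing in for the cap-backed allocators):

#include <stdlib.h>

struct pair {
        void *a;
        void *b;
};

/* Acquire two resources; on any failure, release everything
 * already acquired, in reverse order. */
static struct pair *pair_create(void)
{
        struct pair *p = malloc(sizeof(*p));

        if (!p)
                return NULL;
        if (!(p->a = malloc(64)))
                goto out_p;
        if (!(p->b = malloc(64)))
                goto out_a;
        return p;
out_a:
        free(p->a);
out_p:
        free(p);
        return NULL;
}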
@@ -34,8 +34,6 @@ void tcb_init(struct ktcb *new)

        spin_lock_init(&new->thread_lock);

        cap_list_init(&new->cap_list);

        /* Initialise task's scheduling state and parameters. */
        sched_init_task(new, TASK_PRIO_NORMAL);

@@ -51,7 +49,7 @@ struct ktcb *tcb_alloc_init(l4id_t cid)
        struct ktcb *tcb;
        struct task_ids ids;

        if (!(tcb = alloc_ktcb()))
        if (!(tcb = ktcb_cap_alloc(&current->space->cap_list)))
                return 0;

        ids.tid = id_new(&kernel_resources.ktcb_ids);
@@ -66,9 +64,14 @@ struct ktcb *tcb_alloc_init(l4id_t cid)
        return tcb;
}

void tcb_delete(struct ktcb *tcb)
/*
 * Deletes the tcb but moves its capability list to the pager.
 * Unlike tcb_delete(), the last-child wake-up of the pager
 * is absent here.
 */
void tcb_delete_pager(struct ktcb *tcb)
{
        struct ktcb *pager, *acc_task;
        struct cap_list *pager_cap_list =
                &tcb->container->pager->cap_list;

        /* Sanity checks first */
        BUG_ON(!is_page_aligned(tcb));
@@ -83,15 +86,7 @@ void tcb_delete(struct ktcb *tcb)
        BUG_ON(tcb->nlocks);
        BUG_ON(tcb->waiting_on);
        BUG_ON(tcb->wq);

        /* Remove from zombie list */
        list_remove(&tcb->task_list);

        /* Determine task to account deletions */
        if (!(pager = tcb_find(tcb->pagerid)))
                acc_task = current;
        else
                acc_task = pager;
        BUG_ON(tcb->nchild);

        /*
         * NOTE: This protects single threaded space
@@ -101,19 +96,69 @@ void tcb_delete(struct ktcb *tcb)
         * traversal would be needed to ensure list is
         * still there.
         */
        mutex_lock(&tcb->container->space_list.lock);
        mutex_lock(&tcb->space->lock);
        spin_lock(&tcb->container->space_list.lock);
        spin_lock(&tcb->space->lock);
        BUG_ON(--tcb->space->ktcb_refs != 0);

        address_space_remove(tcb->space, tcb->container);

        cap_list_move(pager_cap_list, &tcb->space->cap_list);

        spin_unlock(&tcb->space->lock);
        spin_unlock(&tcb->container->space_list.lock);

        address_space_delete(tcb->space, pager_cap_list);

        /* Clear container id part */
        tcb->tid &= ~TASK_CID_MASK;

        /* Deallocate tcb ids */
        id_del(&kernel_resources.ktcb_ids, tcb->tid);

        /* Free the tcb */
        ktcb_cap_free(tcb, pager_cap_list);
}

void tcb_delete(struct ktcb *tcb)
{
        struct ktcb *pager = tcb->pager;

        /* Sanity checks first */
        BUG_ON(!is_page_aligned(tcb));
        BUG_ON(tcb->wqh_pager.sleepers > 0);
        BUG_ON(tcb->wqh_send.sleepers > 0);
        BUG_ON(tcb->wqh_recv.sleepers > 0);
        BUG_ON(tcb->affinity != current->affinity);
        BUG_ON(tcb->state != TASK_INACTIVE);
        BUG_ON(!list_empty(&tcb->rq_list));
        BUG_ON(tcb->rq);
        BUG_ON(tcb == current);
        BUG_ON(tcb->nlocks);
        BUG_ON(tcb->waiting_on);
        BUG_ON(tcb->wq);
        BUG_ON(tcb->nchild);

        /*
         * NOTE: This protects single threaded space
         * deletion against space modification.
         *
         * If space deletion were multi-threaded, list
         * traversal would be needed to ensure list is
         * still there.
         */
        spin_lock(&tcb->container->space_list.lock);
        spin_lock(&tcb->space->lock);
        BUG_ON(--tcb->space->ktcb_refs < 0);

        /* No refs left for the space, delete it */
        if (tcb->space->ktcb_refs == 0) {
                address_space_remove(tcb->space, tcb->container);
                mutex_unlock(&tcb->space->lock);
                address_space_delete(tcb->space, acc_task);
                mutex_unlock(&tcb->container->space_list.lock);
                spin_unlock(&tcb->space->lock);
                spin_unlock(&tcb->container->space_list.lock);
                address_space_delete(tcb->space, &tcb->pager->space->cap_list);
        } else {
                mutex_unlock(&tcb->space->lock);
                mutex_unlock(&tcb->container->space_list.lock);
                spin_unlock(&tcb->space->lock);
                spin_unlock(&tcb->container->space_list.lock);
        }

        /* Clear container id part */
@@ -123,7 +168,17 @@ void tcb_delete(struct ktcb *tcb)
        id_del(&kernel_resources.ktcb_ids, tcb->tid);

        /* Free the tcb */
        free_ktcb(tcb, acc_task);
        ktcb_cap_free(tcb, &tcb->pager->space->cap_list);

        /* Reduce child count after freeing tcb, so that
         * pager does not release capabilities until then */
        spin_lock(&pager->thread_lock);
        BUG_ON(pager->nchild-- < 0);
        spin_unlock(&pager->thread_lock);

        /* Wake up pager if this was the last child */
        if (pager->nchild == 0)
                wake_up(&pager->wqh_pager, 0);
}
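
Note: the ordering above matters: the tcb is freed first and the pager's child count dropped second, so the pager cannot release its capabilities while a child's memory is still charged against them. The decrement-then-wake shape in a standalone sketch (stand-in types, locking elided):

#include <assert.h>

struct pager_stub {
        int nchild;
        int woken;              /* stands in for wake_up() */
};

/* Called only after a child's memory has been fully released */
static void child_released(struct pager_stub *pager)
{
        assert(pager->nchild > 0);
        if (--pager->nchild == 0)
                pager->woken = 1;
}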
struct ktcb *tcb_find_by_space(l4id_t spid)
@@ -248,13 +303,30 @@ void tcb_delete_zombies(void)
        struct ktcb_list *ktcb_list =
                &per_cpu(kernel_resources.zombie_list);

        /* Traverse the per-cpu zombie list */
        /* Lock and traverse the per-cpu zombie list */
        spin_lock(&ktcb_list->list_lock);
        list_foreach_removable_struct(zombie, n,
                                      &ktcb_list->list,
                                      task_list)
                /* Delete all zombies one by one */
                tcb_delete(zombie);
                                      task_list) {
                /* Lock zombie */
                spin_lock(&zombie->thread_lock);

                /* Remove from zombie list */
                list_remove(&zombie->task_list);

                /* Unlock all locks */
                spin_unlock(&zombie->thread_lock);
                spin_unlock(&ktcb_list->list_lock);

                /* Delete zombie with no locks held */
                if (thread_is_pager(zombie))
                        tcb_delete_pager(zombie);
                else
                        tcb_delete(zombie);

                /* Lock back the list */
                spin_lock(&ktcb_list->list_lock);
        }
        spin_unlock(&ktcb_list->list_lock);
}
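
Note: tcb_delete() can block, so the list lock is dropped around each deletion and retaken before the iteration continues; the removable iterator makes the unlink safe. The same drain pattern, standalone with a singly linked list and a pthread mutex (illustrative, not kernel code):

#include <pthread.h>

struct node {
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *zombies;

/* Drain the list without ever holding the lock across the
 * potentially blocking deletion itself. */
static void drain(void (*delete_fn)(struct node *))
{
        pthread_mutex_lock(&list_lock);
        while (zombies) {
                struct node *z = zombies;

                zombies = z->next;      /* unlink under the lock */
                pthread_mutex_unlock(&list_lock);

                delete_fn(z);           /* no locks held */

                pthread_mutex_lock(&list_lock);
        }
        pthread_mutex_unlock(&list_lock);
}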
@@ -367,10 +439,16 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
                 * Update it with privileged flags,
                 * so that only kernel can access.
                 */
                add_mapping_pgd(phys, page_align(task->utcb_address),
                                page_align_up(UTCB_SIZE),
                                MAP_KERN_RW,
                                TASK_PGD(current));
                if ((ret = add_mapping_use_cap(phys,
                                               page_align(task->utcb_address),
                                               page_align_up(UTCB_SIZE),
                                               MAP_KERN_RW,
                                               current->space,
                                               &task->space->cap_list)) < 0) {
                        printk("Warning: Irq owner thread is "
                               "out of pmds. ret=%d\n", ret);
                        return ret;
                }
        }
        BUG_ON(!phys);
}

@@ -148,7 +148,7 @@ int do_timer_irq(void)
        update_process_times();
        update_system_time();

#if defined (CONFIG_SMP)
#if defined (CONFIG_SMP_)
        smp_send_ipi(cpu_mask_others(), IPI_TIMER_EVENT);
#endif

@@ -1,22 +1,25 @@

# Inherit global environment
import os, sys, glob
import os, sys

PROJRELROOT = '../../'

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

Import('env', 'symbols')
Import('env')

config = configuration_retrieve()
symbols = config.all

# The set of source files associated with this SConscript file.
src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c']
src_local = ['init.c', 'memory.c', 'systable.c',
             'irq.c', 'cache.c', 'debug.c']

for name, val in symbols:
    if 'CONFIG_SMP' == name:
        src_local += ['smp.c', 'ipi.c']
    if 'CONFIG_SMP_' == name:
        src_local += ['smp.c', 'ipi.c']

obj = env.Object(src_local)
Return('obj')

@@ -11,6 +11,7 @@
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/tcb.h>
#include <l4/generic/idle.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/container.h>
@@ -106,60 +107,6 @@ void vectors_init()
}

#include <l4/generic/cap-types.h>
#include <l4/api/capability.h>
#include <l4/generic/capability.h>

/* This is what an idle task needs */
static DECLARE_PERCPU(struct capability, pmd_cap);

/*
 * FIXME: Add this when initializing kernel resources
 * This is a hack.
 */
void setup_idle_caps()
{
        struct capability *cap = &per_cpu(pmd_cap);

        cap_list_init(&current->cap_list);
        cap->type = CAP_RTYPE_MAPPOOL | CAP_TYPE_QUANTITY;
        cap->size = 50;

        link_init(&cap->list);
        cap_list_insert(cap, &current->cap_list);
}

/*
 * Set up current stack's beginning, and initial page tables
 * as a valid task environment for idle task for current cpu
 */
void setup_idle_task()
{
        memset(current, 0, sizeof(struct ktcb));

        current->space = &init_space;
        TASK_PGD(current) = &init_pgd;

        /* Initialize space caps list */
        cap_list_init(&current->space->cap_list);

        /*
         * FIXME: This must go to kernel resources init.
         */

        /* Init scheduler structs */
        sched_init_task(current, TASK_PRIO_NORMAL);

        /*
         * If using split page tables, kernel
         * resources must point at the global pgd
         * TODO: We may need this for V6, in the future
         */
#if defined(CONFIG_SUBARCH_V7)
        kernel_resources.pgd_global = &init_global_pgd;
#endif
}

void remove_initial_mapping(void)
{
        /* At this point, execution is on virtual addresses. */
@@ -168,17 +115,15 @@ void remove_initial_mapping(void)

void init_finalize(void)
{
        /* Set up idle task capabilities */
        setup_idle_caps();

        platform_timer_start();

#if defined (CONFIG_SMP)
#if defined (CONFIG_SMP_)
        /* Tell other cores to continue */
        secondary_run_signal = 1;
        dmb();
#endif

        sched_resume_async(current);
        idle_task();
}

@@ -60,16 +60,6 @@ void init_smp(void)
        }
}

void secondary_setup_idle_task(void)
{
        /* This also has its spid allocated by primary */
        current->space = &init_space;
        TASK_PGD(current) = &init_pgd;

        /* We need a thread id */
        current->tid = id_new(&kernel_resources.ktcb_ids);
}

/*
 * Idle wait before any tasks become available for running.
 *
@@ -85,16 +75,13 @@ void sched_secondary_start(void)
        while (!secondary_run_signal)
                dmb();

        secondary_setup_idle_task();

        setup_idle_caps();
        secondary_idle_task_init();

        idle_task();

        BUG();
}

/*
 * This is where execution jumps from secondary_start(), which is
 * called from board_smp_start() so that each core starts here.
@@ -1,10 +1,10 @@

# Inherit global environment
Import('env')
Import('symbols')

# The set of source files associated with this SConscript file.
src_local = ['printk.c', 'putc.c', 'string.c', 'bit.c', 'wait.c', 'mutex.c', 'idpool.c', 'memcache.c']
src_local = ['printk.c', 'putc.c', 'string.c', 'bit.c',
             'wait.c', 'mutex.c', 'idpool.c', 'memcache.c']

obj = env.Object(src_local)

Return('obj')

@@ -1,5 +1,5 @@
/*
 * Copyright 2008-2010 B Labs Ltd.
 * Copyright (C) 2008-2010 B Labs Ltd.
 */

void *_memset(void *p, int c, int size);

@@ -7,5 +7,4 @@ Import('env')
src_local = ['print-early.c', 'platform.c', 'perfmon.c', 'irq.c', 'cm.c']

obj = env.Object(src_local)

Return('obj')

@@ -5,20 +5,23 @@
import os, sys

PROJRELROOT = '../../../'

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

Import('env', 'platform', 'symbols')
Import('env')

config = configuration_retrieve()
platform = config.platform

# The set of source files associated with this SConscript file.
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)

# This is a realview platform, include the corresponding files.
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'), exports = {'env' : env, 'symbols' : symbols},
                  duplicate=0, build_dir='realview')
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'),
                  exports = { 'env' : env }, duplicate = 0,
                  build_dir = 'realview')

Return('obj')

@@ -49,6 +49,7 @@ int platform_setup_device_caps(struct kernel_resources *kres)
        device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE);

        return 0;
}

@@ -1,5 +1,4 @@

# Inherit global environment
Import('env')

@@ -23,42 +23,21 @@
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>

/*
 * FIXME: This is not a platform-specific call;
 * we will move it out later.
 */
void device_cap_init(struct kernel_resources *kres, int devtype,
                     int devnum, unsigned long base)
{
        struct capability *cap;

        cap = alloc_bootmem(sizeof(*cap), 0);
        cap_set_devtype(cap, devtype);
        cap_set_devnum(cap, devnum);
        cap->start = __pfn(base);
        cap->end = cap->start + 1;
        cap->size = cap->end - cap->start;
        link_init(&cap->list);
        cap_list_insert(cap, &kres->devmem_free);
}
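
Note: each device capability spans exactly one page frame: __pfn() converts the physical base to a frame number and end is one past start. A worked standalone example of that arithmetic, assuming 4 KiB pages (the base value is hypothetical):

#include <stdio.h>

#define PFN_SHIFT       12                      /* 4 KiB pages assumed */
#define pfn_of(x)       ((unsigned long)(x) >> PFN_SHIFT)

int main(void)
{
        unsigned long base = 0x1000A000UL;      /* hypothetical UART base */
        unsigned long start = pfn_of(base);     /* 0x1000A */
        unsigned long end = start + 1;

        printf("pfn start=%#lx end=%#lx size=%lu frame\n",
               start, end, end - start);        /* size=1 */
        return 0;
}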
/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
 * concise description of what is used by the kernel.
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
        device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
        device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE);

        return 0;
}
struct platform_mem_regions platform_mem_regions = {
        .nregions = 1,
        .mem_range = {
                [0] = {
                        .start = PLATFORM_PHYS_MEM_START,
                        .end = PLATFORM_PHYS_MEM_END,
                        .type = MEM_TYPE_RAM,
                },
                [1] = {
                        .start = PLATFORM_SYSTEM_REGISTERS,
                        .end = PLATFORM_SYSTEM_REGISTERS + PLATFORM_SYSREGS_SIZE,
                        .type = MEM_TYPE_DEV,
                },
        },
};

/*
 * We will use UART0 for kernel as well as user tasks,

@@ -1,24 +1,22 @@

# Inherit global environment
import os, sys

PROJRELROOT = '../../../'

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

Import('env', 'platform', 'symbols')
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['platform.c', 'irq.c']
obj = env.Object(src_local)

# This is a realview platform, include the corresponding files.
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'), exports = {'env' : env, 'symbols' : symbols},
                  duplicate=0, build_dir='realview')
obj += SConscript(join(PROJROOT, 'src/platform/realview/SConscript'),
                  exports = {'env' : env }, duplicate = 0,
                  build_dir = 'realview')

Return('obj')

@@ -19,42 +19,26 @@
#include <l4/generic/cap-types.h>
#include <l4/drivers/irq/gic/gic.h>

/*
 * FIXME: This is not a platform-specific call;
 * we will move it out later.
 */
void device_cap_init(struct kernel_resources *kres, int devtype,
                     int devnum, unsigned long base)
{
        struct capability *cap;

        cap = alloc_bootmem(sizeof(*cap), 0);
        cap_set_devtype(cap, devtype);
        cap_set_devnum(cap, devnum);
        cap->start = __pfn(base);
        cap->end = cap->start + 1;
        cap->size = cap->end - cap->start;
        link_init(&cap->list);
        cap_list_insert(cap, &kres->devmem_free);
}

/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
 * concise description of what is used by the kernel.
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
        device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
        device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE);

        return 0;
}
struct platform_mem_regions platform_mem_regions = {
        .nregions = 3,
        .mem_range = {
                [0] = {
                        .start = PLATFORM_PHYS_MEM_START,
                        .end = PLATFORM_PHYS_MEM_END,
                        .type = MEM_TYPE_RAM,
                },
                [1] = {
                        .start = PLATFORM_DEVICES_START,
                        .end = PLATFORM_DEVICES_END,
                        .type = MEM_TYPE_DEV,
                },
                [2] = {
                        .start = PLATFORM_DEVICES1_START,
                        .end = PLATFORM_DEVICES1_END,
                        .type = MEM_TYPE_DEV,
                },
        },
};

void init_platform_irq_controller()
{
@@ -85,5 +69,8 @@ void init_platform_devices()
        add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);

        /* System registers */
        add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);
}

@@ -1,21 +1,23 @@

# Inherit global environment
import os, sys, glob
import os, sys

PROJRELROOT = '../../'

sys.path.append(PROJRELROOT)

from config.projpaths import *
from configure import *
from scripts.config.projpaths import *
from scripts.config.config_invoke import *

Import('env', 'symbols')
Import('env')

config = configuration_retrieve()
symbols = config.all

# The set of source files associated with this SConscript file.
src_local = ['irq.c', 'platform.c', 'print-early.c', 'perfmon.c', 'cpuperf.S']

for name, val in symbols:
    if 'CONFIG_SMP' == name:
    if 'CONFIG_SMP_' == name:
        src_local += ['smp.c']

obj = env.Object(src_local)

@@ -51,7 +51,7 @@ int platform_mouse_user_handler(struct irq_desc *desc)
         * Disable rx mouse interrupt.
         * User will enable this.
         */
        clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR,
        clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR,
               PL050_KMI_RXINTR);

        irq_thread_notify(desc);

@@ -84,7 +84,7 @@ void platform_init(void)
        init_platform_irq_controller();
        init_platform_devices();

#if defined (CONFIG_SMP)
#if defined (CONFIG_SMP_)
        init_smp();
        scu_init();
#endif

@@ -55,9 +55,6 @@ void platform_smp_init(int ncpus)
                irq_desc_array[i].handler = &ipi_handler;
        }

        add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);

}

int platform_smp_start(int cpu, void (*smp_start_func)(int))