Mirror of https://github.com/drasko/codezero.git (synced 2026-01-24 00:33:14 +01:00)

Commit: Code that compiles until initialization of containers and pagers.
@@ -4,7 +4,7 @@
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c', 'container.c']
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c', 'container.c', 'capability.c']

obj = env.Object(src_local)

Return('obj')
@@ -3,5 +3,21 @@
 *
 * Copyright (C) 2009 Bahadir Balban
 */
#include <l4/generic/resource.h>
#include <l4/generic/capability.h>

struct capability *capability_create(void)
{
	struct capability *cap = alloc_capability();

	capability_init(cap);

	return cap;
}

void capability_init(struct capability *cap)
{
	cap->capid = id_new(&kernel_container.capability_ids);
	link_init(&cap->list);
}

@@ -4,8 +4,10 @@
 * Copyright (C) 2009 Bahadir Balban
 */
#include <l4/generic/container.h>
#include <l4/generic/resource.h>
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/api/errno.h>
#include INC_GLUE(memory.h)

/*
@@ -22,7 +24,7 @@ struct container_info cinfo[] = {
	.pager_lma = __pfn(0x38000),
	.pager_vma = __pfn(0xE0000000),
	.pager_size = __pfn(0x96000),
	.ncaps = 11,
	.ncaps = 14,
	.caps = {
		[0] = {
			.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
@@ -51,6 +53,16 @@ struct container_info cinfo[] = {
			.size = __pfn(0x10000000),
		},
		[3] = {
			.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
			.access = CAP_MAP_READ | CAP_MAP_WRITE
				| CAP_MAP_EXEC | CAP_MAP_UNMAP
				| CAP_MAP_UTCB,
			.start = __pfn(0xF8000000),
			.end = __pfn(0xF9000000),
			.size = __pfn(0x1000000),
		},

		[4] = {
			.type = CAP_TYPE_MAP | CAP_RTYPE_PHYSMEM,
			.access = CAP_MAP_CACHED | CAP_MAP_UNCACHED
				| CAP_MAP_READ | CAP_MAP_WRITE
@@ -58,51 +70,244 @@ struct container_info cinfo[] = {
			.start = __pfn(0x38000),
			.end = __pfn(0x1000000), /* 16 MB for all posix services */
		},
		[4] = {
		[5] = {
			.type = CAP_TYPE_IPC | CAP_RTYPE_CONTAINER,
			.access = CAP_IPC_SEND | CAP_IPC_RECV
				| CAP_IPC_FULL | CAP_IPC_SHORT
				| CAP_IPC_EXTENDED,
			.start = 0, .end = 0, .size = 0,
		},
		[5] = {
		[6] = {
			.type = CAP_TYPE_TCTRL | CAP_RTYPE_CONTAINER,
			.access = CAP_TCTRL_CREATE | CAP_TCTRL_DESTROY
				| CAP_TCTRL_SUSPEND | CAP_TCTRL_RESUME
				| CAP_TCTRL_RECYCLE,
			.start = 0, .end = 0, .size = 0,
		},
		[6] = {
		[7] = {
			.type = CAP_TYPE_EXREGS | CAP_RTYPE_CONTAINER,
			.access = CAP_EXREGS_RW_PAGER
				| CAP_EXREGS_RW_UTCB | CAP_EXREGS_RW_SP
				| CAP_EXREGS_RW_PC | CAP_EXREGS_RW_REGS,
			.start = 0, .end = 0, .size = 0,
		},
		[7] = {
		[8] = {
			.type = CAP_TYPE_QUANTITY
				| CAP_RTYPE_THREADPOOL,
			.access = 0, .start = 0, .end = 0,
			.size = 64,
		},
		[8] = {
		[9] = {
			.type = CAP_TYPE_QUANTITY | CAP_RTYPE_SPACEPOOL,
			.access = 0, .start = 0, .end = 0,
			.size = 64,
		},
		[9] = {
		[10] = {
			.type = CAP_TYPE_QUANTITY | CAP_RTYPE_CPUPOOL,
			.access = 0, .start = 0, .end = 0,
			.size = 50, /* Percentage */
		},
		[10] = {
		[11] = {
			.type = CAP_TYPE_QUANTITY | CAP_RTYPE_MUTEXPOOL,
			.access = 0, .start = 0, .end = 0,
			.size = 100,
		},
		[12] = {
			/* For pmd accounting */
			.type = CAP_TYPE_QUANTITY | CAP_RTYPE_MAPPOOL,
			.access = 0, .start = 0, .end = 0,
			/* Function of mem regions, nthreads etc. */
			.size = (64 * 30 + 100),
		},
		[13] = {
			/* For cap splitting, creating, etc. */
			.type = CAP_TYPE_QUANTITY | CAP_RTYPE_CAPPOOL,
			.access = 0, .start = 0, .end = 0,
			/* This may be existing caps X 2 etc. */
			.size = 30,
		},
	},
	},
	},
	},
};

int container_init(struct container *c)
{
	/* Allocate new container id */
	c->cid = id_new(&kernel_container.container_ids);

	/* Init data structures */
	link_init(&c->pager_list);
	init_address_space_list(&c->space_list);
	init_ktcb_list(&c->ktcb_list);
	init_mutex_queue_head(&c->mutex_queue_head);

	/* Init scheduler */
	sched_init(&c->scheduler);

	return 0;
}

struct container *container_create(void)
{
	struct container *c = alloc_container();

	container_init(c);

	return c;
}

void kcont_insert_container(struct container *c,
			    struct kernel_container *kcont)
{
	list_insert(&c->list, &kcont->containers.list);
	kcont->containers.ncont++;
}

void task_setup_utcb(struct ktcb *task, struct pager *pager)
{
	struct capability *cap;

	/* Find a virtual memory capability with UTCB map permissions */
	list_foreach_struct(cap, &pager->cap_list.caps, list) {
		if (((cap->type & CAP_RTYPE_MASK) ==
		     CAP_RTYPE_VIRTMEM) &&
		    (cap->access & CAP_MAP_UTCB)) {
			/* Use first address slot as pager's utcb */
			task->utcb_address = __pfn_to_addr(cap->start);
		}
	}
}

/*
|
||||
* TODO:
|
||||
*
|
||||
* Create a purer address_space_create that takes
|
||||
* flags for extra ops such as copying kernel tables,
|
||||
* user tables of an existing pgd etc.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The first pager initialization is a special-case
|
||||
* since it uses the current kernel pgd.
|
||||
*/
|
||||
int init_first_pager(struct pager *pager,
|
||||
struct container *cont,
|
||||
pgd_table_t *current_pgd)
|
||||
{
|
||||
struct ktcb *task = tcb_alloc_init();
|
||||
struct address_space *space;
|
||||
|
||||
/* Initialize ktcb */
|
||||
task_init_registers(task, pager->start_vma);
|
||||
task_setup_utcb(task, pager);
|
||||
|
||||
/* Allocate space structure */
|
||||
if (!(space = alloc_space()))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Set up space id */
|
||||
space->spid = id_new(&kernel_container.space_ids);
|
||||
|
||||
/* Initialize space structure */
|
||||
link_init(&space->list);
|
||||
mutex_init(&space->lock);
|
||||
space->pgd = current_pgd;
|
||||
|
||||
task->space = space;
|
||||
task->container = cont;
|
||||
|
||||
/* Map the task's space */
|
||||
add_mapping_pgd(pager->start_lma, pager->start_vma,
|
||||
page_align_up(pager->memsize),
|
||||
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
|
||||
|
||||
printk("Mapping %lu pages from 0x%lx to 0x%lx for %s\n",
|
||||
__pfn(page_align_up(pager->memsize)),
|
||||
pager->start_lma, pager->start_vma, cont->name);
|
||||
|
||||
/* Initialize task scheduler parameters */
|
||||
sched_init_task(task, TASK_PRIO_PAGER);
|
||||
|
||||
/* Give it a kick-start tick and make runnable */
|
||||
task->ticks_left = 1;
|
||||
sched_resume_async(task);
|
||||
|
||||
/* Container list that keeps all tasks */
|
||||
tcb_add(task);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Inspects pager parameters defined in the container,
|
||||
* and sets up an execution environment for the pager.
|
||||
*
|
||||
* This involves setting up pager's ktcb, space, utcb,
|
||||
* all ids, registers, and mapping its (perhaps) first
|
||||
* few pages in order to make it runnable.
|
||||
*/
|
||||
int init_pager(struct pager *pager, struct container *cont)
|
||||
{
|
||||
struct ktcb *task = tcb_alloc_init();
|
||||
|
||||
task_init_registers(task, pager->start_vma);
|
||||
|
||||
task_setup_utcb(task, pager);
|
||||
|
||||
task->space = address_space_create(0);
|
||||
|
||||
task->container = cont;
|
||||
|
||||
add_mapping_pgd(pager->start_lma, pager->start_vma,
|
||||
page_align_up(pager->memsize),
|
||||
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
|
||||
|
||||
printk("Mapping %lu pages from 0x%lx to 0x%lx for %s\n",
|
||||
__pfn(page_align_up(pager->memsize)),
|
||||
pager->start_lma, pager->start_vma, cont->name);
|
||||
|
||||
/* Initialize task scheduler parameters */
|
||||
sched_init_task(task, TASK_PRIO_PAGER);
|
||||
|
||||
/* Give it a kick-start tick and make runnable */
|
||||
task->ticks_left = 1;
|
||||
sched_resume_async(task);
|
||||
|
||||
/* Container list that keeps all tasks */
|
||||
tcb_add(task);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize all containers with their initial set of tasks,
|
||||
* spaces, scheduler parameters such that they can be started.
|
||||
*/
|
||||
int container_init_pagers(struct kernel_container *kcont,
|
||||
pgd_table_t *current_pgd)
|
||||
{
|
||||
struct container *cont;
|
||||
struct pager *pager;
|
||||
int pgidx = 0;
|
||||
|
||||
list_foreach_struct(cont, &kcont->containers.list, list) {
|
||||
for (int i = 0; i < cont->npagers; i++) {
|
||||
pager = &cont->pager[i];
|
||||
|
||||
/* First pager initializes specially */
|
||||
if (pgidx == 0)
|
||||
init_first_pager(pager, cont,
|
||||
current_pgd);
|
||||
else
|
||||
init_pager(pager, cont);
|
||||
pgidx++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
 */
#include <l4/lib/list.h>
#include <l4/lib/memcache.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/resource.h>
#include INC_GLUE(memory.h)

/* Supports this many different kmalloc sizes */
@@ -36,117 +36,18 @@ void init_kmalloc()
	mutex_init(&km_pool.kmalloc_mutex);
}

/*
 * KMALLOC implementation:
 *
 * Allocates memory from mem_caches that it generates on-the-fly,
 * for up to KMALLOC_POOLS_MAX different sizes.
 */
void *__kmalloc(int size)
{
	struct mem_cache *cache;
	int right_sized_pool_idx = -1;
	int index;

	BUG_ON(!size); /* It is a kernel bug if size is 0 */

	for (int i = 0; i < km_pool.total; i++) {
		/* Check if this pool has right size */
		if (km_pool.pool_head[i].cache_size == size) {
			right_sized_pool_idx = i;
			/*
			 * Found the pool, now see if any
			 * cache has available slots
			 */
			list_foreach_struct(cache, &km_pool.pool_head[i].cache_list,
					    list) {
				if (cache->free)
					return mem_cache_alloc(cache);
				else
					break;
			}
		}
	}

	/*
	 * All pools are allocated and none has requested size
	 */
	if ((right_sized_pool_idx < 0) &&
	    (km_pool.total == KMALLOC_POOLS_MAX - 1)) {
		printk("kmalloc: Too many types of pool sizes requested. "
		       "Giving up.\n");
		BUG();
	}

	/* A pool exists with given size? (But no cache in it is free) */
	if (right_sized_pool_idx >= 0)
		index = right_sized_pool_idx;
	else /* No pool of this size, allocate new by incrementing total */
		index = km_pool.total++;

	/* Only allow up to page size */
	BUG_ON(size >= PAGE_SIZE);
	BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
					size, 0)));
	// printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
	list_insert(&cache->list, &km_pool.pool_head[index].cache_list);
	km_pool.pool_head[index].occupied = 1;
	km_pool.pool_head[index].total_caches++;
	km_pool.pool_head[index].cache_size = size;
	return mem_cache_alloc(cache);
}

void *kmalloc(int size)
{
	void *p;

	mutex_lock(&km_pool.kmalloc_mutex);
	p = __kmalloc(size);
	mutex_unlock(&km_pool.kmalloc_mutex);
	return p;
}

/* FIXME:
 * Horrible complexity O(n^2) because we don't know which cache
 * we're freeing from!!! But it's simple. ;-)
 */
int __kfree(void *p)
{
	struct mem_cache *cache, *tmp;

	for (int i = 0; i < km_pool.total; i++)
		list_foreach_removable_struct(cache, tmp,
					      &km_pool.pool_head[i].cache_list,
					      list) {
			if (!mem_cache_free(cache, p)) {
				if (mem_cache_is_empty(cache)) {
					km_pool.pool_head[i].total_caches--;
					list_remove(&cache->list);
					free_page(cache);
					/*
					 * Total remains the same but slot
					 * may have no caches left.
					 */
				}
				return 0;
			}
		}
	return -1;
	return 0;
}

int kfree(void *p)
{
	int ret;
	mutex_lock(&km_pool.kmalloc_mutex);
	ret = __kfree(p);
	mutex_unlock(&km_pool.kmalloc_mutex);
	return ret;
	return 0;
}

void *kzalloc(int size)
{
	void *p = kmalloc(size);
	memset(p, 0, size);
	return p;
	return 0;
}

@@ -1,188 +0,0 @@
|
||||
/*
|
||||
* Simple kernel memory allocator built on top of memcache
|
||||
* implementation.
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/lib/memcache.h>
|
||||
#include <l4/lib/list.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/kmalloc.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/physmem.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
|
||||
/* FIXME:
|
||||
*
|
||||
* mem_cache_alloc() now has an interruptible mutex.
|
||||
* All routines defined here should check returned errors.
|
||||
*/
|
||||
|
||||
#define PGALLOC_PGD_CACHE 0
|
||||
#define PGALLOC_PMD_CACHE 1
|
||||
#define PGALLOC_PG_CACHE 2
|
||||
#define PGALLOC_CACHE_TOTAL 3
|
||||
|
||||
/* The initial chunk of physical memory allocated before any pagers. */
|
||||
#define PGALLOC_INIT_GRANT SZ_1MB
|
||||
|
||||
/* Covers 3 main types of memory needed by the kernel. */
|
||||
struct pgalloc {
|
||||
struct link cache_list[3];
|
||||
};
|
||||
static struct pgalloc pgalloc;
|
||||
|
||||
void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
|
||||
{
|
||||
link_init(&cache->list);
|
||||
BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
|
||||
list_insert(&cache->list, &pgalloc.cache_list[cidx]);
|
||||
}
|
||||
|
||||
void print_kmem_grant_params(grant_kmem_usage_t *params)
|
||||
{
|
||||
printk("%s: %lu bytes physical memory granted.\n", __KERNELNAME__, params->total_size);
|
||||
printk("%s: Possible kmem usage on this memory grant:\n", __KERNELNAME__);
|
||||
printk("%s: PGDs: %lu, PMDs: %lu, TCBs: %lu, Extra: %lu bytes.\n", __KERNELNAME__,
|
||||
params->total_pgds, params->total_pmds, params->total_tcbs,
|
||||
params->extra);
|
||||
}
|
||||
|
||||
#define TASK_AVERAGE_SIZE SZ_16MB
|
||||
#define TASK_AVERAGE_PMDS TASK_AVERAGE_SIZE / PMD_MAP_SIZE
|
||||
|
||||
void calc_grant_kmem_usage(grant_kmem_usage_t *params, unsigned long total_size)
|
||||
{
|
||||
/* Kmem usage per task */
|
||||
unsigned long task_avg_kmem_usage = PGD_SIZE + PMD_SIZE * 16 + PAGE_SIZE;
|
||||
unsigned long total_tasks = total_size / task_avg_kmem_usage;
|
||||
unsigned long extra = total_size - total_tasks * task_avg_kmem_usage;
|
||||
|
||||
params->total_size = total_size;
|
||||
params->total_tasks = total_tasks;
|
||||
params->total_pgds = total_tasks;
|
||||
params->total_pmds = total_tasks * 16;
|
||||
params->total_tcbs = total_tasks;
|
||||
params->extra = extra;
|
||||
|
||||
print_kmem_grant_params(params);
|
||||
}
|
||||
|
||||
|
||||
int pgalloc_add_new_grant(unsigned long pfn, int npages)
|
||||
{
|
||||
unsigned long physical = __pfn_to_addr(pfn);
|
||||
void *virtual = (void *)phys_to_virt(physical);
|
||||
struct mem_cache *pgd_cache, *pmd_cache, *pg_cache;
|
||||
grant_kmem_usage_t params;
|
||||
|
||||
/* First map the whole grant */
|
||||
add_mapping(physical, phys_to_virt(physical), __pfn_to_addr(npages),
|
||||
MAP_SVC_RW_FLAGS);
|
||||
|
||||
/* Calculate how to divide buffer into different caches */
|
||||
calc_grant_kmem_usage(¶ms, __pfn_to_addr(npages));
|
||||
|
||||
/* Create the caches, least alignment-needing, most, then others. */
|
||||
pmd_cache = mem_cache_init(virtual, params.total_pmds * PMD_SIZE,
|
||||
PMD_SIZE, 1);
|
||||
virtual += params.total_pmds * PMD_SIZE;
|
||||
|
||||
pgd_cache = mem_cache_init(virtual, params.total_pgds * PGD_SIZE,
|
||||
PGD_SIZE, 1);
|
||||
virtual += params.total_pgds * PGD_SIZE;
|
||||
|
||||
pg_cache = mem_cache_init(virtual, params.total_tcbs * PAGE_SIZE
|
||||
+ params.extra, PAGE_SIZE, 1);
|
||||
|
||||
/* Add the caches */
|
||||
pgalloc_add_new_cache(pgd_cache, PGALLOC_PGD_CACHE);
|
||||
pgalloc_add_new_cache(pmd_cache, PGALLOC_PMD_CACHE);
|
||||
pgalloc_add_new_cache(pg_cache, PGALLOC_PG_CACHE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void init_pgalloc(void)
|
||||
{
|
||||
int initial_grant = PGALLOC_INIT_GRANT;
|
||||
|
||||
for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
|
||||
link_init(&pgalloc.cache_list[i]);
|
||||
|
||||
/* Grant ourselves with an initial chunk of physical memory */
|
||||
physmem.free_cur = page_align_up(physmem.free_cur);
|
||||
set_page_map(physmem.free_cur, __pfn(initial_grant), 1);
|
||||
pgalloc_add_new_grant(__pfn(physmem.free_cur), __pfn(initial_grant));
|
||||
physmem.free_cur += initial_grant;
|
||||
|
||||
/* Activate kmalloc */
|
||||
init_kmalloc();
|
||||
}
|
||||
|
||||
void pgalloc_remove_cache(struct mem_cache *cache)
|
||||
{
|
||||
list_remove_init(&cache->list);
|
||||
}
|
||||
|
||||
static inline void *pgalloc_from_cache(int cidx)
|
||||
{
|
||||
struct mem_cache *cache, *n;
|
||||
|
||||
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
|
||||
if (mem_cache_total_empty(cache))
|
||||
return mem_cache_zalloc(cache);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kfree_to_cache(int cidx, void *virtual)
|
||||
{
|
||||
struct mem_cache *cache, *n;
|
||||
|
||||
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
|
||||
if (mem_cache_free(cache, virtual) == 0)
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
void *alloc_page(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PG_CACHE);
|
||||
}
|
||||
|
||||
void *alloc_pmd(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PMD_CACHE);
|
||||
}
|
||||
|
||||
void *alloc_pgd(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PGD_CACHE);
|
||||
}
|
||||
|
||||
int free_page(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PG_CACHE, v);
|
||||
}
|
||||
|
||||
int free_pmd(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PMD_CACHE, v);
|
||||
}
|
||||
|
||||
int free_pgd(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PGD_CACHE, v);
|
||||
}
|
||||
|
||||
void *zalloc_page(void)
|
||||
{
|
||||
void *p;
|
||||
|
||||
if (!(p = alloc_page()))
|
||||
return 0;
|
||||
|
||||
memset(p, 0, PAGE_SIZE);
|
||||
return p;
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/physmem.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/resource.h>
#include <l4/generic/tcb.h>
#include <l4/lib/list.h>
#include <l4/lib/spinlock.h>
@@ -88,6 +88,6 @@ void physmem_init()

void memory_init()
{
	init_pgalloc();
	//init_pgalloc();
}

@@ -3,7 +3,6 @@
|
||||
*
|
||||
* Copyright (C) 2009 Bahadir Balban
|
||||
*/
|
||||
|
||||
#include <l4/generic/capability.h>
|
||||
#include <l4/generic/cap-types.h>
|
||||
#include <l4/generic/container.h>
|
||||
@@ -16,6 +15,76 @@
|
||||
|
||||
struct kernel_container kernel_container;
|
||||
|
||||
pgd_table_t *alloc_pgd(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.pgd_cache);
|
||||
}
|
||||
|
||||
pmd_table_t *alloc_pmd(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.pmd_cache);
|
||||
}
|
||||
|
||||
struct address_space *alloc_space(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.space_cache);
|
||||
}
|
||||
|
||||
struct ktcb *alloc_ktcb(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.ktcb_cache);
|
||||
}
|
||||
|
||||
struct capability *alloc_capability(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.cap_cache);
|
||||
}
|
||||
|
||||
struct container *alloc_container(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.cont_cache);
|
||||
}
|
||||
|
||||
struct mutex_queue *alloc_user_mutex(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.mutex_cache);
|
||||
}
|
||||
|
||||
void free_pgd(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.pgd_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_pmd(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.pmd_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_space(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.space_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_ktcb(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.ktcb_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_capability(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.cap_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_container(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.cont_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_user_mutex(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.mutex_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void cap_list_init(struct cap_list *clist)
|
||||
{
|
||||
clist->ncaps = 0;
|
||||
@@ -78,6 +147,10 @@ int memcap_shrink(struct capability *cap, struct cap_list *cap_list,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a single memory cap (that definitely overlaps) removes
|
||||
* the portion of pfns specified by start/end.
|
||||
*/
|
||||
int memcap_unmap_range(struct capability *cap,
|
||||
struct cap_list *cap_list,
|
||||
const unsigned long start,
|
||||
@@ -128,65 +201,6 @@ int memcap_unmap(struct cap_list *cap_list,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do all system accounting for this capability info
|
||||
* structure that belongs to a container, such as
|
||||
* count its resource requirements, remove its portion
|
||||
* from global kernel capabilities etc.
|
||||
*/
|
||||
int process_cap_info(struct cap_info *cap,
|
||||
struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (cap->type & CAP_RTYPE_MASK) {
|
||||
case CAP_RTYPE_THREADPOOL:
|
||||
bootres->nthreads += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_SPACEPOOL:
|
||||
bootres->nspaces += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_MUTEXPOOL:
|
||||
bootres->nmutex += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_VIRTMEM:
|
||||
/* Area size in pages divided by mapsize in pages */
|
||||
bootres->npmds +=
|
||||
cap->size / __pfn(PMD_MAP_SIZE);
|
||||
if ((ret = memcap_unmap(&kcont->virtmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
case CAP_RTYPE_PHYSMEM:
|
||||
if ((ret = memcap_unmap(&kcont->physmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Migrate any boot allocations to their relevant caches.
|
||||
*/
|
||||
@@ -219,65 +233,28 @@ int free_boot_memory(struct boot_resources *bootres,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
|
||||
struct kernel_container *kcont,
|
||||
int aligned)
|
||||
{
|
||||
struct capability *cap;
|
||||
unsigned long bufsize;
|
||||
|
||||
/* In all unused physical memory regions */
|
||||
list_foreach_struct(cap, &kcont->physmem_free.caps, list) {
|
||||
/* Get buffer size needed for cache */
|
||||
bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
|
||||
struct_size, nstruct,
|
||||
aligned);
|
||||
/*
|
||||
* Check if memcap region size is enough to cover
|
||||
* resource allocation
|
||||
*/
|
||||
if (__pfn_to_addr(cap->end - cap->start) >= bufsize) {
|
||||
unsigned long virtual =
|
||||
phys_to_virt(__pfn_to_addr(cap->start));
|
||||
/*
|
||||
* Map the buffer as boot mapping if pmd caches
|
||||
* are not initialized
|
||||
*/
|
||||
if (!kcont->pmd_cache) {
|
||||
add_boot_mapping(__pfn_to_addr(cap->start),
|
||||
virtual, bufsize,
|
||||
MAP_SVC_RW_FLAGS);
|
||||
} else {
|
||||
add_mapping(__pfn_to_addr(cap->start),
|
||||
virtual, bufsize,
|
||||
MAP_SVC_RW_FLAGS);
|
||||
}
|
||||
/* Unmap area from memcap */
|
||||
memcap_unmap_range(cap, &kcont->physmem_free,
|
||||
cap->start, cap->start +
|
||||
__pfn(page_align_up((bufsize))));
|
||||
|
||||
/* TODO: Manipulate memcaps for virtual range??? */
|
||||
|
||||
/* Initialize the cache */
|
||||
return mem_cache_init((void *)virtual, bufsize,
|
||||
PGD_SIZE, 1);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initializes kernel caplists, and sets up total of physical
|
||||
* and virtual memory as single capabilities of the kernel.
|
||||
* They will then get split into caps of different lengths
|
||||
* during the traversal of container capabilities.
|
||||
* during the traversal of container capabilities, and memcache
|
||||
* allocations.
|
||||
*/
|
||||
void init_kernel_container(struct kernel_container *kcont)
|
||||
{
|
||||
struct capability *physmem, *virtmem, *kernel_area;
|
||||
|
||||
/* Initialize system id pools */
|
||||
kcont->space_ids.nwords = SYSTEM_IDS_MAX;
|
||||
kcont->ktcb_ids.nwords = SYSTEM_IDS_MAX;
|
||||
kcont->resource_ids.nwords = SYSTEM_IDS_MAX;
|
||||
kcont->container_ids.nwords = SYSTEM_IDS_MAX;
|
||||
kcont->mutex_ids.nwords = SYSTEM_IDS_MAX;
|
||||
kcont->capability_ids.nwords = SYSTEM_IDS_MAX;
|
||||
|
||||
/* Get first container id for itself */
|
||||
kcont->cid = id_new(&kcont->container_ids);
|
||||
|
||||
/* Initialize kernel capability lists */
|
||||
cap_list_init(&kcont->physmem_used);
|
||||
cap_list_init(&kcont->physmem_free);
|
||||
@@ -318,40 +295,164 @@ void init_kernel_container(struct kernel_container *kcont)
|
||||
*/
|
||||
}
|
||||
|
||||
void create_containers(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
/*
|
||||
* Copies cinfo structures to real capabilities for each pager.
|
||||
*
|
||||
* FIXME: Check if pager has enough resources to create its caps.
|
||||
*/
|
||||
int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
|
||||
{
|
||||
struct capability *cap;
|
||||
struct cap_info *cap_info;
|
||||
|
||||
pager->start_lma = pinfo->pager_lma;
|
||||
pager->start_vma = pinfo->pager_vma;
|
||||
pager->memsize = pinfo->pager_size;
|
||||
|
||||
/* Copy all cinfo structures into real capabilities */
|
||||
for (int i = 0; i < pinfo->ncaps; i++) {
|
||||
cap = capability_create();
|
||||
|
||||
cap_info = &pinfo->caps[i];
|
||||
|
||||
cap->type = cap_info->type;
|
||||
cap->access = cap_info->access;
|
||||
cap->start = cap_info->start;
|
||||
cap->end = cap_info->end;
|
||||
cap->size = cap_info->size;
|
||||
|
||||
cap_list_insert(cap, &pager->cap_list);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void create_capabilities(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
/*
|
||||
* Copies container info from a given compact container descriptor to
|
||||
* a real container
|
||||
*/
|
||||
int copy_container_info(struct container *c, struct container_info *cinfo)
|
||||
{
|
||||
strncpy(c->name, cinfo->name, CONFIG_CONTAINER_NAMESIZE);
|
||||
c->npagers = cinfo->npagers;
|
||||
|
||||
/* Copy capabilities */
|
||||
for (int i = 0; i < c->npagers; i++)
|
||||
copy_pager_info(&c->pager[i], &cinfo->pager[i]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create real containers from compile-time created cinfo structures
|
||||
*/
|
||||
void setup_containers(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
struct container *container;
|
||||
pgd_table_t *current_pgd;
|
||||
|
||||
/*
|
||||
* Move to real page tables, accounted by
|
||||
* pgds and pmds provided from the caches
|
||||
*/
|
||||
current_pgd = realloc_page_tables();
|
||||
|
||||
/* Create all containers but leave pagers */
|
||||
for (int i = 0; i < bootres->nconts; i++) {
|
||||
/* Allocate & init container */
|
||||
container = container_create();
|
||||
|
||||
/* Fill in its information */
|
||||
copy_container_info(container, &cinfo[i]);
|
||||
|
||||
/* Add it to kernel container list */
|
||||
kcont_insert_container(container, kcont);
|
||||
}
|
||||
|
||||
/* Initialize pagers */
|
||||
container_init_pagers(kcont, current_pgd);
|
||||
}
|
||||
|
||||
void setup_capabilities(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure to count boot pmds, and kernel capabilities
|
||||
* created in boot memory.
|
||||
* Given a structure size and numbers, it initializes a memory cache
|
||||
* using free memory available from free kernel memory capabilities.
|
||||
*/
|
||||
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
|
||||
struct kernel_container *kcont,
|
||||
int aligned)
|
||||
{
|
||||
struct capability *cap;
|
||||
unsigned long bufsize;
|
||||
|
||||
/* In all unused physical memory regions */
|
||||
list_foreach_struct(cap, &kcont->physmem_free.caps, list) {
|
||||
/* Get buffer size needed for cache */
|
||||
bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
|
||||
struct_size, nstruct,
|
||||
aligned);
|
||||
/*
|
||||
* Check if memcap region size is enough to cover
|
||||
* resource allocation
|
||||
*/
|
||||
if (__pfn_to_addr(cap->end - cap->start) >= bufsize) {
|
||||
unsigned long virtual =
|
||||
phys_to_virt(__pfn_to_addr(cap->start));
|
||||
/*
|
||||
* Map the buffer as boot mapping if pmd caches
|
||||
* are not initialized
|
||||
*/
|
||||
if (!kcont->pmd_cache) {
|
||||
add_boot_mapping(__pfn_to_addr(cap->start),
|
||||
virtual,
|
||||
page_align_up(bufsize),
|
||||
MAP_SVC_RW_FLAGS);
|
||||
} else {
|
||||
add_mapping(__pfn_to_addr(cap->start),
|
||||
virtual, page_align_up(bufsize),
|
||||
MAP_SVC_RW_FLAGS);
|
||||
}
|
||||
/* Unmap area from memcap */
|
||||
memcap_unmap_range(cap, &kcont->physmem_free,
|
||||
cap->start, cap->start +
|
||||
__pfn(page_align_up((bufsize))));
|
||||
|
||||
/* TODO: Manipulate memcaps for virtual range??? */
|
||||
|
||||
/* Initialize the cache */
|
||||
return mem_cache_init((void *)virtual, bufsize,
|
||||
struct_size, 1);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Initialize ID cache
|
||||
*
|
||||
* Also total capabilities in the system + number of
|
||||
* capabilities containers are allowed to create dynamically.
|
||||
*
|
||||
* Count the extra pgd + space needed in case all containers quit
|
||||
* Given a kernel container and the set of boot resources required,
|
||||
* initializes all memory caches for allocations. Once caches are
|
||||
* initialized, earlier boot allocations are migrated to caches.
|
||||
*/
|
||||
void init_resource_allocators(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
/*
|
||||
* An extra space reserved for kernel
|
||||
* in case all containers quit
|
||||
*/
|
||||
bootres->nspaces++;
|
||||
|
||||
/* Initialise PGD cache */
|
||||
kcont->pgd_cache = init_resource_cache(bootres->nspaces,
|
||||
PGD_SIZE, kcont, 1);
|
||||
|
||||
/* Initialise PMD cache */
|
||||
kcont->pmd_cache = init_resource_cache(bootres->npmds,
|
||||
PMD_SIZE, kcont, 1);
|
||||
|
||||
/* Initialise struct address_space cache */
|
||||
kcont->address_space_cache =
|
||||
kcont->space_cache =
|
||||
init_resource_cache(bootres->nspaces,
|
||||
sizeof(struct address_space),
|
||||
kcont, 0);
|
||||
@@ -364,33 +465,122 @@ void init_resource_allocators(struct boot_resources *bootres,
|
||||
kcont->mutex_cache = init_resource_cache(bootres->nmutex,
|
||||
sizeof(struct mutex_queue),
|
||||
kcont, 0);
|
||||
/* TODO: Initialize ID cache */
|
||||
|
||||
/* Initialise capability cache */
|
||||
kcont->cap_cache = init_resource_cache(bootres->ncaps, /* FIXME: Count correctly */
|
||||
sizeof(struct capability),
|
||||
kcont, 0);
|
||||
/* Initialise container cache */
|
||||
kcont->cont_cache = init_resource_cache(bootres->nconts,
|
||||
sizeof(struct container),
|
||||
kcont, 0);
|
||||
|
||||
/* Create system containers */
|
||||
create_containers(bootres, kcont);
|
||||
/*
|
||||
* Add all caps used by the kernel + two extra in case
|
||||
* more memcaps get split after cap cache init below.
|
||||
*/
|
||||
bootres->ncaps += kcont->virtmem_used.ncaps +
|
||||
kcont->virtmem_free.ncaps +
|
||||
kcont->physmem_used.ncaps +
|
||||
kcont->physmem_free.ncaps + 2;
|
||||
|
||||
/* Create capabilities */
|
||||
create_capabilities(bootres, kcont);
|
||||
/* Initialise capability cache */
|
||||
kcont->cap_cache = init_resource_cache(bootres->ncaps,
|
||||
sizeof(struct capability),
|
||||
kcont, 0);
|
||||
|
||||
/* Count boot pmds used so far and add them */
|
||||
bootres->npmds += pgd_count_pmds(&init_pgd);
|
||||
|
||||
/*
|
||||
* Calculate maximum possible pmds
|
||||
* that may be used during this pmd
|
||||
* cache init and add them.
|
||||
*/
|
||||
bootres->npmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
|
||||
if (!is_aligned(bootres->npmds * PMD_SIZE, PMD_MAP_SIZE))
|
||||
bootres->npmds++;
|
||||
|
||||
/* Initialise PMD cache */
|
||||
kcont->pmd_cache = init_resource_cache(bootres->npmds,
|
||||
PMD_SIZE, kcont, 1);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* Do all system accounting for a given capability info
|
||||
* structure that belongs to a container, such as
|
||||
* count its resource requirements, remove its portion
|
||||
* from global kernel resource capabilities etc.
|
||||
*/
|
||||
int process_cap_info(struct cap_info *cap,
|
||||
struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (cap->type & CAP_RTYPE_MASK) {
|
||||
case CAP_RTYPE_THREADPOOL:
|
||||
bootres->nthreads += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_SPACEPOOL:
|
||||
bootres->nspaces += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_MUTEXPOOL:
|
||||
bootres->nmutex += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_MAPPOOL:
|
||||
/* Specifies how many pmds can be mapped */
|
||||
bootres->npmds += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_CAPPOOL:
|
||||
/* Specifies how many new caps can be created */
|
||||
bootres->ncaps += cap->size;
|
||||
break;
|
||||
|
||||
case CAP_RTYPE_VIRTMEM:
|
||||
if ((ret = memcap_unmap(&kcont->virtmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
case CAP_RTYPE_PHYSMEM:
|
||||
if ((ret = memcap_unmap(&kcont->physmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int init_boot_resources(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
/*
|
||||
* Initializes the kernel container by describing both virtual
|
||||
* and physical memory. Then traverses cap_info structures
|
||||
* to figure out resource requirements of containers.
|
||||
*/
|
||||
int setup_boot_resources(struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
struct cap_info *cap;
|
||||
|
||||
init_kernel_container(kcont);
|
||||
|
||||
/* Number of containers known at compile-time */
|
||||
bootres->nconts = TOTAL_CONTAINERS;
|
||||
bootres->nconts = CONFIG_TOTAL_CONTAINERS;
|
||||
|
||||
/* Traverse all containers */
|
||||
for (int i = 0; i < bootres->nconts; i++) {
|
||||
@@ -409,29 +599,34 @@ int init_boot_resources(struct boot_resources *bootres,
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: Count all ids needed to represent all */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: Add error handling
|
||||
*
|
||||
* Initializes all system resources and handling of those
|
||||
* resources. First descriptions are done by allocating from
|
||||
* boot memory, once memory caches are initialized, boot
|
||||
* memory allocations are migrated over to caches.
|
||||
*/
|
||||
int init_system_resources(struct kernel_container *kcont)
|
||||
{
|
||||
|
||||
struct boot_resources bootres;
|
||||
|
||||
memset(&bootres, 0, sizeof(bootres));
|
||||
|
||||
init_boot_resources(&bootres, kcont);
|
||||
setup_boot_resources(&bootres, kcont);
|
||||
|
||||
init_resource_allocators(&bootres, kcont);
|
||||
|
||||
free_boot_memory(&bootres, kcont);
|
||||
/* Create system containers */
|
||||
setup_containers(&bootres, kcont);
|
||||
|
||||
/* Create capabilities */
|
||||
setup_capabilities(&bootres, kcont);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -11,6 +11,8 @@
|
||||
#include <l4/lib/bit.h>
|
||||
#include <l4/lib/spinlock.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/container.h>
|
||||
#include <l4/generic/preempt.h>
|
||||
#include <l4/generic/irq.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
@@ -23,9 +25,8 @@
|
||||
#include INC_ARCH(exception.h)
|
||||
|
||||
|
||||
static struct runqueue sched_rq[SCHED_RQ_TOTAL];
|
||||
static struct runqueue *rq_runnable, *rq_expired;
|
||||
static int prio_total; /* Total priority of all tasks */
|
||||
//static struct runqueue *rq_runnable, *rq_expired;
|
||||
//static int prio_total; /* Total priority of all tasks */
|
||||
|
||||
/* This is incremented on each irq or voluntarily by preempt_disable() */
|
||||
extern unsigned int current_irq_nest_count;
|
||||
@@ -35,14 +36,14 @@ static int voluntary_preempt = 0;
|
||||
|
||||
void sched_lock_runqueues(void)
|
||||
{
|
||||
spin_lock(&sched_rq[0].lock);
|
||||
spin_lock(&sched_rq[1].lock);
|
||||
spin_lock(&curcont->scheduler.sched_rq[0].lock);
|
||||
spin_lock(&curcont->scheduler.sched_rq[1].lock);
|
||||
}
|
||||
|
||||
void sched_unlock_runqueues(void)
|
||||
{
|
||||
spin_unlock(&sched_rq[0].lock);
|
||||
spin_unlock(&sched_rq[1].lock);
|
||||
spin_unlock(&curcont->scheduler.sched_rq[0].lock);
|
||||
spin_unlock(&curcont->scheduler.sched_rq[1].lock);
|
||||
}
|
||||
|
||||
int preemptive()
|
||||
@@ -105,17 +106,21 @@ void idle_task(void)
|
||||
while(1);
|
||||
}
|
||||
|
||||
void sched_init_runqueues(void)
|
||||
void sched_init_runqueue(struct runqueue *rq)
|
||||
{
|
||||
for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
|
||||
memset(&sched_rq[i], 0, sizeof(struct runqueue));
|
||||
link_init(&sched_rq[i].task_list);
|
||||
spin_lock_init(&sched_rq[i].lock);
|
||||
}
|
||||
memset(rq, 0, sizeof(struct runqueue));
|
||||
link_init(&rq->task_list);
|
||||
spin_lock_init(&rq->lock);
|
||||
}
|
||||
|
||||
rq_runnable = &sched_rq[0];
|
||||
rq_expired = &sched_rq[1];
|
||||
prio_total = 0;
|
||||
void sched_init(struct scheduler *scheduler)
|
||||
{
|
||||
for (int i = 0; i < SCHED_RQ_TOTAL; i++)
|
||||
sched_init_runqueue(&scheduler->sched_rq[i]);
|
||||
|
||||
scheduler->rq_runnable = &scheduler->sched_rq[0];
|
||||
scheduler->rq_expired = &scheduler->sched_rq[1];
|
||||
scheduler->prio_total = 0;
|
||||
}
|
||||
|
||||
/* Swap runnable and expired runqueues. */
|
||||
@@ -123,13 +128,14 @@ static void sched_rq_swap_runqueues(void)
|
||||
{
|
||||
struct runqueue *temp;
|
||||
|
||||
BUG_ON(list_empty(&rq_expired->task_list));
|
||||
BUG_ON(rq_expired->total == 0);
|
||||
BUG_ON(list_empty(&curcont->scheduler.rq_expired->task_list));
|
||||
BUG_ON(curcont->scheduler.rq_expired->total == 0);
|
||||
|
||||
/* Queues are swapped and expired list becomes runnable */
|
||||
temp = rq_runnable;
|
||||
rq_runnable = rq_expired;
|
||||
rq_expired = temp;
|
||||
temp = curcont->scheduler.rq_runnable;
|
||||
curcont->scheduler.rq_runnable =
|
||||
curcont->scheduler.rq_expired;
|
||||
curcont->scheduler.rq_expired = temp;
|
||||
}
|
||||
|
||||
/* Set policy on where to add tasks in the runqueue */
|
||||
@@ -202,7 +208,9 @@ void sched_resume_sync(struct ktcb *task)
|
||||
{
|
||||
BUG_ON(task == current);
|
||||
task->state = TASK_RUNNABLE;
|
||||
sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
|
||||
sched_rq_add_task(task,
|
||||
curcont->scheduler.rq_runnable,
|
||||
RQ_ADD_FRONT);
|
||||
schedule();
|
||||
}
|
||||
|
||||
@@ -215,7 +223,7 @@ void sched_resume_sync(struct ktcb *task)
|
||||
void sched_resume_async(struct ktcb *task)
|
||||
{
|
||||
task->state = TASK_RUNNABLE;
|
||||
sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
|
||||
sched_rq_add_task(task, curcont->scheduler.rq_runnable, RQ_ADD_FRONT);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -228,8 +236,8 @@ void sched_suspend_sync(void)
|
||||
sched_rq_remove_task(current);
|
||||
current->state = TASK_INACTIVE;
|
||||
current->flags &= ~TASK_SUSPENDING;
|
||||
prio_total -= current->priority;
|
||||
BUG_ON(prio_total <= 0);
|
||||
curcont->scheduler.prio_total -= current->priority;
|
||||
BUG_ON(curcont->scheduler.prio_total <= 0);
|
||||
preempt_enable();
|
||||
|
||||
/* Async wake up any waiters */
|
||||
@@ -243,8 +251,8 @@ void sched_suspend_async(void)
|
||||
sched_rq_remove_task(current);
|
||||
current->state = TASK_INACTIVE;
|
||||
current->flags &= ~TASK_SUSPENDING;
|
||||
prio_total -= current->priority;
|
||||
BUG_ON(prio_total <= 0);
|
||||
curcont->scheduler.prio_total -= current->priority;
|
||||
BUG_ON(curcont->scheduler.prio_total <= 0);
|
||||
|
||||
/* This will make sure we yield soon */
|
||||
preempt_enable();
|
||||
@@ -338,9 +346,13 @@ void schedule()
|
||||
if (current->state == TASK_RUNNABLE) {
|
||||
sched_rq_remove_task(current);
|
||||
if (current->ticks_left)
|
||||
sched_rq_add_task(current, rq_runnable, RQ_ADD_BEHIND);
|
||||
sched_rq_add_task(current,
|
||||
curcont->scheduler.rq_runnable,
|
||||
RQ_ADD_BEHIND);
|
||||
else
|
||||
sched_rq_add_task(current, rq_expired, RQ_ADD_BEHIND);
|
||||
sched_rq_add_task(current,
|
||||
curcont->scheduler.rq_expired,
|
||||
RQ_ADD_BEHIND);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -352,14 +364,16 @@ void schedule()
|
||||
wake_up_task(current, WAKEUP_INTERRUPT);
|
||||
|
||||
/* Determine the next task to be run */
|
||||
if (rq_runnable->total > 0) {
|
||||
next = link_to_struct(rq_runnable->task_list.next,
|
||||
struct ktcb, rq_list);
|
||||
if (curcont->scheduler.rq_runnable->total > 0) {
|
||||
next = link_to_struct(
|
||||
curcont->scheduler.rq_runnable->task_list.next,
|
||||
struct ktcb, rq_list);
|
||||
} else {
|
||||
if (rq_expired->total > 0) {
|
||||
if (curcont->scheduler.rq_expired->total > 0) {
|
||||
sched_rq_swap_runqueues();
|
||||
next = link_to_struct(rq_runnable->task_list.next,
|
||||
struct ktcb, rq_list);
|
||||
next = link_to_struct(
|
||||
curcont->scheduler.rq_runnable->task_list.next,
|
||||
struct ktcb, rq_list);
|
||||
} else {
|
||||
idle_task();
|
||||
}
|
||||
@@ -367,7 +381,7 @@ void schedule()
|
||||
|
||||
/* New tasks affect runqueue total priority. */
|
||||
if (next->flags & TASK_RESUMING) {
|
||||
prio_total += next->priority;
|
||||
curcont->scheduler.prio_total += next->priority;
|
||||
next->flags &= ~TASK_RESUMING;
|
||||
}
|
||||
|
||||
@@ -378,7 +392,7 @@ void schedule()
|
||||
* becomes runnable rather than all at once. It is done
|
||||
* every runqueue swap
|
||||
*/
|
||||
sched_recalc_ticks(next, prio_total);
|
||||
sched_recalc_ticks(next, curcont->scheduler.prio_total);
|
||||
next->ticks_left = next->ticks_assigned;
|
||||
}
|
||||
|
||||
@@ -392,25 +406,11 @@ void schedule()
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise pager as runnable for first-ever scheduling,
|
||||
* and start the scheduler.
|
||||
* Start the timer and switch to current task
|
||||
* for first-ever scheduling.
|
||||
*/
|
||||
void scheduler_start()
|
||||
{
|
||||
/* Initialise runqueues */
|
||||
sched_init_runqueues();
|
||||
|
||||
/* Initialise scheduler fields of pager */
|
||||
sched_init_task(current, TASK_PRIO_PAGER);
|
||||
|
||||
/* Add task to runqueue first */
|
||||
sched_rq_add_task(current, rq_runnable, RQ_ADD_FRONT);
|
||||
|
||||
/* Give it a kick-start tick and make runnable */
|
||||
current->ticks_left = 1;
|
||||
current->state = TASK_RUNNABLE;
|
||||
|
||||
/* Start the timer and switch */
|
||||
timer_start();
|
||||
switch_to_user(current);
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include INC_ARCH(exception.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/container.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/generic/kmalloc.h>
|
||||
#include <l4/api/space.h>
|
||||
@@ -16,25 +17,23 @@
|
||||
#include <l4/lib/idpool.h>
|
||||
|
||||
|
||||
static struct address_space_list address_space_list;
|
||||
|
||||
void init_address_space_list(void)
|
||||
void init_address_space_list(struct address_space_list *space_list)
|
||||
{
|
||||
memset(&address_space_list, 0, sizeof(address_space_list));
|
||||
memset(space_list, 0, sizeof(*space_list));
|
||||
|
||||
mutex_init(&address_space_list.ref_lock);
|
||||
spin_lock_init(&address_space_list.list_lock);
|
||||
link_init(&address_space_list.list);
|
||||
mutex_init(&space_list->ref_lock);
|
||||
spin_lock_init(&space_list->list_lock);
|
||||
link_init(&space_list->list);
|
||||
}
|
||||
|
||||
void address_space_reference_lock()
|
||||
{
|
||||
mutex_lock(&address_space_list.ref_lock);
|
||||
mutex_lock(&curcont->space_list.ref_lock);
|
||||
}
|
||||
|
||||
void address_space_reference_unlock()
|
||||
{
|
||||
mutex_unlock(&address_space_list.ref_lock);
|
||||
mutex_unlock(&curcont->space_list.ref_lock);
|
||||
}
|
||||
|
||||
void address_space_attach(struct ktcb *tcb, struct address_space *space)
|
||||
@@ -47,33 +46,33 @@ struct address_space *address_space_find(l4id_t spid)
|
||||
{
|
||||
struct address_space *space;
|
||||
|
||||
spin_lock(&address_space_list.list_lock);
|
||||
list_foreach_struct(space, &address_space_list.list, list) {
|
||||
spin_lock(&curcont->space_list.list_lock);
|
||||
list_foreach_struct(space, &curcont->space_list.list, list) {
|
||||
if (space->spid == spid) {
|
||||
spin_unlock(&address_space_list.list_lock);
|
||||
spin_unlock(&curcont->space_list.list_lock);
|
||||
return space;
|
||||
}
|
||||
}
|
||||
spin_unlock(&address_space_list.list_lock);
|
||||
spin_unlock(&curcont->space_list.list_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void address_space_add(struct address_space *space)
|
||||
{
|
||||
spin_lock(&address_space_list.list_lock);
|
||||
spin_lock(&curcont->space_list.list_lock);
|
||||
BUG_ON(!list_empty(&space->list));
|
||||
list_insert(&space->list, &address_space_list.list);
|
||||
BUG_ON(!++address_space_list.count);
|
||||
spin_unlock(&address_space_list.list_lock);
|
||||
list_insert(&space->list, &curcont->space_list.list);
|
||||
BUG_ON(!++curcont->space_list.count);
|
||||
spin_unlock(&curcont->space_list.list_lock);
|
||||
}
|
||||
|
||||
void address_space_remove(struct address_space *space)
|
||||
{
|
||||
spin_lock(&address_space_list.list_lock);
|
||||
spin_lock(&curcont->space_list.list_lock);
|
||||
BUG_ON(list_empty(&space->list));
|
||||
BUG_ON(--address_space_list.count < 0);
|
||||
BUG_ON(--curcont->space_list.count < 0);
|
||||
list_remove_init(&space->list);
|
||||
spin_unlock(&address_space_list.list_lock);
|
||||
spin_unlock(&curcont->space_list.list_lock);
|
||||
}
|
||||
|
||||
/* Assumes address space reflock is already held */
|
||||
@@ -98,12 +97,12 @@ struct address_space *address_space_create(struct address_space *orig)
|
||||
int err;
|
||||
|
||||
/* Allocate space structure */
|
||||
if (!(space = kzalloc(sizeof(*space))))
|
||||
if (!(space = alloc_space()))
|
||||
return PTR_ERR(-ENOMEM);
|
||||
|
||||
/* Allocate pgd */
|
||||
if (!(pgd = alloc_pgd())) {
|
||||
kfree(space);
|
||||
free_space(space);
|
||||
return PTR_ERR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -120,7 +119,7 @@ struct address_space *address_space_create(struct address_space *orig)
|
||||
* is not allowed since spid field is used to indicate the space to
|
||||
* copy from.
|
||||
*/
|
||||
space->spid = id_new(space_id_pool);
|
||||
space->spid = id_new(&kernel_container.space_ids);
|
||||
|
||||
/* If an original space is supplied */
|
||||
if (orig) {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/container.h>
|
||||
#include <l4/generic/preempt.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/lib/idpool.h>
|
||||
@@ -20,17 +21,18 @@ struct id_pool *thread_id_pool;
|
||||
struct id_pool *space_id_pool;
|
||||
|
||||
|
||||
static struct ktcb_list ktcb_list;
|
||||
|
||||
void init_ktcb_list(void)
|
||||
void init_ktcb_list(struct ktcb_list *ktcb_list)
|
||||
{
|
||||
memset(&ktcb_list, 0, sizeof(ktcb_list));
|
||||
spin_lock_init(&ktcb_list.list_lock);
|
||||
link_init(&ktcb_list.list);
|
||||
memset(ktcb_list, 0, sizeof(*ktcb_list));
|
||||
spin_lock_init(&ktcb_list->list_lock);
|
||||
link_init(&ktcb_list->list);
|
||||
}
|
||||
|
||||
void tcb_init(struct ktcb *new)
|
||||
{
|
||||
new->tid = id_new(&kernel_container.ktcb_ids);
|
||||
new->tgid = new->tid;
|
||||
|
||||
link_init(&new->task_list);
|
||||
mutex_init(&new->thread_control_lock);
|
||||
|
||||
@@ -46,7 +48,7 @@ void tcb_init(struct ktcb *new)
|
||||
|
||||
struct ktcb *tcb_alloc(void)
|
||||
{
|
||||
return zalloc_page();
|
||||
return alloc_ktcb();
|
||||
}
|
||||
|
||||
struct ktcb *tcb_alloc_init(void)
|
||||
@@ -93,21 +95,21 @@ void tcb_delete(struct ktcb *tcb)
|
||||
id_del(thread_id_pool, tcb->tid);
|
||||
|
||||
/* Free the tcb */
|
||||
free_page(tcb);
|
||||
free_ktcb(tcb);
|
||||
}
|
||||
|
||||
struct ktcb *tcb_find_by_space(l4id_t spid)
|
||||
{
|
||||
struct ktcb *task;
|
||||
|
||||
spin_lock(&ktcb_list.list_lock);
|
||||
list_foreach_struct(task, &ktcb_list.list, task_list) {
|
||||
spin_lock(&curcont->ktcb_list.list_lock);
|
||||
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
|
||||
if (task->space->spid == spid) {
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
return task;
|
||||
}
|
||||
}
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -115,33 +117,33 @@ struct ktcb *tcb_find(l4id_t tid)
|
||||
{
|
||||
struct ktcb *task;
|
||||
|
||||
spin_lock(&ktcb_list.list_lock);
|
||||
list_foreach_struct(task, &ktcb_list.list, task_list) {
|
||||
spin_lock(&curcont->ktcb_list.list_lock);
|
||||
list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
|
||||
if (task->tid == tid) {
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
return task;
|
||||
}
|
||||
}
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tcb_add(struct ktcb *new)
|
||||
{
|
||||
spin_lock(&ktcb_list.list_lock);
|
||||
spin_lock(&curcont->ktcb_list.list_lock);
|
||||
BUG_ON(!list_empty(&new->task_list));
|
||||
BUG_ON(!++ktcb_list.count);
|
||||
list_insert(&new->task_list, &ktcb_list.list);
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
BUG_ON(!++curcont->ktcb_list.count);
|
||||
list_insert(&new->task_list, &curcont->ktcb_list.list);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
}
|
||||
|
||||
void tcb_remove(struct ktcb *new)
|
||||
{
|
||||
spin_lock(&ktcb_list.list_lock);
|
||||
spin_lock(&curcont->ktcb_list.list_lock);
|
||||
BUG_ON(list_empty(&new->task_list));
|
||||
BUG_ON(--ktcb_list.count < 0);
|
||||
BUG_ON(--curcont->ktcb_list.count < 0);
|
||||
list_remove_init(&new->task_list);
|
||||
spin_unlock(&ktcb_list.list_lock);
|
||||
spin_unlock(&curcont->ktcb_list.list_lock);
|
||||
}
|
||||
|
||||
/* Offsets for ktcb fields that are accessed from assembler */
|