Container/Pager/Capability initialization that works.

Need to safely free boot memory and jump to first task's stack.
Need to test scheduler and all syscall entries.
This commit is contained in:
Bahadir Balban
2009-08-04 13:56:11 +03:00
parent 7e8845abf8
commit 519bfba8c7
12 changed files with 150 additions and 126 deletions

View File

@@ -72,11 +72,12 @@ SECTIONS
{ {
. = ALIGN(16K); /* For initial pgd */ . = ALIGN(16K); /* For initial pgd */
*(.init.pgd) *(.init.pgd)
*(.init.data)
*(.init.bootmem) *(.init.bootmem)
*(.init.data)
} }
/* Space for boot stack */ /* Space for boot stack */
. += 0x1000; . += 0x1000;
. = ALIGN(4K); /* A page aligned stack of at least 4KB */
_end_init = .; _end_init = .;
_bootstack = .; _bootstack = .;
_end_kernel = .; _end_kernel = .;

View File

@@ -69,6 +69,20 @@ struct cap_list {
void capability_init(struct capability *cap); void capability_init(struct capability *cap);
struct capability *capability_create(void); struct capability *capability_create(void);
/* Reset a capability list to the empty state (no members, zero count) */
static inline void cap_list_init(struct cap_list *clist)
{
	link_init(&clist->caps);
	clist->ncaps = 0;
}
/* Link @cap onto @clist, keeping the member count in sync */
static inline void cap_list_insert(struct capability *cap,
				   struct cap_list *clist)
{
	clist->ncaps++;
	list_insert(&cap->list, &clist->caps);
}
#if 0 #if 0
/* Virtual memory space allocated to container */ /* Virtual memory space allocated to container */
struct capability cap_virtmap = { struct capability cap_virtmap = {

View File

@@ -35,6 +35,13 @@ struct container_head {
struct link list; struct link list;
}; };
/* Bring a container list head to a known-empty state */
static inline void
container_head_init(struct container_head *chead)
{
	link_init(&chead->list);
	chead->ncont = 0;
}
/* /*
* Everything on the platform is described and stored * Everything on the platform is described and stored
* in the structure below. * in the structure below.

View File

@@ -2,8 +2,11 @@
#define __ARM_GLUE_INIT_H__ #define __ARM_GLUE_INIT_H__
#include <l4/generic/tcb.h> #include <l4/generic/tcb.h>
#include <l4/generic/space.h>
void switch_to_user(struct ktcb *inittask); void switch_to_user(struct ktcb *inittask);
void timer_start(void); void timer_start(void);
extern struct address_space init_space;
#endif /* __ARM_GLUE_INIT_H__ */ #endif /* __ARM_GLUE_INIT_H__ */

View File

@@ -8,6 +8,7 @@
#include INC_ARCH(linker.h) #include INC_ARCH(linker.h)
#include INC_GLUE(memory.h) #include INC_GLUE(memory.h)
#include <l4/lib/printk.h> #include <l4/lib/printk.h>
#include <l4/generic/space.h>
/* All memory allocated here is discarded after boot */ /* All memory allocated here is discarded after boot */
@@ -15,6 +16,7 @@
SECTION(".init.pgd") pgd_table_t init_pgd; SECTION(".init.pgd") pgd_table_t init_pgd;
SECTION(".init.bootmem") char bootmem[BOOTMEM_SIZE]; SECTION(".init.bootmem") char bootmem[BOOTMEM_SIZE];
SECTION(".init.data") struct address_space init_space;
static unsigned long cursor = (unsigned long)&bootmem; static unsigned long cursor = (unsigned long)&bootmem;

View File

@@ -144,6 +144,10 @@ int container_init(struct container *c)
init_ktcb_list(&c->ktcb_list); init_ktcb_list(&c->ktcb_list);
init_mutex_queue_head(&c->mutex_queue_head); init_mutex_queue_head(&c->mutex_queue_head);
/* Init per-pager capability lists */
for (int i = 0; i < CONFIG_MAX_PAGERS_USED; i++) {
cap_list_init(&c->pager[i].cap_list);
}
/* Init scheduler */ /* Init scheduler */
sched_init(&c->scheduler); sched_init(&c->scheduler);
@@ -166,6 +170,11 @@ void kcont_insert_container(struct container *c,
kcont->containers.ncont++; kcont->containers.ncont++;
} }
/*
 * Searches the pager's capabilities; if one defines a virtual memory
 * area as a UTCB, the first address slot is used as the task's utcb.
 * Finding none is not an error — another pager may map the utcb later.
 */
void task_setup_utcb(struct ktcb *task, struct pager *pager) void task_setup_utcb(struct ktcb *task, struct pager *pager)
{ {
struct capability *cap; struct capability *cap;
@@ -177,10 +186,30 @@ void task_setup_utcb(struct ktcb *task, struct pager *pager)
(cap->access & CAP_MAP_UTCB)) { (cap->access & CAP_MAP_UTCB)) {
/* Use first address slot as pager's utcb */ /* Use first address slot as pager's utcb */
task->utcb_address = __pfn_to_addr(cap->start); task->utcb_address = __pfn_to_addr(cap->start);
return;
} }
} }
} }
#if 0
/*
 * Unfinished helper meant to relocate the boot stack to the page
 * holding @task and continue execution there — disabled via #if 0.
 *
 * NOTE(review): as written this would not compile or work if enabled:
 *  - `sp` below is undeclared; the sp-bound register variable is
 *    named `stack`.
 *  - memcpy's destination is `&newstack` (the address of the local
 *    variable), not the new stack region it names — confirm intent.
 *  - `newstack` is aligned to sizeof(short); presumably a word or
 *    stack alignment was intended — TODO confirm before enabling.
 */
void switch_stack(struct ktcb *task)
{
register u32 stack asm("sp");
register u32 fp asm("fp");
register u32 newstack = align((unsigned long)task + PAGE_SIZE, sizeof(short));
signed long long offset = newstack - __bootstack;
fp += offset;
sp += offset;
/* Copy stack contents to new stack */
memcpy(&newstack, __bootstack, stack - __bootstack);
/* Switch to new stack */
}
#endif
/* /*
* TODO: * TODO:
* *

View File

@@ -85,18 +85,6 @@ void free_user_mutex(void *addr)
BUG_ON(mem_cache_free(kernel_container.mutex_cache, addr) < 0); BUG_ON(mem_cache_free(kernel_container.mutex_cache, addr) < 0);
} }
void cap_list_init(struct cap_list *clist)
{
clist->ncaps = 0;
link_init(&clist->caps);
}
void cap_list_insert(struct capability *cap, struct cap_list *clist)
{
list_insert(&cap->list, &clist->caps);
clist->ncaps++;
}
/* /*
* This splits a capability, splitter region must be in * This splits a capability, splitter region must be in
* the *middle* of original capability * the *middle* of original capability
@@ -252,6 +240,9 @@ void init_kernel_container(struct kernel_container *kcont)
kcont->mutex_ids.nwords = SYSTEM_IDS_MAX; kcont->mutex_ids.nwords = SYSTEM_IDS_MAX;
kcont->capability_ids.nwords = SYSTEM_IDS_MAX; kcont->capability_ids.nwords = SYSTEM_IDS_MAX;
/* Initialize container head */
container_head_init(&kcont->containers);
/* Get first container id for itself */ /* Get first container id for itself */
kcont->cid = id_new(&kcont->container_ids); kcont->cid = id_new(&kcont->container_ids);
@@ -305,9 +296,9 @@ int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
struct capability *cap; struct capability *cap;
struct cap_info *cap_info; struct cap_info *cap_info;
pager->start_lma = pinfo->pager_lma; pager->start_lma = __pfn_to_addr(pinfo->pager_lma);
pager->start_vma = pinfo->pager_vma; pager->start_vma = __pfn_to_addr(pinfo->pager_vma);
pager->memsize = pinfo->pager_size; pager->memsize = __pfn_to_addr(pinfo->pager_size);
/* Copy all cinfo structures into real capabilities */ /* Copy all cinfo structures into real capabilities */
for (int i = 0; i < pinfo->ncaps; i++) { for (int i = 0; i < pinfo->ncaps; i++) {
@@ -353,6 +344,10 @@ void setup_containers(struct boot_resources *bootres,
/* /*
* Move to real page tables, accounted by * Move to real page tables, accounted by
* pgds and pmds provided from the caches * pgds and pmds provided from the caches
*
* We do not want to delay this too much,
* since we want to avoid allocating an uncertain
* amount of memory from the boot allocators.
*/ */
current_pgd = realloc_page_tables(); current_pgd = realloc_page_tables();
@@ -372,10 +367,51 @@ void setup_containers(struct boot_resources *bootres,
container_init_pagers(kcont, current_pgd); container_init_pagers(kcont, current_pgd);
} }
void setup_capabilities(struct boot_resources *bootres, /*
struct kernel_container *kcont) * Copy boot-time allocated kernel capabilities to ones that
* are allocated from the capability memcache
*/
/*
 * Replaces every boot-time allocated capability on @caplist with an
 * equivalent one allocated from the capability memcache. All fields
 * are carried over except the id, which capability_create() assigns
 * fresh; the boot copy is unlinked and the cache-backed copy linked
 * in at the list head.
 */
void copy_boot_capabilities(struct cap_list *caplist)
{
	struct capability *boot, *next, *real;

	list_foreach_removable_struct(boot, next, &caplist->caps, list) {
		/* Allocate the cache-backed replacement */
		real = capability_create();

		/* Carry over every field except the (new) id */
		real->owner = boot->owner;
		real->resid = boot->resid;
		real->type = boot->type;
		real->access = boot->access;
		real->start = boot->start;
		real->end = boot->end;

		/* Swap the boot copy for the real one on the list */
		list_remove(&boot->list);
		list_insert(&real->list, &caplist->caps);
	}
}
/*
* Creates capabilities allocated with a real id, and from the
* capability cache, in place of ones allocated at boot-time.
*/
/*
 * Re-creates the kernel container's boot-time capabilities as real
 * ones, allocated with proper ids from the capability cache. Walks
 * each of the kernel container's capability lists in turn.
 */
void kcont_setup_capabilities(struct boot_resources *bootres,
			      struct kernel_container *kcont)
{
	struct cap_list *boot_lists[] = {
		&kcont->physmem_used, &kcont->physmem_free,
		&kcont->virtmem_used, &kcont->virtmem_free,
		&kcont->devmem_used, &kcont->devmem_free,
	};

	for (unsigned int i = 0;
	     i < sizeof(boot_lists) / sizeof(boot_lists[0]); i++)
		copy_boot_capabilities(boot_lists[i]);
}
/* /*
@@ -412,9 +448,9 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
page_align_up(bufsize), page_align_up(bufsize),
MAP_SVC_RW_FLAGS); MAP_SVC_RW_FLAGS);
} else { } else {
add_mapping(__pfn_to_addr(cap->start), add_mapping_pgd(__pfn_to_addr(cap->start),
virtual, page_align_up(bufsize), virtual, page_align_up(bufsize),
MAP_SVC_RW_FLAGS); MAP_SVC_RW_FLAGS, &init_pgd);
} }
/* Unmap area from memcap */ /* Unmap area from memcap */
memcap_unmap_range(cap, &kcont->physmem_free, memcap_unmap_range(cap, &kcont->physmem_free,
@@ -425,7 +461,7 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
/* Initialize the cache */ /* Initialize the cache */
return mem_cache_init((void *)virtual, bufsize, return mem_cache_init((void *)virtual, bufsize,
struct_size, 1); struct_size, aligned);
} }
} }
return 0; return 0;
@@ -602,6 +638,7 @@ int setup_boot_resources(struct boot_resources *bootres,
return 0; return 0;
} }
/* /*
* FIXME: Add error handling * FIXME: Add error handling
* *
@@ -612,6 +649,7 @@ int setup_boot_resources(struct boot_resources *bootres,
*/ */
int init_system_resources(struct kernel_container *kcont) int init_system_resources(struct kernel_container *kcont)
{ {
/* FIXME: Count kernel resources */
struct boot_resources bootres; struct boot_resources bootres;
memset(&bootres, 0, sizeof(bootres)); memset(&bootres, 0, sizeof(bootres));
@@ -623,8 +661,8 @@ int init_system_resources(struct kernel_container *kcont)
/* Create system containers */ /* Create system containers */
setup_containers(&bootres, kcont); setup_containers(&bootres, kcont);
/* Create capabilities */ /* Create real capabilities */
setup_capabilities(&bootres, kcont); kcont_setup_capabilities(&bootres, kcont);
return 0; return 0;
} }

View File

@@ -209,7 +209,7 @@ void sched_resume_sync(struct ktcb *task)
BUG_ON(task == current); BUG_ON(task == current);
task->state = TASK_RUNNABLE; task->state = TASK_RUNNABLE;
sched_rq_add_task(task, sched_rq_add_task(task,
curcont->scheduler.rq_runnable, task->container->scheduler.rq_runnable,
RQ_ADD_FRONT); RQ_ADD_FRONT);
schedule(); schedule();
} }
@@ -223,7 +223,9 @@ void sched_resume_sync(struct ktcb *task)
void sched_resume_async(struct ktcb *task) void sched_resume_async(struct ktcb *task)
{ {
task->state = TASK_RUNNABLE; task->state = TASK_RUNNABLE;
sched_rq_add_task(task, curcont->scheduler.rq_runnable, RQ_ADD_FRONT); sched_rq_add_task(task,
task->container->scheduler.rq_runnable,
RQ_ADD_FRONT);
} }
/* /*

View File

@@ -130,11 +130,13 @@ struct ktcb *tcb_find(l4id_t tid)
void tcb_add(struct ktcb *new) void tcb_add(struct ktcb *new)
{ {
spin_lock(&curcont->ktcb_list.list_lock); struct container *c = new->container;
spin_lock(&c->ktcb_list.list_lock);
BUG_ON(!list_empty(&new->task_list)); BUG_ON(!list_empty(&new->task_list));
BUG_ON(!++curcont->ktcb_list.count); BUG_ON(!++c->ktcb_list.count);
list_insert(&new->task_list, &curcont->ktcb_list.list); list_insert(&new->task_list, &c->ktcb_list.list);
spin_unlock(&curcont->ktcb_list.list_lock); spin_unlock(&c->ktcb_list.list_lock);
} }
void tcb_remove(struct ktcb *new) void tcb_remove(struct ktcb *new)

View File

@@ -25,6 +25,7 @@
#include INC_GLUE(memory.h) #include INC_GLUE(memory.h)
#include INC_GLUE(message.h) #include INC_GLUE(message.h)
#include INC_GLUE(syscall.h) #include INC_GLUE(syscall.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h) #include INC_PLAT(platform.h)
#include INC_PLAT(printascii.h) #include INC_PLAT(printascii.h)
#include INC_API(syscall.h) #include INC_API(syscall.h)
@@ -193,8 +194,8 @@ void kip_init()
/* All thread utcbs are allocated starting from UTCB_AREA_START */ /* All thread utcbs are allocated starting from UTCB_AREA_START */
*utcb_ref = (struct utcb *)UTCB_AREA_START; *utcb_ref = (struct utcb *)UTCB_AREA_START;
add_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE, add_boot_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE,
MAP_USR_RO_FLAGS); MAP_USR_RO_FLAGS);
printk("%s: Kernel built on %s, %s\n", __KERNELNAME__, printk("%s: Kernel built on %s, %s\n", __KERNELNAME__,
kip.kdesc.date, kip.kdesc.time); kip.kdesc.date, kip.kdesc.time);
} }
@@ -357,6 +358,18 @@ void init_tasks()
// init_pager(__PAGERNAME__, &ids); // init_pager(__PAGERNAME__, &ids);
} }
/*
 * Sets up a zeroed dummy ktcb as 'current' on the boot stack so that
 * code referencing 'current' works before any real task exists. The
 * dummy runs on the initial address space and page directory
 * (init_space / init_pgd).
 */
void setup_dummy_current()
{
	memset(current, 0, sizeof(*current));

	current->space = &init_space;
	TASK_PGD(current) = &init_pgd;
}
void start_kernel(void) void start_kernel(void)
{ {
printascii("\n"__KERNELNAME__": start kernel...\n"); printascii("\n"__KERNELNAME__": start kernel...\n");
@@ -370,6 +383,9 @@ void start_kernel(void)
/* Enable virtual memory and jump to virtual addresses */ /* Enable virtual memory and jump to virtual addresses */
start_vm(); start_vm();
/* Set up a dummy current ktcb on boot stack with initial pgd */
setup_dummy_current();
/* Initialise platform-specific page mappings, and peripherals */ /* Initialise platform-specific page mappings, and peripherals */
platform_init(); platform_init();
@@ -379,7 +395,8 @@ void start_kernel(void)
vectors_init(); vectors_init();
/* Remap 1MB kernel sections as 4Kb pages. */ /* Remap 1MB kernel sections as 4Kb pages. */
remap_as_pages((void *)page_align(_start_kernel), (void *)page_align_up(_end_kernel)); remap_as_pages((void *)page_align(_start_kernel),
(void *)page_align_up(_end_kernel));
/* Move the initial pgd into a more convenient place, mapped as pages. */ /* Move the initial pgd into a more convenient place, mapped as pages. */
// relocate_page_tables(); // relocate_page_tables();

View File

@@ -135,8 +135,8 @@ void syscall_init()
syscall_table[sys_time_offset >> 2] = (syscall_fn_t)arch_sys_time; syscall_table[sys_time_offset >> 2] = (syscall_fn_t)arch_sys_time;
syscall_table[sys_mutex_control_offset >> 2] = (syscall_fn_t)arch_sys_mutex_control; syscall_table[sys_mutex_control_offset >> 2] = (syscall_fn_t)arch_sys_mutex_control;
add_mapping(virt_to_phys(&__syscall_page_start), add_boot_mapping(virt_to_phys(&__syscall_page_start),
ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS); ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RO_FLAGS);
} }
/* Checks a syscall is legitimate and dispatches to appropriate handler. */ /* Checks a syscall is legitimate and dispatches to appropriate handler. */

View File

@@ -126,97 +126,6 @@ int mem_cache_bufsize(void *start, int struct_size, int nstructs, int aligned)
return start_address - (unsigned long)start; return start_address - (unsigned long)start;
} }
#if 0
/*
 * Legacy variant of mem_cache_init(), kept disabled under #if 0 while
 * the reworked version below is in use. It carves three regions out
 * of one caller-supplied buffer: the mem_cache header itself, an
 * allocation bitmap (one bit per object), and the object area.
 * Returns the initialized cache, or 0 on bad parameters / a buffer
 * too small to hold at least one object.
 *
 * NOTE(review): dead code — candidate for deletion once the
 * replacement is proven.
 */
struct mem_cache *mem_cache_init(void *bufstart,
int cache_size,
int struct_size,
unsigned int aligned)
{
/* Align to nearest word boundary */
void *start;
struct mem_cache *cache;
unsigned int area_start;
unsigned int *bitmap;
int bwords_in_structs;
int bwords;
int total;
int bsize;
start = (void *)align_up(bufstart, sizeof(int));
/* Account for the bytes lost to the word alignment above */
cache_size -= (int)start - (int)bufstart;
cache = start;
if ((struct_size < 0) || (cache_size < 0) ||
((unsigned long)start == ~(0))) {
printk("Invalid parameters.\n");
return 0;
}
/*
 * The cache definition itself is at the beginning.
 * Skip it to get to the start of free memory, i.e. the cache area.
 */
area_start = (unsigned long)start + sizeof(struct mem_cache);
cache_size -= sizeof(struct mem_cache);
if (cache_size < struct_size) {
printk("Cache too small for given struct_size\n");
return 0;
}
/* Get how many words the bitmap occupies: one bit per object */
total = cache_size / struct_size;
bwords = total >> 5; /* Divide by 32 */
if (total & 0x1F) { /* Remainder? */
bwords++; /* Add one more word for remainder */
}
bsize = bwords * 4;
/* This many structures will be chucked from cache for bitmap space */
bwords_in_structs = ((bsize) / struct_size) + 1;
/* Total structs left after deducting bitmap space */
total = total - bwords_in_structs;
cache_size -= bsize;
/* This should always catch too-small caches */
if (total <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
if (cache_size <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
/* Bitmap sits right after the header; objects follow the bitmap */
bitmap = (unsigned int *)area_start;
area_start = (unsigned int)(bitmap + bwords);
if (aligned) {
/* Round the object area up to a struct_size boundary,
 * possibly sacrificing one object's worth of space */
unsigned int addr = area_start;
unsigned int addr_aligned = align_up(area_start, struct_size);
unsigned int diff = addr_aligned - addr;
BUG_ON(diff >= struct_size);
if (diff)
total--;
cache_size -= diff;
area_start = addr_aligned;
}
link_init(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;
cache->free = cache->total;
cache->struct_size = struct_size;
cache->bitmap = bitmap;
mutex_init(&cache->mutex);
/* All bits clear = all objects free */
memset(cache->bitmap, 0, bwords*SZ_WORD);
return cache;
}
#endif
struct mem_cache *mem_cache_init(void *bufstart, int cache_size, struct mem_cache *mem_cache_init(void *bufstart, int cache_size,
int struct_size, unsigned int aligned) int struct_size, unsigned int aligned)
{ {