Simplified/Cleaned up pager initialization in the kernel

Bahadir Balban
2009-10-20 22:20:37 +03:00
parent 224b531de5
commit d2fee25924
3 changed files with 75 additions and 299 deletions

View File

@@ -47,70 +47,6 @@ void kres_insert_container(struct container *c,
kres->containers.ncont++;
}
/*
* FIXME: Remove completely. This needs to be done in userspace.
* This searches the pager's capabilities and, if one defines a virtual
* memory area as a UTCB, uses the first entry as the pager's utcb.
* If none is found, it is not an error; another pager may map its utcb.
*/
void pager_init_utcb(struct ktcb *task, struct pager *pager)
{
struct capability *cap;
/* Find a virtual memory capability with UTCB map permissions */
list_foreach_struct(cap, &pager->cap_list.caps, list) {
if (cap_rtype(cap) == CAP_RTYPE_VIRTMEM &&
cap->access & CAP_MAP_UTCB) {
/* Use first address slot as pager's utcb */
task->utcb_address = __pfn_to_addr(cap->start);
break;
}
}
}
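
The FIXME above wants this scan moved to userspace. A minimal sketch of what that side might look like, assuming hypothetical helpers read_own_capabilities() and set_own_utcb() (not existing API) and reusing only the capability fields and flags already visible in the kernel code:

/*
 * Hypothetical userspace counterpart of the scan above, per the FIXME.
 * read_own_capabilities() and set_own_utcb() are assumed helpers, not
 * existing API; the field names mirror the kernel structures used above.
 */
void pager_pick_own_utcb(void)
{
	struct capability caps[32];
	int ncaps = read_own_capabilities(caps, 32);	/* assumed helper */

	for (int i = 0; i < ncaps; i++) {
		/* A virtual memory region we are allowed to map a utcb into */
		if (cap_rtype(&caps[i]) == CAP_RTYPE_VIRTMEM &&
		    (caps[i].access & CAP_MAP_UTCB)) {
			/* Claim the first address slot of that region */
			set_own_utcb(__pfn_to_addr(caps[i].start));
			break;
		}
	}
}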
#if 0
/*
* NOTE: This is not useful because the frame pointer stores
* references into the old stack, so even if the stack contents are
* copied, unwinding is not possible, which makes the copy pointless.
* Without a frame pointer it might make sense, but this is untested.
*
* Copy current stack contents to new one,
* and jump to that stack by modifying sp and frame pointer.
*/
int switch_stacks(struct ktcb *task)
{
volatile register unsigned int stack asm("sp");
volatile register unsigned int frameptr asm("fp");
volatile register unsigned int newstack;
unsigned int stack_size = (unsigned int)_bootstack - stack;
newstack = align((unsigned long)task + PAGE_SIZE - 1,
STACK_ALIGNMENT);
/* Copy stack contents to new stack */
memcpy((void *)(newstack - stack_size),
(void *)stack, stack_size);
/*
* Switch to new stack, as new stack
* minus currently used stack size
*/
stack = newstack - stack_size;
/*
* Frame ptr is new stack minus the original
* difference from start of boot stack to current fptr
*/
frameptr = newstack -
((unsigned int)_bootstack - frameptr);
/* We should be able to return safely */
return 0;
}
#endif
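
The note above is the crux: saved frame pointers hold absolute addresses into the old stack, so a byte-for-byte copy cannot be unwound. A standalone illustration of that failure mode (plain C, not kernel code):

#include <string.h>

/*
 * Illustration only: after copying a stack region, a saved frame
 * pointer inside the copy still points into the old region, so an
 * unwinder following it walks back onto the old stack unless every
 * saved fp is rebased by (new_base - old_base).
 */
static char old_stack[256], new_stack[256];

void show_stale_frame_pointer(void)
{
	/* Pretend offset 128 of the old stack holds a saved frame pointer */
	void **saved_fp = (void **)&old_stack[128];

	*saved_fp = &old_stack[192];	/* points into old_stack */

	memcpy(new_stack, old_stack, sizeof(old_stack));

	/*
	 * The copied slot is unchanged: it still holds &old_stack[192],
	 * not &new_stack[192], so unwinding from new_stack goes wrong.
	 */
}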
/*
* TODO:
*
@@ -120,48 +56,57 @@ int switch_stacks(struct ktcb *task)
*/
/*
* Inspects pager parameters defined in the container,
* and sets up an execution environment for the pager.
*
* This involves setting up pager's ktcb, space, utcb,
* all ids, registers, and mapping its (perhaps) first
* few pages in order to make it runnable.
*
* The first pager initialization is a special case
* since it uses the current kernel pgd.
*/
int init_first_pager(struct pager *pager,
int init_pager(struct pager *pager,
struct container *cont,
pgd_table_t *current_pgd)
{
struct ktcb *task;
struct address_space *space;
int first = !!current_pgd;
/*
* Initialize dummy current capability list pointer
* so that capability accounting can be done as normal
*
* FYI: We're still on bootstack instead of current's
* real stack. Hence this is a dummy.
*/
current->cap_list_ptr = &pager->cap_list;
/*
* Find capability from pager's list, since
* there is no ktcb, no standard path to check
* per-task capability list yet.
*/
/* Use it to allocate ktcb */
/* New ktcb allocation is needed */
task = tcb_alloc_init();
/* If first, manually allocate/initialize space */
if (first) {
if (!(space = alloc_space())) {
return -ENOMEM;
}
/* Set up space id */
space->spid = id_new(&kernel_resources.space_ids);
/* Initialize space structure */
link_init(&space->list);
mutex_init(&space->lock);
space->pgd = current_pgd;
address_space_attach(task, space);
} else {
/* Otherwise allocate conventionally */
task->space = address_space_create(0);
}
/* Initialize ktcb */
task_init_registers(task, pager->start_vma);
pager_init_utcb(task, pager);
/* Allocate space structure */
if (!(space = alloc_space()))
return -ENOMEM;
/* Set up space id */
space->spid = id_new(&kernel_resources.space_ids);
/* Initialize space structure */
link_init(&space->list);
mutex_init(&space->lock);
space->pgd = current_pgd;
address_space_attach(task, space);
/* Initialize container/pager relationships */
pager->tcb = task;
task->pager = pager;
@@ -192,64 +137,6 @@ int init_first_pager(struct pager *pager,
return 0;
}
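
With the two code paths merged, callers pick the first-pager special case purely through the third argument, as container_init_pagers() does further down:

/* First pager: reuse the current (kernel) pgd for its address space */
init_pager(pager, cont, current_pgd);

/* Any later pager: pass 0 so a fresh address space is created */
init_pager(pager, cont, 0);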
/*
* Inspects pager parameters defined in the container,
* and sets up an execution environment for the pager.
*
* This involves setting up pager's ktcb, space, utcb,
* all ids, registers, and mapping its (perhaps) first
* few pages in order to make it runnable.
*/
int init_pager(struct pager *pager, struct container *cont)
{
struct ktcb *task;
/*
* Initialize dummy current capability list pointer
* so that capability accounting can be done as normal
* FYI: We're still on bootstack instead of current's
* real stack. Hence this is a dummy.
*/
current->cap_list_ptr = &pager->cap_list;
/* Use it to allocate ktcb */
task = tcb_alloc_init();
task_init_registers(task, pager->start_vma);
pager_init_utcb(task, pager);
task->space = address_space_create(0);
/* Initialize container/pager relationships */
pager->tcb = task;
task->pager = pager;
task->container = cont;
task->pagerid = task->tid;
task->cap_list_ptr = &pager->cap_list;
printk("%s: Mapping %lu pages from 0x%lx to 0x%lx for %s\n",
__KERNELNAME__, __pfn(page_align_up(pager->memsize)),
pager->start_lma, pager->start_vma, cont->name);
add_mapping_pgd(pager->start_lma, pager->start_vma,
page_align_up(pager->memsize),
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
/* Initialize task scheduler parameters */
sched_init_task(task, TASK_PRIO_PAGER);
/* Give it a kick-start tick and make runnable */
task->ticks_left = 1;
sched_resume_async(task);
/* Container list that keeps all tasks */
tcb_add(task);
return 0;
}
/*
* Initialize all containers with their initial set of tasks,
* spaces, scheduler parameters such that they can be started.
@@ -259,19 +146,16 @@ int container_init_pagers(struct kernel_resources *kres,
{
struct container *cont;
struct pager *pager;
int pgidx = 0;
list_foreach_struct(cont, &kres->containers.list, list) {
for (int i = 0; i < cont->npagers; i++) {
pager = &cont->pager[i];
/* First pager initializes specially */
if (pgidx == 0)
init_first_pager(pager, cont,
current_pgd);
if (i == 0)
init_pager(pager, cont, current_pgd);
else
init_pager(pager, cont);
pgidx++;
init_pager(pager, cont, 0);
}
}

View File

@@ -477,17 +477,6 @@ int copy_container_info(struct container *c, struct container_info *cinfo)
return 0;
}
/*
* TODO:
*
* Rearrange as follows:
* 1.) Move realloc_page_tables to before container_init_pagers()
* 2.) Set up dummy current cap_list_ptr right after the real capability
* list has been created. -> Think this through: there are many containers!
* 3.) At this point, no need to do alloc_boot_pmd(), and current->cap_list_ptr
* is valid, so no custom alloc functions.
*/
/*
* Create real containers from compile-time created cinfo structures
*/

View File

@@ -45,26 +45,6 @@ void init_kernel_mappings(void)
add_section_mapping_init(align(virt_to_phys(_start_text),SZ_1MB),
align(virt_to_phys(_start_text),SZ_1MB),
1, 0);
#if 0
/* Map page table to its virtual region */
add_section_mapping_init(virt_to_phys(_start_kspace),
(unsigned int)_start_kspace,
1, 0);
/* Clean current before first time access. */
memset(current, 0, sizeof(struct ktcb));
/*
* We are currently on the bootstack. End of bootstack would
* eventually become the ktcb of the first pager. We use a
* statically allocated address_space structure for the pager.
*/
current->space = &pager_space;
/* Access physical address of pager_space to assign with initial pgd */
((struct address_space *)virt_to_phys(current->space))->pgd = &init_pgd;
#endif
}
void print_sections(void)
@@ -171,6 +151,9 @@ void kip_init()
{
struct utcb **utcb_ref;
/*
* TODO: Adding utcb size might be useful
*/
memset(&kip, 0, PAGE_SIZE);
memcpy(&kip, "L4\230K", 4); /* Name field = l4uK */
kip.api_version = 0xBB;
@@ -179,7 +162,6 @@ void kip_init()
kip.kdesc.magic = 0xBBB;
kip.kdesc.version = CODEZERO_VERSION;
kip.kdesc.subversion = CODEZERO_SUBVERSION;
// kip.kdesc.gendate = (__YEAR__ << 9)|(__MONTH__ << 5)|(__DAY__);
strncpy(kip.kdesc.date, __DATE__, KDESC_DATE_SIZE);
strncpy(kip.kdesc.time, __TIME__, KDESC_TIME_SIZE);
@@ -254,111 +236,11 @@ void switch_to_user(struct ktcb *task)
jump(task);
}
#if 0
/*
* Initialize the pager in the system.
*
* The pager uses the bootstack as its ktcb, the initial kspace as its pgd,
* (kernel pmds are shared among all tasks) and a statically allocated
* pager_space struct for its space structure.
*/
void init_pager(char *name, struct task_ids *ids)
{
struct svc_image *taskimg = 0;
struct ktcb *task;
int task_pages;
BUG_ON(strcmp(name, __PAGERNAME__));
task = current;
tcb_init(task);
/*
* Search the compile-time generated boot descriptor for
* information on available task images.
*/
for (int i = 0; i < bootdesc->total_images; i++) {
if (!strcmp(name, bootdesc->images[i].name)) {
taskimg = &bootdesc->images[i];
break;
}
}
BUG_ON(!taskimg);
if (taskimg->phys_start & PAGE_MASK)
printk("Warning, image start address not page aligned.\n");
/* Calculate the number of pages the task sections occupy. */
task_pages = __pfn((page_align_up(taskimg->phys_end) -
page_align(taskimg->phys_start)));
task->context.pc = INITTASK_AREA_START;
/* Stack starts one page above the end of image. */
task->context.sp = INITTASK_AREA_END - 8;
task->context.spsr = ARM_MODE_USR;
set_task_ids(task, ids);
/* Pager gets first UTCB area available by default */
task->utcb_address = UTCB_AREA_START;
BUG_ON(!TASK_PGD(task));
/*
* This task's userspace mapping. This should allocate a new pmd, if not
* existing, and a new page entry on its private pgd.
*/
add_mapping_pgd(taskimg->phys_start, INITTASK_AREA_START,
task_pages * PAGE_SIZE, MAP_USR_DEFAULT_FLAGS,
TASK_PGD(task));
//printk("Mapping %d pages from 0x%x to 0x%x for %s\n", task_pages,
// taskimg->phys_start, INITTASK_AREA_START, name);
/* Add the physical pages used by the task to the page map */
set_page_map(taskimg->phys_start, task_pages, 1);
/* Task's rendezvous point */
waitqueue_head_init(&task->wqh_send);
waitqueue_head_init(&task->wqh_recv);
waitqueue_head_init(&task->wqh_pager);
/* Global hashlist that keeps all existing tasks */
tcb_add(task);
/* Scheduler initialises the very first task itself */
}
void init_tasks()
{
struct task_ids ids;
/* Initialise thread and space id pools */
thread_id_pool = id_pool_new_init(THREAD_IDS_MAX);
space_id_pool = id_pool_new_init(SPACE_IDS_MAX);
ids.tid = id_new(thread_id_pool);
ids.spid = id_new(space_id_pool);
ids.tgid = ids.tid;
/* Initialise the global task and address space lists */
//init_ktcb_list();
//init_address_space_list();
//init_mutex_queue_head();
printk("%s: Initialized. Starting %s as pager.\n",
__KERNELNAME__, __PAGERNAME__);
/*
* This must come last so that other tasks can copy its pgd before it
* modifies it for its own specifics.
*/
// init_pager(__PAGERNAME__, &ids);
}
#endif
void setup_dummy_current()
{
/*
* Initialize the beginning of last page of
* stack as the current ktcb
* Temporarily initialize the beginning of
* last page of stack as the current ktcb
*/
memset(current, 0, sizeof(struct ktcb));
@@ -388,7 +270,7 @@ void init_finalize(struct kernel_resources *kres)
/* Switch to new stack */
stack = newstack;
/* -- Point of no stack unwinding/referencing -- */
/* -- Point of no stack unwinding -- */
/*
* Unmap boot memory, and add it as
@@ -411,39 +293,54 @@ void init_finalize(struct kernel_resources *kres)
* Start the scheduler, jumping to task
*/
scheduler_start();
/* FIXME: Make sure SP_SVC is reset in jump() */
}
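
For reference, the stack hand-over in init_finalize() roughly amounts to the sketch below; the function name is hypothetical, and the register-variable trick mirrors the removed switch_stacks() above, minus the copy. Nothing is moved, so no boot-stack local may be referenced after the assignment.

/*
 * Rough sketch of the hand-over done in init_finalize(); the name is
 * hypothetical, the sp register variable as in the removed switch_stacks().
 */
void run_on_task_stack(struct ktcb *task)
{
	volatile register unsigned int stack asm("sp");

	/* Top of the page holding the pager's ktcb becomes the new stack */
	stack = align((unsigned long)task + PAGE_SIZE - 1, STACK_ALIGNMENT);

	/* -- Point of no stack unwinding -- */
	scheduler_start();
}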
void start_kernel(void)
{
printascii("\n"__KERNELNAME__": start kernel...\n");
/* Print section boundaries for kernel image */
// print_sections();
/* Initialise section mappings for the kernel area */
/*
* Initialise section mappings
* for the kernel area
*/
init_kernel_mappings();
/* Enable virtual memory and jump to virtual addresses */
/*
* Enable virtual memory
* and jump to virtual addresses
*/
start_vm();
/* Set up a dummy current ktcb on boot stack with initial pgd */
/*
* Set up a dummy current ktcb on
* boot stack with initial pgd
*/
setup_dummy_current();
/* Initialise platform-specific page mappings, and peripherals */
/*
* Initialise platform-specific
* page mappings, and peripherals
*/
platform_init();
printk("%s: Virtual memory enabled.\n", __KERNELNAME__);
/* Can only print when uart is mapped */
printk("%s: Virtual memory enabled.\n",
__KERNELNAME__);
/* Map and enable high vector page. Faults can be handled after here. */
/*
* Map and enable high vector page.
* Faults can be handled after here.
*/
vectors_init();
/* Remap 1MB kernel sections as 4Kb pages. */
remap_as_pages((void *)page_align(_start_kernel),
(void *)page_align_up(_end_kernel));
/* Initialise kip and map for userspace access */
/*
* Initialise kip and map
* for userspace access
*/
kip_init();
/* Initialise system call page */
@@ -452,10 +349,16 @@ void start_kernel(void)
/* Init scheduler */
sched_init(&scheduler);
/* Evaluate system resources and set up resource pools */
/*
* Evaluate system resources
* and set up resource pools
*/
init_system_resources(&kernel_resources);
/* Free boot memory, jump to first task's stack and start scheduler */
/*
* Free boot memory, switch to first
* task's stack and start scheduler
*/
init_finalize(&kernel_resources);
BUG();