Mirror of https://github.com/drasko/codezero.git (synced 2026-01-11 18:33:16 +01:00)
New UTCB implementation almost working.
- KIP's pointer to the UTCB seems to work with the existing l4lib ipc functions.
- Works up to clone().
- In clone() we mmap() the same UTCB for each new thread, which is excessive.
- Generally during page-fault handling, cloned threads may fault on the same page multiple times even though handling it once would be enough for all of them. This needs to be detected and handled.
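For orientation, here is a minimal user-side sketch (not part of this commit) of the KIP-based UTCB lookup the commit message refers to; the symbol kip_utcb_ref and the helper name are assumptions that mirror the utcb_address assembler macro changed further down in this diff.

/*
 * Illustrative sketch only: resolve the current thread's UTCB through the
 * KIP. kip_utcb_ref is assumed to hold the address of the UTCB pointer
 * inside the KIP (i.e. &kip.utcb), which the kernel updates on every
 * context switch, so dereferencing the variable yields the running
 * thread's UTCB address.
 */
extern unsigned long *kip_utcb_ref;

static inline void *l4_current_utcb(void)
{
        return (void *)*kip_utcb_ref;
}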
@@ -46,5 +46,5 @@ context switch, a private page is allocated and mapped by the pager, but also
the UTCB pointer is updated to point at an offset in this page. As an example,
if a UTCB is sized 1/4th of a page, a single page is used by 4 UTCBs. This way,
the pager needs to manage 4 entries per-private page, utcbs utilise page memory
better, and there is no need for a fixed table of utcbs per address space.
fully, and there is no need for a fixed table of utcbs per address space.
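As a rough worked example of the packing described above (a sketch, not part of the commit): with the UTCB_SIZE of sizeof(int) * 64 = 256 bytes defined later in this diff and an assumed 4 KB page, one private page holds PAGE_SIZE / UTCB_SIZE = 16 slots, and slot s sits at utcb_base + s * UTCB_SIZE; the 1/4-page figure in the text above is only an illustration.

#include <stdio.h>

/* Assumed constants for illustration; UTCB_SIZE matches the define that
 * appears in the memlayout.h hunk below. */
#define PAGE_SIZE 4096UL
#define UTCB_SIZE (sizeof(int) * 64)    /* 256 bytes */

int main(void)
{
        unsigned long base = 0xF8000000UL;              /* UTCB_AREA_START */
        unsigned long slots = PAGE_SIZE / UTCB_SIZE;    /* 16 slots per page */

        for (unsigned long s = 0; s < slots; s++)
                printf("slot %2lu -> %#lx\n", s, base + s * UTCB_SIZE);
        return 0;
}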
@@ -20,8 +20,7 @@ struct exregs_data {
u32 valid_vect;
u32 flags;
l4id_t pagerid;
unsigned long utcb_phys;
unsigned long utcb_virt;
unsigned long utcb_address;
};

@@ -72,8 +72,7 @@ struct ktcb {
struct list_head task_list; /* Global task list. */

/* UTCB related, see utcb.txt in docs */
unsigned long utcb_virt; /* Virtual ref to task's utcb area */
unsigned long utcb_phys; /* Physical ref to task's utcb area */
unsigned long utcb_address; /* Virtual ref to task's utcb area */

/* Thread times */
u32 kernel_time; /* Ticks spent in kernel */

@@ -151,5 +150,7 @@ extern struct id_pool *thread_id_pool;
extern struct id_pool *space_id_pool;
extern struct id_pool *tgroup_id_pool;

void task_update_utcb(struct ktcb *cur, struct ktcb *next);

#endif /* __TCB_H__ */

@@ -33,7 +33,7 @@
#define UTCB_AREA_START 0xF8000000
#define UTCB_AREA_END 0xF9000000
#define UTCB_AREA_SIZE (UTCB_AREA_END - UTCB_AREA_START)
#define UTCB_AREA_SECTIONS (UTCB_AREA_SIZE / ARM_SECTION_SIZE)
#define UTCB_SIZE (sizeof(int) * 64)

#define IO_AREA_START 0xF9000000
#define IO_AREA_END 0xFF000000

@@ -6,7 +6,9 @@
#ifndef __GLUE_ARM_MESSAGE_H__
#define __GLUE_ARM_MESSAGE_H__

#define MR_REST 56
#include INC_GLUE(memlayout.h)

#define MR_REST (UTCB_SIZE - MR_TOTAL - 2) /* -2 is for fields on utcb */
#define MR_TOTAL 6
#define MR_TAG 0 /* Contains the purpose of message */
#define MR_SENDER 1 /* For anythread receivers to discover sender */

@@ -12,7 +12,6 @@
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)

/* FIXME: Change the unit name */
__attribute__ ((section(".data.kip"))) struct kip kip;

/* Error-checked kernel data request call */

@@ -79,12 +79,8 @@ void do_exchange_registers(struct ktcb *task, struct exregs_data *exregs)
task->pagerid = exregs->pagerid;

/* Set thread's utcb if supplied */
if (exregs->flags & EXREGS_SET_UTCB) {
BUG(); /* Check that physical and virtual addresses are in range */
task->utcb_phys = exregs->utcb_phys;
task->utcb_virt = exregs->utcb_virt;
}

if (exregs->flags & EXREGS_SET_UTCB)
task->utcb_address = exregs->utcb_address;
}

/*

@@ -139,6 +135,12 @@ int sys_exchange_registers(syscall_context_t *regs)
}
#endif

/* Check UTCB is in valid range */
if (exregs->flags & EXREGS_SET_UTCB &&
!(exregs->utcb_address >= UTCB_AREA_START &&
exregs->utcb_address < UTCB_AREA_END))
return -EINVAL;

/* Copy registers */
do_exchange_registers(task, exregs);
src/api/thread.c
@@ -164,90 +164,9 @@ int thread_start(struct task_ids *ids)
return 0;
}

/*
* Given thread creation flags, determines whether to use a new user
* (pager)-supplied utcb address, the utcb address of the original thread,
* or no utcb at all. Validation of flags done at beginning of thread_create().
*/
int arch_new_thread_setup_utcb(struct ktcb *new, struct ktcb *orig, unsigned int flags,
unsigned long utcb_address)
{
unsigned int create_flags = flags & THREAD_CREATE_MASK;
unsigned int utcb_flags = flags & THREAD_UTCB_MASK;

/* In case of multiple threads in same address space */
if (create_flags == THREAD_SAME_SPACE) {
switch (utcb_flags) {
case THREAD_UTCB_SAME:
new->utcb_address = orig->utcb_address;
break;
case THREAD_UTCB_NEW:
new->utcb_address = utcb_address;
break;
case THREAD_UTCB_NONE:
new->utcb_address = 0;
break;
default:
printk("%s: Bad thread creation flags. "
"Incorrect flag validation?\n",__FUNCTION__);
BUG();
}
}

/* In case of brand new address space and thread */
if (create_flags == THREAD_NEW_SPACE) {
switch (utcb_flags) {
case THREAD_UTCB_NEW:
new->utcb_address = utcb_address;
break;
/*
* No UTCB for brand new space means the thread cannot do
* an ipc other than exceptions. This is allowed for now.
*/
case THREAD_UTCB_NONE:
new->utcb_address = 0;
break;
default:
printk("%s: Bad thread creation flags. "
"Incorrect flag validation?\n",__FUNCTION__);
BUG();
}
}

/*
* This essentially corresponds to fork() and normally we would expect
* the UTCB to be the same as original thread, since the whole address
* space is an identical image of the original. Nevertheless it doesn't
* do harm to have none or different utcb address and this is left to
* the implementor to decide.
*/
if (create_flags == THREAD_COPY_SPACE) {
switch (utcb_flags) {
case THREAD_UTCB_SAME:
new->utcb_address = orig->utcb_address;
break;
case THREAD_UTCB_NEW:
new->utcb_address = utcb_address;
break;
case THREAD_UTCB_NONE:
new->utcb_address = 0;
break;
default:
printk("%s: Bad thread creation flags. "
"Incorrect flag validation?\n",__FUNCTION__);
BUG();
}
}

return 0;
}

int arch_setup_new_thread(struct ktcb *new, struct ktcb *orig,
unsigned int flags, unsigned long utcb_address)
unsigned int flags)
{
/* Set up the utcb address */
arch_new_thread_setup_utcb(new, orig, flags, utcb_address);

/* New threads just need their mode set up */
if ((flags & THREAD_CREATE_MASK) == THREAD_NEW_SPACE) {
BUG_ON(orig);

@@ -344,25 +263,10 @@ int thread_setup_new_ids(struct task_ids *ids, unsigned int flags,
* are respectively used when creating a brand new task, creating a
* new thread in an existing address space, or forking a task.
*/
int thread_create(struct task_ids *ids, unsigned int flags,
unsigned long utcb_address)
int thread_create(struct task_ids *ids, unsigned int flags)
{
struct ktcb *task = 0, *new = (struct ktcb *)zalloc_page();
unsigned int create_flags = flags & THREAD_CREATE_MASK;
unsigned int utcb_flags = flags & THREAD_UTCB_MASK;

/* Handle error cases. Should have valid flags for each */
if (!utcb_flags || !create_flags)
return -EINVAL;

/* Cannot have new space with same utcb */
else if (create_flags == THREAD_NEW_SPACE &&
utcb_flags == THREAD_UTCB_SAME)
return -EINVAL;

/* Cannot have new utcb with invalid address */
else if (utcb_flags == THREAD_UTCB_NEW && !utcb_address)
return -EINVAL;

/* Determine space allocation */
if (create_flags == THREAD_NEW_SPACE) {

@@ -397,7 +301,7 @@ out:
waitqueue_head_init(&new->wqh_recv);
waitqueue_head_init(&new->wqh_pager);

arch_setup_new_thread(new, task, flags, utcb_address);
arch_setup_new_thread(new, task, flags);

/* Add task to global hlist of tasks */
add_task_global(new);

@@ -415,11 +319,10 @@ int sys_thread_control(syscall_context_t *regs)
int ret = 0;
unsigned int flags = regs->r0;
struct task_ids *ids = (struct task_ids *)regs->r1;
unsigned long utcb_address = regs->r2;

switch (flags & THREAD_ACTION_MASK) {
case THREAD_CREATE:
ret = thread_create(ids, flags, utcb_address);
ret = thread_create(ids, flags);
break;
case THREAD_RUN:
ret = thread_start(ids);
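Taken together, the kernel-side changes above move UTCB assignment out of thread_create() and into exchange_registers(). Below is a hedged pager-side sketch of the resulting flow; it assumes the l4lib headers (syscalls.h, syslib.h, exregs) are in scope and that the flag combination shown is accepted by thread_create(), neither of which this diff spells out.

/* Sketch: create a thread under the new interface, where the UTCB address
 * is supplied through exregs rather than through thread_control(). The
 * caller is assumed to have reserved utcb_address for the new thread
 * (e.g. via the pager's task_setup_utcb() added later in this diff). */
static int create_thread_with_utcb(struct task_ids *ids,
                                   unsigned long utcb_address)
{
        struct exregs_data exregs;
        int err;

        /* 1) Create the thread; note there is no utcb argument any more. */
        if ((err = l4_thread_control(THREAD_CREATE | THREAD_SAME_SPACE, ids)) < 0)
                return err;

        /* 2) Publish the pager-chosen UTCB address to the kernel. */
        memset(&exregs, 0, sizeof(exregs));
        exregs_set_utcb(&exregs, utcb_address);
        if ((err = l4_exchange_registers(&exregs, ids->tid)) < 0)
                return err;

        /* 3) Make the thread runnable. */
        return l4_thread_control(THREAD_RUN, ids);
}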
@@ -572,6 +572,7 @@ void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
irange * sizeof(pgd_t));
}

/* Scheduler uses this to switch context */
void arch_hardware_flush(pgd_table_t *pgd)
{

@@ -255,12 +255,12 @@ static inline void context_switch(struct ktcb *next)

// printk("(%d) to (%d)\n", cur->tid, next->tid);

/* Update KIP UTCB pointer for new thread to run */
kip.utcb = next->utcb_address;

/* Flush caches and everything */
arch_hardware_flush(next->pgd);

/* Update utcb region for next task */
task_update_utcb(cur, next);

/* Switch context */
arch_switch(cur, next);

@@ -8,7 +8,10 @@
#include <l4/generic/scheduler.h>
#include <l4/generic/preempt.h>
#include <l4/lib/idpool.h>
#include <l4/api/kip.h>
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memory.h)

/* ID pools for threads and spaces. */
struct id_pool *thread_id_pool;

@@ -22,3 +25,36 @@ struct list_head global_task_list;
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);

/*
* Every thread has a unique utcb region that is mapped to its address
* space as its context is loaded. The utcb region is a function of
* this mapping and its offset that is reached via the KIP UTCB pointer
*/
void task_update_utcb(struct ktcb *cur, struct ktcb *next)
{
/* Update the KIP pointer */
kip.utcb = next->utcb_address;

/* We stick with KIP update and no private tls mapping for now */
#if 0
/*
* Unless current and next are in the same address
* space and sharing the same physical utcb page, we
* update the mapping
*/
if (cur->utcb_phys != next->utcb_phys)
add_mapping(page_align(next->utcb_phys),
page_align(next->utcb_virt),
page_align_up(UTCB_SIZE),
MAP_USR_RW_FLAGS);
/*
* If same physical utcb but different pgd, it means two
* address spaces share the same utcb. We treat this as a
* bug for now.
*/
else
BUG_ON(cur->pgd != next->pgd);
#endif

}

@@ -84,8 +84,10 @@ void print_sections(void)
dprintk("_end: ", (unsigned int)_end);
}

/* Enable virtual memory using kernel's first level table
* and continue execution on virtual addresses.*/
/*
* Enable virtual memory using kernel's pgd
* and continue execution on virtual addresses.
*/
void start_vm()
{
/*

@@ -125,9 +127,7 @@ void start_vm()
/* Jump to virtual memory addresses */
__asm__ __volatile__ (
"add sp, sp, %0 \n" /* Update stack pointer */
#ifndef __OPTIMIZED_FP__ /* If fp not optimised away */
"add fp, fp, %0 \n" /* Update frame pointer */
#endif
/* On the next instruction below, r0 gets
* current PC + KOFFSET + 2 instructions after itself. */
"add r0, pc, %0 \n"

@@ -294,6 +294,9 @@ void init_pager(char *name, struct task_ids *ids)

set_task_ids(task, ids);

/* Pager gets first UTCB area available by default */
task->utcb_address = UTCB_AREA_START;

if (!task->pgd) {
BUG(); /* Inittask won't come here */
task->pgd = alloc_pgd();

@@ -53,9 +53,9 @@ typedef int (*__l4_unmap_t)(void *virt, unsigned long npages, l4id_t tid);
extern __l4_unmap_t __l4_unmap;
int l4_unmap(void *virtual, unsigned long numpages, l4id_t tid);

typedef int (*__l4_thread_control_t)(unsigned int action, struct task_ids *ids, void *utcb_address);
typedef int (*__l4_thread_control_t)(unsigned int action, struct task_ids *ids);
extern __l4_thread_control_t __l4_thread_control;
int l4_thread_control(unsigned int action, struct task_ids *ids, void *utcb_address);
int l4_thread_control(unsigned int action, struct task_ids *ids);

typedef int (*__l4_space_control_t)(unsigned int action, void *kdata);
extern __l4_space_control_t __l4_space_control;

@@ -7,7 +7,7 @@ void exregs_set_stack(struct exregs_data *s, unsigned long sp);
void exregs_set_mr(struct exregs_data *s, int offset, unsigned long val);
void exregs_set_pc(struct exregs_data *s, unsigned long pc);
void exregs_set_pager(struct exregs_data *s, l4id_t pagerid);
void exregs_set_utcb(struct exregs_data *s, unsigned long phys, unsigned long virt);
void exregs_set_utcb(struct exregs_data *s, unsigned long virt);

/*
exregs_set_stack(unsigned long sp)

@@ -31,11 +31,9 @@ void exregs_set_pager(struct exregs_data *s, l4id_t pagerid)
s->flags |= EXREGS_SET_PAGER;
}

void exregs_set_utcb(struct exregs_data *s, unsigned long phys,
unsigned long virt)
void exregs_set_utcb(struct exregs_data *s, unsigned long virt)
{
s->utcb_phys = phys;
s->utcb_virt = virt;
s->utcb_address = virt;
s->flags |= EXREGS_SET_UTCB;
}
@@ -19,7 +19,8 @@
/* New macro does double dereference */
.macro utcb_address rx
ldr \rx, =kip_utcb_ref @ First get pointer to utcb pointer in KIP
ldr \rx, [\rx] @ Get UTCB address from UTCB pointer in KIP
ldr \rx, [\rx] @ Get pointer to UTCB address from UTCB pointer in KIP
ldr \rx, [\rx] @ Get the utcb address
.endm

BEGIN_PROC(l4_thread_switch)

@@ -19,6 +19,8 @@
struct initdata {
struct bootdesc *bootdesc;
struct page_bitmap page_map;
unsigned long pager_utcb_virt;
unsigned long pager_utcb_phys;
struct list_head boot_file_list;
};

@@ -5,8 +5,8 @@
#include <lib/spinlock.h>

struct id_pool {
struct spinlock lock;
int nwords;
int bitlimit;
u32 bitmap[];
};
@@ -54,21 +54,18 @@ struct task_vma_head {
int tcb_refs;
};

/*
* TLS and UTCB bookkeeping:
*
* This structure is shared among threads whose utcbs are on the same
* physical page. Threads with utcbs on different physical pages have
* their own utcb_data structure, even though they are in the same
* address space, and share their vm_area_list structure.
*/
struct utcb_data {
unsigned long phys; /* Physical utcb address */
unsigned long virt; /* Virtual utcb address */
u32 bit; /* Bitvector of free utcb slots on page */
struct page *p; /* Physical page */
struct utcb_desc {
struct list_head list;
unsigned long utcb_base;
struct id_pool *slots;
};

struct utcb_head {
struct list_head list;
int tcb_refs;
};

/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */

@@ -119,8 +116,11 @@ struct tcb {
/* Default ipc-shared-page information */
void *shared_page;

/* Task's utcb data */
struct utcb_data *utcb;
/* Chain of utcb descriptors */
struct utcb_head *utcb_head;

/* Unique utcb address of this task */
unsigned long utcb_address;

/* Virtual memory areas */
struct task_vma_head *vm_area_head;
@@ -6,6 +6,7 @@
#include <boot.h>
#include <mmap.h>
#include <shm.h>
#include <utcb.h>
#include <l4/api/thread.h>
#include <l4lib/arch/syslib.h>

@@ -81,6 +82,9 @@ int boottask_mmap_regions(struct tcb *task, struct vm_file *file)
__pfn(DEFAULT_SHPAGE_SIZE))))
return (int)shm;

/* Task's utcb region */
task_setup_utcb(task);

return 0;
}

@@ -79,11 +79,15 @@ int sys_fork(struct tcb *parent)
memset(&exregs, 0, sizeof(exregs));
exregs_set_mr(&exregs, MR_RETURN, 0);

/* Set child's new utcb address set by task_create() */
BUG_ON(!child->utcb_address);
exregs_set_utcb(&exregs, child->utcb_address);

/* Do the actual exregs call to c0 */
if ((err = l4_exchange_registers(&exregs, child->tid)) < 0)
BUG();

/* Create and prefault a utcb for child and map it to vfs task */
/* Create and prefault a shared page for child and map it to vfs task */
shpage_map_to_task(child, find_task(VFS_TID),
SHPAGE_NEW_ADDRESS | SHPAGE_NEW_SHM |
SHPAGE_PREFAULT);

@@ -122,6 +126,7 @@ int do_clone(struct tcb *parent, unsigned long child_stack, unsigned int flags)

if (IS_ERR(child = task_create(parent, &ids, THREAD_SAME_SPACE, flags)))
return (int)child;

/* Set up child stack marks with given stack argument */
child->stack_end = child_stack;
child->stack_start = 0;

@@ -133,12 +138,14 @@ int do_clone(struct tcb *parent, unsigned long child_stack, unsigned int flags)

/* Set child's clone return value to 0 */
exregs_set_mr(&exregs, MR_RETURN, 0);
BUG_ON(!child->utcb_address);
exregs_set_utcb(&exregs, child->utcb_address);

/* Do the actual exregs call to c0 */
if ((err = l4_exchange_registers(&exregs, child->tid)) < 0)
BUG();

/* Create and prefault a utcb for child and map it to vfs task */
/* Create and prefault a shared page for child and map it to vfs task */
shpage_map_to_task(child, find_task(VFS_TID),
SHPAGE_NEW_ADDRESS | SHPAGE_NEW_SHM |
SHPAGE_PREFAULT);

@@ -89,9 +89,13 @@ int execve_recycle_task(struct tcb *new, struct tcb *orig)
new->tgid = orig->tgid;
new->pagerid = orig->pagerid;

/* Copy utcb */
/* Copy shared page */
new->shared_page = orig->shared_page;

/* Copy utcb descriptors and unique utcb */
new->utcb_head = orig->utcb_head;
new->utcb_address = orig->utcb_address;

/* Copy parent relationship */
BUG_ON(new->parent);
new->parent = orig->parent;

@@ -102,7 +106,7 @@ int execve_recycle_task(struct tcb *new, struct tcb *orig)

/* Vfs still knows the thread */

/* Keep the utcb on vfs */
/* Keep the shared page on vfs */

/* Ask the kernel to recycle the thread */
if ((err = l4_thread_control(THREAD_RECYCLE, &ids)) < 0) {

@@ -8,6 +8,7 @@
#include <memory.h>
#include <mm/alloc_page.h>
#include <lib/malloc.h>
#include <lib/bit.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <l4lib/utcb.h>

@@ -17,7 +18,7 @@
#include <init.h>
#include <test.h>
#include <boot.h>

#include <utcb.h>

/* A separate list than the generic file list that keeps just the boot files */
LIST_HEAD(boot_file_list);
@@ -55,6 +56,16 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
list_add(&task->child_ref, &task->children);
task->parent = task;

/*
* The first UTCB address is already assigned
* by the microkernel for this pager. Ensure that we also got
* the same from our internal utcb bookkeeping.
*/
BUG_ON(task->utcb_address != UTCB_AREA_START);

/* Pager must prefault its utcb */
prefault_page(task, task->utcb_address, VM_READ | VM_WRITE);

/* Add the task to the global task list */
global_add_task(task);

@@ -170,6 +181,10 @@ void init_mm(struct initdata *initdata)
}
printf("%s: Initialised shm structures.\n", __TASKNAME__);

if (utcb_pool_init() < 0) {
printf("SHM initialisation failed.\n");
BUG();
}

/* For supplying contiguous virtual addresses to pager */
pager_address_pool_init();

@@ -62,6 +62,7 @@ void print_page_map(struct page_bitmap *map)
printf("Total of %d pages. %d Kbytes.\n", total_used, total_used << 2);
}

int request_initdata(struct initdata *initdata)
{
int err;

@@ -96,6 +97,7 @@ int request_initdata(struct initdata *initdata)
"KDATA_BOOTDESC request.\n");
goto error;
}

return 0;

error:

@@ -51,6 +51,7 @@ int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
return -1;

/* This is a state machine that checks n contiguous free bits. */
/* FIXME: It should be <= instead of <. Fix & test in a single patch */
while (i + nbits < limit) {
first = i;
last = i;

@@ -12,29 +12,29 @@

struct id_pool *id_pool_new_init(int totalbits)
{
int nwords = BITWISE_GETWORD(totalbits);
int nwords = BITWISE_GETWORD(totalbits) + 1;
struct id_pool *new = kzalloc((nwords * SZ_WORD)
+ sizeof(struct id_pool));
if (!new)
return PTR_ERR(-ENOMEM);

new->nwords = nwords;
new->bitlimit = totalbits;

return new;
}

/* Search for a free slot up to the limit given */
int id_new(struct id_pool *pool)
{
int id = find_and_set_first_free_bit(pool->bitmap,
pool->nwords * WORD_BITS);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
return find_and_set_first_free_bit(pool->bitmap, pool->bitlimit);
}

/* This finds n contiguous free ids, allocates and returns the first one */
int ids_new_contiguous(struct id_pool *pool, int numids)
{
int id = find_and_set_first_free_contig_bits(pool->bitmap,
pool->nwords *WORD_BITS,
pool->bitlimit,
numids);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
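The `+ 1` added to nwords above matters when totalbits does not fill a whole bitmap word; below is a small arithmetic sketch of why, assuming BITWISE_GETWORD(x) is essentially x / WORD_BITS with 32-bit words (an assumption, this diff does not show the macro). Note also that id_new() now bounds its search by bitlimit instead of nwords * WORD_BITS, so ids beyond totalbits are never handed out.

#include <stdio.h>

#define WORD_BITS 32
#define BITWISE_GETWORD(bit) ((bit) / WORD_BITS)        /* assumed definition */

int main(void)
{
        /* utcb.c below creates a pool of PAGE_SIZE / UTCB_SIZE = 16 slots;
         * without the + 1 such a pool would get zero bitmap words. */
        int totalbits = 16;

        printf("nwords without +1: %d\n", BITWISE_GETWORD(totalbits));      /* 0 */
        printf("nwords with    +1: %d\n", BITWISE_GETWORD(totalbits) + 1);  /* 1 */
        return 0;
}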
@@ -70,9 +70,7 @@ int id_get(struct id_pool *pool, int id)
{
int ret;

spin_lock(&pool->lock);
ret = check_and_set_bit(pool->bitmap, id);
spin_unlock(&pool->lock);

if (ret < 0)
return ret;

@@ -166,8 +166,10 @@ int mmap_address_validate(struct tcb *task, unsigned long map_address,

/* Private mappings can only go in task address space */
if (vm_flags & VMA_PRIVATE) {
if (map_address >= task->start ||
map_address < task->end) {
if ((map_address >= task->start &&
map_address < task->end) ||
(map_address >= UTCB_AREA_START &&
map_address < UTCB_AREA_END)) {
return 1;
} else
return 0;

@@ -176,9 +178,7 @@ int mmap_address_validate(struct tcb *task, unsigned long map_address,
* memory address space,
*/
} else if (vm_flags & VMA_SHARED) {
if ((map_address >= UTCB_AREA_START &&
map_address < UTCB_AREA_END) ||
(map_address >= task->start &&
if ((map_address >= task->start &&
map_address < task->end) ||
(map_address >= SHM_AREA_START &&
map_address < SHM_AREA_END))

@@ -202,6 +202,13 @@ unsigned long mmap_new_address(struct tcb *task, unsigned int flags,
return find_unmapped_area(npages, task);
}

/*
* Side note:
* Why in do_mmap() shm files have devzero mapped behind separately but
* anonymous files map devzero directly? Because private anonymous files get
* shadow objects in front when written to. Shm files are not private, so they
* stay where they are and just grow. Other processes can reach and map them.
*/

/*
* Maps the given file with given flags at the given page offset to the given

@@ -130,6 +130,7 @@ int vma_unmap(struct vm_area *vma, struct tcb *task,
int vma_flush_pages(struct vm_area *vma)
{
struct vm_object *vmo;
struct vm_obj_link *vmo_link;
int err;

/* Read-only vmas need not flush objects */

@@ -141,7 +142,8 @@ int vma_flush_pages(struct vm_area *vma)
* could only be a single VM_SHARED file-backed object in the chain.
*/
BUG_ON(list_empty(&vma->list));
vmo = list_entry(vma->list.next, struct vm_object, list);
vmo_link = list_entry(vma->vm_obj_list.next, struct vm_obj_link, list);
vmo = vmo_link->obj;

/* Only dirty objects would need flushing */
if (!(vmo->flags & VM_DIRTY))

@@ -255,8 +255,8 @@ void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr)
}

/*
* FIXME: Make sure hostile tasks don't subvert other tasks' utcbs
* by early-registring their utcb address here.
* FIXME: Make sure hostile tasks don't subvert other tasks' shared pages
* by early-registring their shared page address here.
*/
int sys_shmget(key_t key, int size, int shmflg)
{

@@ -355,25 +355,25 @@ int shpage_map_to_task(struct tcb *owner, struct tcb *mapper, unsigned int flags
{
struct vm_file *default_shm;

/* Allocate a new utcb address */
/* Allocate a new shared page address */
if (flags & SHPAGE_NEW_ADDRESS)
owner->shared_page =
shm_new_address(DEFAULT_SHPAGE_SIZE/PAGE_SIZE);
else if (!owner->shared_page)
BUG();

/* Create a new shared memory segment for utcb */
/* Create a new shared memory segment */
if (flags & SHPAGE_NEW_SHM)
if (IS_ERR(default_shm = shm_new((key_t)owner->shared_page,
__pfn(DEFAULT_SHPAGE_SIZE))))
return (int)default_shm;

/* Map the utcb to mapper */
/* Map the shared page to mapper */
if (IS_ERR(shmat_shmget_internal(mapper, (key_t)owner->shared_page,
owner->shared_page)))
BUG();

/* Prefault the owner's utcb to mapper's address space */
/* Prefault the owner's shared page to mapper's address space */
if (flags & SHPAGE_PREFAULT)
for (int i = 0; i < __pfn(DEFAULT_SHPAGE_SIZE); i++)
prefault_page(mapper, (unsigned long)owner->shared_page +
@@ -31,6 +31,7 @@
#include <boot.h>
#include <globals.h>
#include <test.h>
#include <utcb.h>

struct global_list global_tasks = {
.list = { &global_tasks.list, &global_tasks.list },

@@ -86,6 +87,16 @@ struct tcb *tcb_alloc_init(unsigned int flags)
}
task->vm_area_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->vm_area_head->list);

/* Also allocate a utcb head for new address space */
if (!(task->utcb_head =
kzalloc(sizeof(*task->utcb_head)))) {
kfree(task->vm_area_head);
kfree(task);
return PTR_ERR(-ENOMEM);
}
task->utcb_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->utcb_head->list);
}

/* Allocate file structures if not shared */

@@ -93,6 +104,7 @@ struct tcb *tcb_alloc_init(unsigned int flags)
if (!(task->files =
kzalloc(sizeof(*task->files)))) {
kfree(task->vm_area_head);
kfree(task->utcb_head);
kfree(task);
return PTR_ERR(-ENOMEM);
}

@@ -240,15 +252,26 @@ int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags)
to->map_start = from->map_start;
to->map_end = from->map_end;

/* Sharing the list of vmas */
/* Sharing the list of vmas and utcbs */
if (flags & TCB_SHARED_VM) {
to->vm_area_head = from->vm_area_head;
to->vm_area_head->tcb_refs++;
to->utcb_head = from->utcb_head;
to->utcb_head->tcb_refs++;
} else {
/* Copy all vm areas */
task_copy_vmas(to, from);

/*
* NOTE:
* No copy for utcb descriptor list,
* forker shall start its own unique.
*/
}

/* Set up a new utcb for new thread */
task_setup_utcb(to);

/* Copy all file descriptors */
if (flags & TCB_SHARED_FILES) {
to->files = from->files;

@@ -535,8 +558,9 @@ int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_
}

/*
* Task already has recycled task's utcb. It will attach to it
* when it starts in userspace.
* Task already has recycled task's shared page. It will attach to it
* when it starts in userspace. Task also already utilizes recycled
* task's utcb.
*/
//if (IS_ERR(shm = shm_new((key_t)task->utcb, __pfn(DEFAULT_UTCB_SIZE))))
// return (int)shm;

@@ -563,6 +587,7 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
exregs_set_stack(&exregs, sp);
exregs_set_pc(&exregs, pc);
exregs_set_pager(&exregs, pager);
exregs_set_utcb(&exregs, task->utcb_address);

if ((err = l4_exchange_registers(&exregs, task->tid)) < 0) {
printf("l4_exchange_registers failed with %d.\n", err);
@@ -3,25 +3,122 @@
*
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban
*/

#include <l4lib/arch/utcb.h>
#include <l4/macros.h>
#include INC_GLUE(memlayout.h)
#include <mmap.h>
#include <utcb.h>
#include <lib/malloc.h>

/*
* UTCB management in Codezero:
*
* 1.) Every task in the system defines an array of utcbs in a special .utcb
* section that is page-aligned and to be kept wired in by the pager.
* 2.) The region marks are written to bootdesc structure at compile-time.
* 3.) Pager reads the bootdesc struct from the microkernel during init.
* 4.) Pager initialises structures to alloc/dealloc new utcbs for every address
* space.
* 5.) Pagers dynamically allocate utcb addresses as each thread is created
* via a thread_control() system call.
* 6.) Each thread in an address space learns their utcb address from a
* well-defined KIP offset. This is updated as each thread becomes runnable.
* UTCB management in Codezero
*/

/* Globally disjoint utcb virtual region pool */
static struct address_pool utcb_region_pool;

int utcb_pool_init()
{
int err;

/* Initialise the global shm virtual address pool */
if ((err = address_pool_init(&utcb_region_pool,
UTCB_AREA_START, UTCB_AREA_END)) < 0) {
printf("UTCB address pool initialisation failed.\n");
return err;
}
return 0;
}

void *utcb_new_address(int npages)
{
return address_new(&utcb_region_pool, npages);
}

int utcb_delete_address(void *utcb_address, int npages)
{
return address_del(&utcb_region_pool, utcb_address, npages);
}

/* Return an empty utcb slot in this descriptor */
unsigned long utcb_slot(struct utcb_desc *desc)
{
int slot;

if ((slot = id_new(desc->slots)) < 0)
return 0;
else
return desc->utcb_base + (unsigned long)slot * UTCB_SIZE;
}

unsigned long task_new_utcb_desc(struct tcb *task)
{
struct utcb_desc *d;

/* Allocate a new descriptor */
if (!(d = kzalloc(sizeof(*d))))
return 0;

INIT_LIST_HEAD(&d->list);

/* We currently assume UTCB is smaller than PAGE_SIZE */
BUG_ON(UTCB_SIZE > PAGE_SIZE);

/* Initialise utcb slots */
d->slots = id_pool_new_init(PAGE_SIZE / UTCB_SIZE);

/* Obtain a new and unique utcb base */
d->utcb_base = (unsigned long)utcb_new_address(1);

/* Add descriptor to tcb's chain */
list_add(&d->list, &task->utcb_head->list);

/* Obtain and return first slot */
return d->utcb_base + UTCB_SIZE * id_new(d->slots);
}

/*
* Upon fork, the utcb descriptor list is replaced by a new one, since it is a new
* address space. A new utcb is allocated and mmap'ed for the child task
* running in the newly created address space.
*
* The original privately mmap'ed regions for thread-local utcbs remain
* as copy-on-write on the new task, just like mmap'ed the stacks for cloned
* threads in the parent address space.
*
* Upon clone, naturally the utcb descriptor chain and vm_areas remain to be
* shared. A new utcb slot is allocated either by using an empty one in one of
* the existing mmap'ed utcb regions, or by mmaping a new utcb region.
*/
int task_setup_utcb(struct tcb *task)
{
struct utcb_desc *udesc;
unsigned long slot;
void *err;

/* Setting this up twice is a bug */
BUG_ON(task->utcb_address);

/* Search for an empty utcb slot already allocated to this space */
list_for_each_entry(udesc, &task->utcb_head->list, list)
if ((slot = utcb_slot(udesc)))
goto out;

/* Allocate a new utcb memory region and return its base */
slot = task_new_utcb_desc(task);
out:

/* Map this region as private to current task */
if (IS_ERR(err = do_mmap(0, 0, task, slot,
VMA_ANONYMOUS | VMA_PRIVATE | VMA_FIXED |
VM_READ | VM_WRITE, 1))) {
printf("UTCB: mmapping failed with %d\n", err);
return (int)err;
}

/* Assign task's utcb address */
task->utcb_address = slot;

return 0;
}
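To make the slot policy above concrete, here is a tiny stand-alone simulation (a sketch under assumed constants, not code from the commit) of the UTCB addresses that successive threads of one address space would receive from utcb_slot() and task_new_utcb_desc():

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define UTCB_SIZE 256UL                         /* sizeof(int) * 64 from memlayout.h */
#define SLOTS_PER_DESC (PAGE_SIZE / UTCB_SIZE)  /* 16 slots per descriptor */

int main(void)
{
        unsigned long region_pool_next = 0xF8000000UL;  /* UTCB_AREA_START */
        unsigned long base = 0;
        unsigned long used = SLOTS_PER_DESC;            /* force first allocation */

        for (int thread = 0; thread < 18; thread++) {
                if (used == SLOTS_PER_DESC) {
                        /* Descriptor full: grab a new page-sized region,
                         * as task_new_utcb_desc() does via utcb_new_address(). */
                        base = region_pool_next;
                        region_pool_next += PAGE_SIZE;
                        used = 0;
                }
                printf("thread %2d -> utcb %#lx\n", thread, base + used++ * UTCB_SIZE);
        }
        return 0;
}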