Kernel updates since December 2009
@@ -4,7 +4,7 @@
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c', 'bootmem.c', 'resource.c', 'container.c', 'capability.c', 'cinfo.c']
src_local = ['irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'space.c', 'bootmem.c', 'resource.c', 'container.c', 'capability.c', 'cinfo.c', 'debug.c']

obj = env.Object(src_local)
Return('obj')
@@ -15,10 +15,8 @@
* Increase this size if bootmem allocations fail.
*/
#define BOOTMEM_SIZE (SZ_4K * 4)

SECTION(".init.pgd") pgd_table_t init_pgd;
SECTION(".init.bootmem") char bootmem[BOOTMEM_SIZE];
__initdata struct address_space init_space;
struct address_space init_space;

static unsigned long cursor = (unsigned long)&bootmem;
@@ -16,6 +16,7 @@
#include <l4/api/exregs.h>
#include <l4/api/ipc.h>
#include <l4/api/irq.h>
#include <l4/api/cache.h>
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)
@@ -584,6 +585,7 @@ struct capability *cap_match_mem(struct capability *cap,
{
struct sys_map_args *args = args_ptr;
struct ktcb *target = args->task;
unsigned long long start, end, pfn_point;
unsigned long pfn;
unsigned int perms;
@@ -593,27 +595,45 @@ struct capability *cap_match_mem(struct capability *cap,
else
pfn = __pfn(args->virt);

/* Check range */
if (cap->start > pfn || cap->end < pfn + args->npages)
/* Long long range check to avoid overflow */
start = cap->start;
end = cap->end;
pfn_point = pfn;
if (start > pfn_point || cap->end < pfn_point + args->npages)
return 0;

/* Check permissions */
switch (args->flags) {
case MAP_USR_RW_FLAGS:
case MAP_USR_RW:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_RO_FLAGS:
case MAP_USR_RWX:
perms = CAP_MAP_READ | CAP_MAP_WRITE |
CAP_MAP_EXEC | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_RO:
perms = CAP_MAP_READ | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_IO_FLAGS:
case MAP_USR_RX:
perms = CAP_MAP_READ | CAP_MAP_EXEC | CAP_MAP_CACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_USR_IO:
perms = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_UNCACHED;
if ((cap->access & perms) != perms)
return 0;
break;
case MAP_UNMAP: /* Check for unmap syscall */
if (!(cap->access & CAP_MAP_UNMAP))
return 0;
break;
default:
/* Anything else is an invalid/unrecognised argument */
return 0;
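The point of the widened comparison above is that pfn + args->npages can wrap around in 32-bit arithmetic and make an out-of-range request look valid; promoting the operands to unsigned long long first removes the wrap. A stand-alone sketch of the effect (the helper names here are illustrative, not the kernel's own):

#include <stdint.h>
#include <stdio.h>

/* Narrow check: 32-bit arithmetic, pfn + npages can wrap past zero */
static int range_ok_narrow(uint32_t start, uint32_t end,
                           uint32_t pfn, uint32_t npages)
{
        return !(start > pfn || end < pfn + npages);
}

/* Widened check, in the style cap_match_mem() now uses */
static int range_ok_wide(uint32_t start, uint32_t end,
                         uint32_t pfn, uint32_t npages)
{
        unsigned long long s = start, e = end, p = pfn;

        return !(s > p || e < p + npages);
}

int main(void)
{
        /* 0xfffff000 + 0x2000 wraps to 0x1000 in 32-bit arithmetic */
        uint32_t pfn = 0xfffff000u, npages = 0x2000u;

        printf("narrow: %d  wide: %d\n",
               range_ok_narrow(0, 0xffffffffu, pfn, npages),
               range_ok_wide(0, 0xffffffffu, pfn, npages));
        /* prints "narrow: 1  wide: 0" -- the narrow check wrongly accepts */
        return 0;
}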
@@ -760,6 +780,53 @@ struct capability *cap_match_irqctrl(struct capability *cap,
return cap;
}


struct sys_cache_args {
unsigned long start;
unsigned long npages;
unsigned int flags;
};

struct capability *cap_match_cache(struct capability *cap, void *args_ptr)
{
struct sys_cache_args *args = args_ptr;
unsigned long pfn = __pfn(args->start);
unsigned long long start, end, pfn_point;
unsigned int perms;

/* Long long range check to avoid overflow */
start = cap->start;
end = cap->end;
pfn_point = pfn;
if (start > pfn_point || end < pfn_point + args->npages)
return 0;

/* Check permissions */
switch (args->flags) {
/* check for cache functionality flags */
case L4_INVALIDATE_DCACHE:
case L4_INVALIDATE_ICACHE:
case L4_INVALIDATE_TLB:
perms = CAP_CACHE_INVALIDATE;
if ((cap->access & perms) != perms)
return 0;
break;

case L4_CLEAN_DCACHE:
case L4_CLEAN_INVALIDATE_DCACHE:
perms = CAP_CACHE_CLEAN;
if ((cap->access & perms) != perms)
return 0;
break;

default:
/* Anything else is an invalid/unrecognised argument */
return 0;
}

return cap;
}

#if defined(CONFIG_CAPABILITIES)
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
{
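The (cap->access & perms) != perms test used throughout these matchers requires every requested permission bit to be present in the capability. A small self-contained illustration of that mask test (the flag values below are made up for the example, not the kernel's constants):

#include <stdio.h>

#define EX_CACHE_INVALIDATE  (1 << 0)   /* illustrative values only */
#define EX_CACHE_CLEAN       (1 << 1)

static int has_perms(unsigned int access, unsigned int perms)
{
        /* true only if every requested bit is granted */
        return (access & perms) == perms;
}

int main(void)
{
        unsigned int access = EX_CACHE_CLEAN;   /* clean-only capability */

        printf("clean allowed: %d\n",
               has_perms(access, EX_CACHE_CLEAN));                      /* 1 */
        printf("clean+invalidate allowed: %d\n",
               has_perms(access, EX_CACHE_CLEAN | EX_CACHE_INVALIDATE)); /* 0 */
        return 0;
}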
@@ -813,6 +880,26 @@ int cap_map_check(struct ktcb *target, unsigned long phys, unsigned long virt,
return 0;
}

int cap_unmap_check(struct ktcb *target, unsigned long virt,
unsigned long npages)
{
struct capability *virtmem;

/* Unmap check also uses identical struct as map check */
struct sys_map_args args = {
.task = target,
.virt = virt,
.npages = npages,
.flags = MAP_UNMAP,
};

if (!(virtmem = cap_find(current, cap_match_mem,
&args, CAP_TYPE_MAP_VIRTMEM)))
return -ENOCAP;

return 0;
}

/*
* Limitation: We currently only check from sender's
* perspective. This is because sender always targets a
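cap_find() is used here as a generic walk over the caller's capabilities that hands each candidate to a matcher such as cap_match_mem(). A stripped-down illustration of that callback pattern, with the capability list replaced by a plain array, so this is only a sketch and not the kernel's own cap_find():

#include <stddef.h>

struct ex_capability { unsigned int type; unsigned long start, end, access; };

typedef struct ex_capability *(*ex_match_func_t)(struct ex_capability *cap,
                                                 void *args);

/* Simplified stand-in for cap_find(): scan an array instead of a list */
static struct ex_capability *ex_find_cap(struct ex_capability *caps, int ncaps,
                                         ex_match_func_t match, void *args,
                                         unsigned int wanted_type)
{
        for (int i = 0; i < ncaps; i++) {
                if (caps[i].type != wanted_type)
                        continue;
                if (match(&caps[i], args))      /* matcher accepted it */
                        return &caps[i];
        }
        return NULL;                            /* caller maps this to -ENOCAP */
}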
@@ -906,6 +993,30 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
return 0;
}

/*
* This is just a wrapper call for l4_cache_control
* system call sanity check
*/
int cap_cache_check(unsigned long start, unsigned long end, unsigned int flags)
{
struct capability *virtmem;
struct sys_cache_args args = {
.start = start,
.npages = __pfn(end) - __pfn(start),
.flags = flags,
};

/*
* We just want to check if the virtual memory region
* concerned here has
* appropriate permissions for cache calls
*/
if (!(virtmem = cap_find(current, cap_match_cache,
&args, CAP_TYPE_MAP_VIRTMEM)))
return -ENOCAP;

return 0;
}

#else /* Meaning !CONFIG_CAPABILITIES */
int cap_mutex_check(unsigned long mutex_address, int mutex_op)
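A caller on the cache-control system call path would be expected to run this check before doing any cache maintenance. A minimal sketch of such a wrapper; the sys_cache_control name and its argument list are assumptions made for illustration, not taken from this diff:

/* Hypothetical syscall wrapper built around cap_cache_check() */
int sys_cache_control(unsigned long start, unsigned long end,
                      unsigned int flags)
{
        int err;

        /* Refuse the operation unless a matching virtmem capability
         * grants the requested cache permission on [start, end). */
        if ((err = cap_cache_check(start, end, flags)) < 0)
                return err;             /* typically -ENOCAP */

        /* ... perform the architecture-specific cache maintenance ... */
        return 0;
}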
@@ -930,6 +1041,12 @@ int cap_map_check(struct ktcb *task, unsigned long phys, unsigned long virt,
return 0;
}

int cap_unmap_check(struct ktcb *target, unsigned long virt,
unsigned long npages)
{
return 0;
}

int cap_exregs_check(struct ktcb *task, struct exregs_data *exregs)
{
return 0;
@@ -948,4 +1065,9 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
return 0;
}

int cap_cache_check(unsigned long start, unsigned long end,
unsigned int flags)
{
return 0;
}
#endif /* End of !CONFIG_CAPABILITIES */
@@ -8,8 +8,10 @@
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/thread.h>
#include <l4/api/errno.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mm.h)
#include INC_ARCH(linker.h)
@@ -83,22 +85,16 @@ struct container *container_find(struct kernel_resources *kres, l4id_t cid)
* This involves setting up pager's ktcb, space, utcb,
* all ids, registers, and mapping its (perhaps) first
* few pages in order to make it runnable.
*
* The first pager initialization is a special-case
* since it uses the current kernel pgd.
*/
int init_pager(struct pager *pager,
struct container *cont,
pgd_table_t *current_pgd)
int init_pager(struct pager *pager, struct container *cont)
{
struct ktcb *task;
struct address_space *space;
int first = !!current_pgd;

/*
* Set up dummy current cap_list so that cap accounting
* can be done to this pager. Note, that we're still on
* bootstack.
* idle task stack.
*/
cap_list_move(&current->cap_list, &pager->cap_list);
@@ -108,25 +104,8 @@ int init_pager(struct pager *pager,
/* New ktcb allocation is needed */
task = tcb_alloc_init(cont->cid);

/* If first, manually allocate/initalize space */
if (first) {
if (!(space = alloc_space()))
return -ENOMEM;

/* Set up space id */
space->spid = id_new(&kernel_resources.space_ids);

/* Initialize space structure */
link_init(&space->list);
mutex_init(&space->lock);
cap_list_init(&space->cap_list);
space->pgd = current_pgd;
address_space_attach(task, space);
} else {
/* Otherwise allocate conventionally */
space = address_space_create(0);
address_space_attach(task, space);
}
space = address_space_create(0);
address_space_attach(task, space);

/* Initialize ktcb */
task_init_registers(task, pager->start_address);
@@ -136,6 +115,9 @@ int init_pager(struct pager *pager,
task->tgid = task->tid;
task->container = cont;

/* Set cpu affinity */
thread_setup_affinity(task);

/* Add the address space to container space list */
address_space_add(task->space);
@@ -148,7 +130,7 @@ int init_pager(struct pager *pager,
/* Map the task's space */
add_mapping_pgd(pager->start_lma, pager->start_vma,
page_align_up(pager->memsize),
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
MAP_USR_RWX, TASK_PGD(task));

/* Move capability list from dummy to task's space cap list */
cap_list_move(&task->space->cap_list, &current->cap_list);
@@ -162,6 +144,7 @@ int init_pager(struct pager *pager,

/* Container list that keeps all tasks */
tcb_add(task);

return 0;
}
@@ -256,24 +239,15 @@ int update_dynamic_capids(struct kernel_resources *kres)
* Initialize all containers with their initial set of tasks,
* spaces, scheduler parameters such that they can be started.
*/
int container_init_pagers(struct kernel_resources *kres,
pgd_table_t *current_pgd)
int container_init_pagers(struct kernel_resources *kres)
{
struct container *cont;
struct pager *pager;
int first = 1;

list_foreach_struct(cont, &kres->containers.list, list) {
for (int i = 0; i < cont->npagers; i++) {
pager = &cont->pager[i];

/* First pager initializes specially */
if (first) {
init_pager(pager, cont, current_pgd);
first = 0;
} else {
init_pager(pager, cont, 0);
}
init_pager(pager, cont);
}
}
src/generic/debug.c (new file, 126 lines)
@@ -0,0 +1,126 @@
/*
* Basic debug information about the kernel
*
* Copyright (C) 2010 B Labs Ltd.
*
* Written by Bahadir Balban
*/
#include <l4/lib/printk.h>
#include <l4/generic/debug.h>
#include INC_SUBARCH(cpu.h)
#include <l4/generic/platform.h>

#if defined (CONFIG_DEBUG_ACCOUNTING)

struct system_accounting system_accounting;

void system_accounting_print(struct system_accounting *sys_acc)
{
printk("System Operations Accounting:\n\n");

printk("System calls:\n");
printk("=============\n");
printk("IPC: %llu\n", sys_acc->syscalls.ipc);
printk("Thread Switch: %llu\n", sys_acc->syscalls.tswitch);
printk("Thread Control: %llu\n", sys_acc->syscalls.tctrl);
printk("Exchange Registers: %llu\n", sys_acc->syscalls.exregs);
printk("Unmap: %llu\n", sys_acc->syscalls.unmap);
printk("Irq Control: %llu\n", sys_acc->syscalls.irqctrl);
printk("Map: %llu\n", sys_acc->syscalls.map);
printk("Getid: %llu\n", sys_acc->syscalls.getid);
printk("Capability Control: %llu\n", sys_acc->syscalls.capctrl);
printk("Time: %llu\n", sys_acc->syscalls.time);
printk("Mutex Control: %llu\n", sys_acc->syscalls.mutexctrl);
printk("Cache Control: %llu\n", sys_acc->syscalls.cachectrl);

printk("\nExceptions:\n");
printk("===========\n");
printk("System call: %llu\n", sys_acc->exceptions.syscall);
printk("Data Abort: %llu\n", sys_acc->exceptions.data_abort);
printk("Prefetch Abort: %llu\n", sys_acc->exceptions.prefetch_abort);
printk("Irq: %llu\n", sys_acc->exceptions.irq);
printk("Undef Abort: %llu\n", sys_acc->exceptions.undefined_abort);
printk("Context Switch: %llu\n", sys_acc->task_ops.context_switch);
printk("Space Switch: %llu\n", sys_acc->task_ops.space_switch);

printk("\nCache operations:\n");

}
#endif
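The counters printed above are presumably incremented from the fast paths by small helpers such as the system_account_irq() and system_account_context_switch() calls that appear later in this commit. A plausible shape for those helpers when CONFIG_DEBUG_ACCOUNTING is enabled, shown only as a sketch and not as the tree's actual header:

/* Sketch of the accounting helpers -- assumed, not copied from the tree */
#if defined(CONFIG_DEBUG_ACCOUNTING)
extern struct system_accounting system_accounting;

static inline void system_account_irq(void)
{
        system_accounting.exceptions.irq++;
}

static inline void system_account_context_switch(void)
{
        system_accounting.task_ops.context_switch++;
}
#else
static inline void system_account_irq(void) { }
static inline void system_account_context_switch(void) { }
#endif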
/*
* For spinlock debugging
*/
#if defined (CONFIG_DEBUG_SPINLOCKS)

#include <l4/lib/bit.h>

#define DEBUG_SPINLOCK_TOTAL 10

DECLARE_PERCPU(static unsigned long, held_lock_array[DEBUG_SPINLOCK_TOTAL]);
DECLARE_PERCPU(static u32, held_lock_bitmap);

void spin_lock_record_check(void *lock_addr)
{
int bit = 0;

/*
* Check if we already hold this lock
*/
for (int i = 0; i < DEBUG_SPINLOCK_TOTAL; i++) {
if (per_cpu(held_lock_array[i]) == (unsigned long)lock_addr) {
print_early("Spinlock already held.\n");
printk("lock_addr=%p\n", lock_addr);
BUG();
}
}

/*
* Add it as a new lock
*/
bit = find_and_set_first_free_bit(&per_cpu(held_lock_bitmap),
DEBUG_SPINLOCK_TOTAL);
per_cpu(held_lock_array[bit]) = (unsigned long)lock_addr;
}

void spin_unlock_delete_check(void *lock_addr)
{
/*
* Check if already unlocked
*/
if (*((unsigned int *)lock_addr) == 0) {
print_early("Spinlock already unlocked.");
BUG();
}

/*
* Search for the value
*/
for (int i = 0; i < DEBUG_SPINLOCK_TOTAL; i++) {
if (per_cpu(held_lock_array[i]) == (unsigned long)lock_addr) {
/*
* Delete its entry
*/
per_cpu(held_lock_array[i]) = 0;
BUG_ON(check_and_clear_bit(&per_cpu(held_lock_bitmap),
i) < 0);
return;
}
}
/*
* It must have been recorded
*/
BUG();
}

#endif
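These two checks only help if the spinlock implementation calls them around the real acquire and release. A sketch of how that wiring could look; the wrapper and the spin_lock_raw()/spin_unlock_raw() names are placeholders, not functions from the tree:

/* Sketch only: wiring the debug checks into lock/unlock wrappers */
static inline void debug_spin_lock(void *lock)
{
#if defined(CONFIG_DEBUG_SPINLOCKS)
        spin_lock_record_check(lock);     /* BUG() if we already hold it */
#endif
        spin_lock_raw(lock);              /* placeholder for the real acquire */
}

static inline void debug_spin_unlock(void *lock)
{
#if defined(CONFIG_DEBUG_SPINLOCKS)
        spin_unlock_delete_check(lock);   /* reads the lock word, so it must
                                           * run before the actual release */
#endif
        spin_unlock_raw(lock);            /* placeholder for the real release */
}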
@@ -6,6 +6,7 @@
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/debug.h>
#include <l4/generic/platform.h>
#include <l4/generic/tcb.h>
#include <l4/generic/irq.h>
@@ -57,7 +58,8 @@ static inline void cascade_irq_chip(struct irq_chip *this_chip)
{
if (this_chip->cascade >= 0) {
BUG_ON(IRQ_CHIPS_MAX == 1);
this_chip->ops.unmask(this_chip->cascade);
if(this_chip->ops.unmask)
this_chip->ops.unmask(this_chip->cascade);
}
}
@@ -69,7 +71,8 @@ void irq_controllers_init(void)
this_chip = irq_chip_array + i;

/* Initialise the irq chip (e.g. reset all registers) */
this_chip->ops.init();
if (this_chip->ops.init)
this_chip->ops.init();

/* Enable cascaded irq on this chip if it exists */
cascade_irq_chip(this_chip);
@@ -98,8 +101,10 @@ l4id_t global_irq_index(void)
this_chip = irq_chip_array + i;

/* Find local irq that is triggered on this chip */
BUG_ON((irq_index =
this_chip->ops.read_irq()) == IRQ_NIL);
if (this_chip->ops.read_irq) {
irq_index = this_chip->ops.read_irq(this_chip->data);
BUG_ON(irq_index == IRQ_NIL);
}

/* See if this irq is a cascaded irq */
if (irq_index == this_chip->cascade)
@@ -127,6 +132,8 @@ void do_irq(void)
l4id_t irq_index = global_irq_index();
struct irq_desc *this_irq = irq_desc_array + irq_index;

system_account_irq();

/*
* Note, this can be easily done a few instructions
* quicker by some immediate read/disable/enable_all().
@@ -8,9 +8,11 @@
#include <l4/generic/container.h>
#include <l4/generic/resource.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/platform.h>
#include <l4/lib/math.h>
#include <l4/lib/memcache.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_ARCH(linker.h)
#include INC_PLAT(platform.h)
#include <l4/api/errno.h>
@@ -132,22 +134,28 @@ void free_pmd(void *addr)
BUG_ON(mem_cache_free(kernel_resources.pmd_cache, addr) < 0);
}

void free_space(void *addr)
void free_space(void *addr, struct ktcb *task)
{
struct capability *cap;

BUG_ON(!(cap = capability_find_by_rtype(current,
BUG_ON(!(cap = capability_find_by_rtype(task,
CAP_RTYPE_SPACEPOOL)));
capability_free(cap, 1);

BUG_ON(mem_cache_free(kernel_resources.space_cache, addr) < 0);
}

void free_ktcb(void *addr)

/*
* Account it to pager, but if it doesn't exist,
* to current idle task
*/
void free_ktcb(void *addr, struct ktcb *acc_task)
{
struct capability *cap;

BUG_ON(!(cap = capability_find_by_rtype(current,
/* Account it to task's pager if it exists */
BUG_ON(!(cap = capability_find_by_rtype(acc_task,
CAP_RTYPE_THREADPOOL)));
capability_free(cap, 1);
@@ -449,8 +457,8 @@ void init_kernel_resources(struct kernel_resources *kres)

/* Set up total physical memory as single capability */
physmem = alloc_bootmem(sizeof(*physmem), 0);
physmem->start = __pfn(PHYS_MEM_START);
physmem->end = __pfn(PHYS_MEM_END);
physmem->start = __pfn(PLATFORM_PHYS_MEM_START);
physmem->end = __pfn(PLATFORM_PHYS_MEM_END);
link_init(&physmem->list);
cap_list_insert(physmem, &kres->physmem_free);
@@ -599,14 +607,7 @@ void setup_kernel_resources(struct boot_resources *bootres,
{
struct capability *cap;
struct container *container;
pgd_table_t *current_pgd;

/*
* See how many containers we have. Assign next
* unused container id for kernel resources
*/
kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0
//pgd_table_t *current_pgd;

/* First initialize the list of non-memory capabilities */
cap = boot_capability_create();
@@ -646,11 +647,34 @@ void setup_kernel_resources(struct boot_resources *bootres,
* since we want to avoid allocating an uncertain
* amount of memory from the boot allocators.
*/
current_pgd = realloc_page_tables();
// current_pgd = arch_realloc_page_tables();

/* Move it back */
cap_list_move(&kres->non_memory_caps, &current->cap_list);


/*
* Setting up ids used internally.
*
* See how many containers we have. Assign next
* unused container id for kernel resources
*/
kres->cid = id_get(&kres->container_ids, bootres->nconts + 1);
// kres->cid = id_get(&kres->container_ids, 0); // Gets id 0

/*
* Assign thread and space ids to current which will later
* become the idle task
*/
current->tid = id_new(&kres->ktcb_ids);
current->space->spid = id_new(&kres->space_ids);

/*
* Init per-cpu zombie lists
*/
for (int i = 0; i < CONFIG_NCPU; i++)
init_ktcb_list(&per_cpu_byid(kres->zombie_list, i));

/*
* Create real containers from compile-time created
* cinfo structures
@@ -667,8 +691,7 @@ void setup_kernel_resources(struct boot_resources *bootres,
}

/* Initialize pagers */
container_init_pagers(kres, current_pgd);

container_init_pagers(kres);
}

/*
@@ -703,11 +726,11 @@ struct mem_cache *init_resource_cache(int nstruct, int struct_size,
add_boot_mapping(__pfn_to_addr(cap->start),
virtual,
page_align_up(bufsize),
MAP_SVC_RW_FLAGS);
MAP_KERN_RW);
} else {
add_mapping_pgd(__pfn_to_addr(cap->start),
virtual, page_align_up(bufsize),
MAP_SVC_RW_FLAGS, &init_pgd);
MAP_KERN_RW, &init_pgd);
}
/* Unmap area from memcap */
memcap_unmap_range(cap, &kres->physmem_free,
@@ -791,12 +814,11 @@ void init_resource_allocators(struct boot_resources *bootres,
kres, 0);

/* Count boot pmds used so far and add them */
bootres->nkpmds += pgd_count_pmds(&init_pgd);
bootres->nkpmds += pgd_count_boot_pmds();

/*
* Calculate maximum possible pmds
* that may be used during this pmd
* cache init and add them.
* Calculate maximum possible pmds that may be used
* during this pmd cache initialization and add them.
*/
bootres->nkpmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
if (!is_aligned(bootres->npmds * PMD_SIZE,
@@ -15,60 +15,71 @@
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/generic/thread.h>
#include <l4/generic/debug.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include INC_SUBARCH(irq.h)

struct scheduler scheduler;
DECLARE_PERCPU(struct scheduler, scheduler);

/* This is incremented on each irq or voluntarily by preempt_disable() */
extern unsigned int current_irq_nest_count;
DECLARE_PERCPU(extern unsigned int, current_irq_nest_count);

/* This ensures no scheduling occurs after voluntary preempt_disable() */
static int voluntary_preempt = 0;
DECLARE_PERCPU(static int, voluntary_preempt);

void sched_lock_runqueues(unsigned long *irqflags)
void sched_lock_runqueues(struct scheduler *sched, unsigned long *irqflags)
{
spin_lock_irq(&scheduler.sched_rq[0].lock, irqflags);
spin_lock(&scheduler.sched_rq[1].lock);
spin_lock_irq(&sched->sched_rq[0].lock, irqflags);
spin_lock(&sched->sched_rq[1].lock);
BUG_ON(irqs_enabled());
}

void sched_unlock_runqueues(unsigned long irqflags)
void sched_unlock_runqueues(struct scheduler *sched, unsigned long irqflags)
{
spin_unlock(&scheduler.sched_rq[1].lock);
spin_unlock_irq(&scheduler.sched_rq[0].lock, irqflags);
spin_unlock(&sched->sched_rq[1].lock);
spin_unlock_irq(&sched->sched_rq[0].lock, irqflags);
}

int preemptive()
{
return current_irq_nest_count == 0;
return per_cpu(current_irq_nest_count) == 0;
}

int preempt_count()
{
return current_irq_nest_count;
return per_cpu(current_irq_nest_count);
}

#if !defined(CONFIG_PREEMPT_DISABLE)

void preempt_enable(void)
{
voluntary_preempt--;
current_irq_nest_count--;
per_cpu(voluntary_preempt)--;
per_cpu(current_irq_nest_count)--;
}

/* A positive irq nest count implies current context cannot be preempted. */
void preempt_disable(void)
{
current_irq_nest_count++;
voluntary_preempt++;
per_cpu(current_irq_nest_count)++;
per_cpu(voluntary_preempt)++;
}

#else /* End of !CONFIG_PREEMPT_DISABLE */

void preempt_enable(void) { }
void preempt_disable(void) { }

#endif /* CONFIG_PREEMPT_DISABLE */

int in_irq_context(void)
{
/*
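DECLARE_PERCPU(), per_cpu() and per_cpu_byid() are what turn the single global scheduler state above into one instance per CPU. Their definitions are not part of this diff; a common way such macros are built, shown purely as an illustration of the idea and not as codezero's implementation, is a static array indexed by the current CPU id:

/* Illustrative per-CPU macros -- NOT the macros used by the kernel above */
#define MY_NCPU 4

#define MY_DECLARE_PERCPU(type, name)   type name[MY_NCPU]
#define MY_PER_CPU_BYID(name, cpu)      ((name)[(cpu)])
#define MY_PER_CPU(name)                MY_PER_CPU_BYID(name, my_cpu_id())

static inline int my_cpu_id(void)
{
        /* placeholder: a real kernel reads a hardware register here */
        return 0;
}

/* Usage mirrors the pattern in the diff */
MY_DECLARE_PERCPU(unsigned int, irq_nest_count);

static inline void nest_enter(void) { MY_PER_CPU(irq_nest_count)++; }
static inline void nest_exit(void)  { MY_PER_CPU(irq_nest_count)--; }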
@@ -76,13 +87,15 @@ int in_irq_context(void)
* one more than all preempt_disable()'s which are
* counted by voluntary_preempt.
*/
return (current_irq_nest_count == (voluntary_preempt + 1));
return (per_cpu(current_irq_nest_count) ==
(per_cpu(voluntary_preempt) + 1));
}

int in_nested_irq_context(void)
{
/* Deducing voluntary preemptions we get real irq nesting */
return (current_irq_nest_count - voluntary_preempt) > 1;
return (per_cpu(current_irq_nest_count) -
per_cpu(voluntary_preempt)) > 1;
}

int in_process_context(void)
@@ -90,36 +103,24 @@ int in_process_context(void)
return !in_irq_context();
}

/*
* In current implementation, if all task are asleep it is considered
* a bug. We use idle_task() to investigate.
*
* In the future, it will be natural that all tasks may be asleep,
* so this will change to something such as a Wait-for-Interrupt
* routine.
*/
void idle_task(void)
void sched_init_runqueue(struct scheduler *sched, struct runqueue *rq)
{
printk("Idle task.\n");

while(1);
}

void sched_init_runqueue(struct runqueue *rq)
{
memset(rq, 0, sizeof(struct runqueue));
link_init(&rq->task_list);
spin_lock_init(&rq->lock);
rq->sched = sched;
}

void sched_init(struct scheduler *scheduler)
void sched_init()
{
for (int i = 0; i < SCHED_RQ_TOTAL; i++)
sched_init_runqueue(&scheduler->sched_rq[i]);
struct scheduler *sched = &per_cpu(scheduler);

scheduler->rq_runnable = &scheduler->sched_rq[0];
scheduler->rq_expired = &scheduler->sched_rq[1];
scheduler->prio_total = TASK_PRIO_TOTAL;
for (int i = 0; i < SCHED_RQ_TOTAL; i++)
sched_init_runqueue(sched, &sched->sched_rq[i]);

sched->rq_runnable = &sched->sched_rq[0];
sched->rq_expired = &sched->sched_rq[1];
sched->prio_total = TASK_PRIO_TOTAL;
sched->idle_task = current;
}

/* Swap runnable and expired runqueues. */
@@ -127,12 +128,12 @@ static void sched_rq_swap_runqueues(void)
{
struct runqueue *temp;

BUG_ON(list_empty(&scheduler.rq_expired->task_list));
BUG_ON(list_empty(&per_cpu(scheduler).rq_expired->task_list));

/* Queues are swapped and expired list becomes runnable */
temp = scheduler.rq_runnable;
scheduler.rq_runnable = scheduler.rq_expired;
scheduler.rq_expired = temp;
temp = per_cpu(scheduler).rq_runnable;
per_cpu(scheduler).rq_runnable = per_cpu(scheduler).rq_expired;
per_cpu(scheduler).rq_expired = temp;
}

/* Set policy on where to add tasks in the runqueue */
@@ -143,39 +144,45 @@ static void sched_rq_swap_runqueues(void)
static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
{
unsigned long irqflags;
struct scheduler *sched =
&per_cpu_byid(scheduler, task->affinity);

BUG_ON(!list_empty(&task->rq_list));

sched_lock_runqueues(&irqflags);
/* Lock that particular cpu's runqueue set */
sched_lock_runqueues(sched, &irqflags);
if (front)
list_insert(&task->rq_list, &rq->task_list);
else
list_insert_tail(&task->rq_list, &rq->task_list);
rq->total++;
task->rq = rq;
sched_unlock_runqueues(irqflags);

/* Unlock that particular cpu's runqueue set */
sched_unlock_runqueues(sched, irqflags);
}

/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task)
{
struct runqueue *rq;
unsigned long irqflags;
struct scheduler *sched =
&per_cpu_byid(scheduler, task->affinity);

sched_lock_runqueues(&irqflags);
sched_lock_runqueues(sched, &irqflags);

/*
* We must lock both, otherwise rqs may swap and
* we may get the wrong rq.
*/
rq = task->rq;
BUG_ON(list_empty(&task->rq_list));
list_remove_init(&task->rq_list);
task->rq = 0;
rq->total--;

BUG_ON(rq->total < 0);
sched_unlock_runqueues(irqflags);
task->rq->total--;
BUG_ON(task->rq->total < 0);
task->rq = 0;

sched_unlock_runqueues(sched, irqflags);
}
@@ -209,7 +216,8 @@ void sched_resume_sync(struct ktcb *task)
BUG_ON(task == current);
task->state = TASK_RUNNABLE;
sched_rq_add_task(task,
scheduler.rq_runnable,
per_cpu_byid(scheduler,
task->affinity).rq_runnable,
RQ_ADD_FRONT);
schedule();
}
@@ -224,38 +232,10 @@ void sched_resume_async(struct ktcb *task)
{
task->state = TASK_RUNNABLE;
sched_rq_add_task(task,
scheduler.rq_runnable,
per_cpu_byid(scheduler,
task->affinity).rq_runnable,
RQ_ADD_FRONT);
}


/* Same as suspend, task state and flags are different */
void sched_exit_sync(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_DEAD;
current->flags &= ~TASK_EXITING;

if (current->pagerid != current->tid)
wake_up(&current->wqh_pager, 0);
preempt_enable();

schedule();
}

void sched_exit_async(void)
{
preempt_disable();
sched_rq_remove_task(current);
current->state = TASK_DEAD;
current->flags &= ~TASK_EXITING;

if (current->pagerid != current->tid)
wake_up(&current->wqh_pager, 0);
preempt_enable();

need_resched = 1;
// printk("CPU%d: Resuming task %d with affinity %d\n", smp_get_cpuid(), task->tid, task->affinity);
}

/*
@@ -291,23 +271,25 @@ void sched_suspend_async(void)
}


extern void arch_switch(struct ktcb *cur, struct ktcb *next);
extern void arch_context_switch(struct ktcb *cur, struct ktcb *next);

static inline void context_switch(struct ktcb *next)
{
struct ktcb *cur = current;

//printk("(%d) to (%d)\n", cur->tid, next->tid);
// printk("Core:%d (%d) to (%d)\n", smp_get_cpuid(), cur->tid, next->tid);

system_account_context_switch();

/* Flush caches and everything */
arch_hardware_flush(TASK_PGD(next));
if (current->space->spid != next->space->spid)
arch_space_switch(next);

/* Update utcb region for next task */
task_update_utcb(next);

/* Switch context */
arch_switch(cur, next);
arch_context_switch(cur, next);

// printk("Returning from yield. Tid: (%d)\n", cur->tid);
}
@@ -321,7 +303,7 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
BUG_ON(prio_total < task->priority);
BUG_ON(prio_total == 0);
return task->ticks_assigned =
SCHED_TICKS * task->priority / prio_total;
CONFIG_SCHED_TICKS * task->priority / prio_total;
}
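As a concrete illustration (the numbers are made up, not taken from the configuration): if CONFIG_SCHED_TICKS were 100 and a task of priority 5 sat in a runqueue whose prio_total is 20, it would be assigned 100 * 5 / 20 = 25 ticks for the current scheduling round.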
@@ -360,11 +342,11 @@ void schedule()

/* Should not schedule with preemption
* disabled or in nested irq */
BUG_ON(voluntary_preempt);
BUG_ON(per_cpu(voluntary_preempt));
BUG_ON(in_nested_irq_context());

/* Should not have more ticks than SCHED_TICKS */
BUG_ON(current->ticks_left > SCHED_TICKS);
BUG_ON(current->ticks_left > CONFIG_SCHED_TICKS);

/* If coming from process path, cannot have
* any irqs that schedule after this */
@@ -378,16 +360,17 @@ void schedule()
sched_rq_remove_task(current);
if (current->ticks_left)
sched_rq_add_task(current,
scheduler.rq_runnable,
per_cpu(scheduler).rq_runnable,
RQ_ADD_BEHIND);
else
sched_rq_add_task(current,
scheduler.rq_expired,
per_cpu(scheduler).rq_expired,
RQ_ADD_BEHIND);
}

/*
* FIXME: Are these smp-safe?
* FIXME: Are these smp-safe? BB: On first glance they
* should be because runqueues are per-cpu right now.
*
* If task is about to sleep and
* it has pending events, wake it up.
@@ -406,31 +389,36 @@ void schedule()
TASK_IN_USER(current)) {
if (current->flags & TASK_SUSPENDING)
sched_suspend_async();
else if (current->flags & TASK_EXITING)
sched_exit_async();
}

/* Determine the next task to be run */
do {
if (scheduler.rq_runnable->total > 0) {
next = link_to_struct(scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
/* Simpler task pick up loop. May put in sched_pick_next() */
for (;;) {
struct scheduler *sched = &per_cpu(scheduler);

/* If we or a child has just exited, run idle task once for clean up */
if (current->flags & TASK_EXITED) {
current->flags &= ~TASK_EXITED;
next = sched->idle_task;
break;
} else if (sched->rq_runnable->total > 0) {
/* Get a runnable task, if available */
next = link_to_struct(sched->rq_runnable->task_list.next,
struct ktcb, rq_list);
break;
} else if (sched->rq_expired->total > 0) {
/* Swap queues and retry if not */
sched_rq_swap_runqueues();
continue;
} else if (in_process_context()) {
/* Do idle task if no runnable tasks and in process */
next = sched->idle_task;
break;
} else {
if (scheduler.rq_expired->total > 0) {
sched_rq_swap_runqueues();
next = link_to_struct(
scheduler.rq_runnable->task_list.next,
struct ktcb, rq_list);
} else {
/* Irq preemptions return to current task
* if no runnable tasks are available */
next = current;
}
/* Irq calls must return to interrupted current process */
next = current;
break;
}
/* If process context, poll forever for new tasks */
} while (scheduler.rq_runnable->total == 0 &&
scheduler.rq_expired->total == 0 &&
in_process_context());
}

/* New tasks affect runqueue total priority. */
if (next->flags & TASK_RESUMING)
@@ -443,7 +431,7 @@ void schedule()
* becomes runnable rather than all at once. It is done
* every runqueue swap
*/
sched_recalc_ticks(next, scheduler.prio_total);
sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
next->ticks_left = next->ticks_assigned;
}
@@ -462,7 +450,7 @@ void schedule()
*/
void scheduler_start()
{
timer_start();
platform_timer_start();
switch_to_user(current);
}
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Bahadir Balban
*/
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
@@ -15,7 +16,6 @@
#include <l4/api/kip.h>
#include <l4/lib/idpool.h>


void init_address_space_list(struct address_space_list *space_list)
{
memset(space_list, 0, sizeof(*space_list));
@@ -47,15 +47,16 @@ void address_space_add(struct address_space *space)
BUG_ON(!++curcont->space_list.count);
}

void address_space_remove(struct address_space *space)
void address_space_remove(struct address_space *space, struct container *cont)
{
BUG_ON(list_empty(&space->list));
BUG_ON(--curcont->space_list.count < 0);
BUG_ON(--cont->space_list.count < 0);
list_remove_init(&space->list);
}

/* Assumes address space reflock is already held */
void address_space_delete(struct address_space *space)
void address_space_delete(struct address_space *space,
struct ktcb *task_accounted)
{
BUG_ON(space->ktcb_refs);
@@ -66,7 +67,7 @@ void address_space_delete(struct address_space *space)
id_del(&kernel_resources.space_ids, space->spid);

/* Deallocate the space structure */
free_space(space);
free_space(space, task_accounted);
}

struct address_space *address_space_create(struct address_space *orig)
@@ -81,7 +82,7 @@ struct address_space *address_space_create(struct address_space *orig)

/* Allocate pgd */
if (!(pgd = alloc_pgd())) {
free_space(space);
free_space(space, current);
return PTR_ERR(-ENOMEM);
}
@@ -92,7 +93,7 @@ struct address_space *address_space_create(struct address_space *orig)
space->pgd = pgd;

/* Copy all kernel entries */
copy_pgd_kern_all(pgd);
arch_copy_pgd_kernel_entries(pgd);

/*
* Set up space id: Always allocate a new one. Specifying a space id
@@ -106,7 +107,7 @@ struct address_space *address_space_create(struct address_space *orig)
/* Copy its user entries/tables */
if ((err = copy_user_tables(space, orig)) < 0) {
free_pgd(pgd);
free_space(space);
free_space(space, current);
return PTR_ERR(err);
}
}
@@ -16,6 +16,8 @@
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)
#include INC_SUBARCH(mmu_ops.h)

void init_ktcb_list(struct ktcb_list *ktcb_list)
{
@@ -30,6 +32,8 @@ void tcb_init(struct ktcb *new)
link_init(&new->task_list);
mutex_init(&new->thread_control_lock);

spin_lock_init(&new->thread_lock);

init_ktcb_list(&new->child_exit_list);
cap_list_init(&new->cap_list);
@@ -65,32 +69,52 @@ struct ktcb *tcb_alloc_init(l4id_t cid)

void tcb_delete(struct ktcb *tcb)
{
struct ktcb *pager, *acc_task;

/* Sanity checks first */
BUG_ON(!is_page_aligned(tcb));
BUG_ON(tcb->wqh_pager.sleepers > 0);
BUG_ON(tcb->wqh_send.sleepers > 0);
BUG_ON(tcb->wqh_recv.sleepers > 0);
BUG_ON(!list_empty(&tcb->task_list) &&
!(tcb->flags & TASK_EXITING));
BUG_ON(!list_empty(&tcb->rq_list) && tcb != current);
BUG_ON(tcb->rq && tcb != current);
BUG_ON(tcb->affinity != current->affinity);
BUG_ON(tcb->state != TASK_INACTIVE);
BUG_ON(!list_empty(&tcb->rq_list));
BUG_ON(tcb->rq);
BUG_ON(tcb == current);
BUG_ON(tcb->nlocks);
BUG_ON(tcb->waiting_on);
BUG_ON(tcb->wq);

mutex_lock(&curcont->space_list.lock);
/* Remove from zombie list */
list_remove(&tcb->task_list);

/* Determine task to account deletions */
if (!(pager = tcb_find(tcb->pagerid)))
acc_task = current;
else
acc_task = pager;

/*
* NOTE: This protects single threaded space
* deletion against space modification.
*
* If space deletion were multi-threaded, list
* traversal would be needed to ensure list is
* still there.
*/
mutex_lock(&tcb->container->space_list.lock);
mutex_lock(&tcb->space->lock);
BUG_ON(--tcb->space->ktcb_refs < 0);

/* No refs left for the space, delete it */
if (tcb->space->ktcb_refs == 0) {
address_space_remove(tcb->space);
address_space_remove(tcb->space, tcb->container);
mutex_unlock(&tcb->space->lock);
address_space_delete(tcb->space);
mutex_unlock(&curcont->space_list.lock);
address_space_delete(tcb->space, acc_task);
mutex_unlock(&tcb->container->space_list.lock);
} else {
mutex_unlock(&tcb->space->lock);
mutex_unlock(&curcont->space_list.lock);
mutex_unlock(&tcb->container->space_list.lock);
}

/* Clear container id part */
@@ -100,7 +124,7 @@ void tcb_delete(struct ktcb *tcb)
id_del(&kernel_resources.ktcb_ids, tcb->tid);

/* Free the tcb */
free_ktcb(tcb);
free_ktcb(tcb, acc_task);
}

struct ktcb *tcb_find_by_space(l4id_t spid)
@@ -133,6 +157,22 @@ struct ktcb *container_find_tcb(struct container *c, l4id_t tid)
return 0;
}

struct ktcb *container_find_lock_tcb(struct container *c, l4id_t tid)
{
struct ktcb *task;

spin_lock(&c->ktcb_list.list_lock);
list_foreach_struct(task, &c->ktcb_list.list, task_list) {
if (task->tid == tid) {
spin_lock(&task->thread_lock);
spin_unlock(&c->ktcb_list.list_lock);
return task;
}
}
spin_unlock(&c->ktcb_list.list_lock);
return 0;
}

/*
* Threads are the only resource where inter-container searches are
* allowed. This is because on other containers, only threads can be
@@ -158,6 +198,26 @@ struct ktcb *tcb_find(l4id_t tid)
}
}

struct ktcb *tcb_find_lock(l4id_t tid)
{
struct container *c;

if (current->tid == tid) {
spin_lock(&current->thread_lock);
return current;
}

if (tid_to_cid(tid) == curcont->cid) {
return container_find_lock_tcb(curcont, tid);
} else {
if (!(c = container_find(&kernel_resources,
tid_to_cid(tid))))
return 0;
else
return container_find_lock_tcb(c, tid);
}
}

void ktcb_list_add(struct ktcb *new, struct ktcb_list *ktcb_list)
{
spin_lock(&ktcb_list->list_lock);
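container_find_lock_tcb() and tcb_find_lock() return with the found task's thread_lock held, so the caller is responsible for releasing it. A hypothetical caller, shown only to illustrate that lock-ownership convention (the function itself and the -ESRCH return value are assumptions, not code from the tree):

/* Hypothetical caller of the find-and-lock helpers */
static int signal_task(l4id_t tid)
{
        struct ktcb *task;

        if (!(task = tcb_find_lock(tid)))
                return -ESRCH;          /* assumed error code, for the sketch */

        /* task->thread_lock is held here, so the task cannot be
         * removed concurrently while we inspect or modify it. */
        /* ... do the work ... */

        spin_unlock(&task->thread_lock);
        return 0;
}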
@@ -178,13 +238,45 @@ void tcb_add(struct ktcb *new)
spin_unlock(&c->ktcb_list.list_lock);
}

void tcb_remove(struct ktcb *new)
/*
* Its important that this is per-cpu. This is
* because it must be guaranteed that the task
* is not runnable. Idle task on that cpu guarantees it.
*/
void tcb_delete_zombies(void)
{
struct ktcb *zombie, *n;
struct ktcb_list *ktcb_list =
&per_cpu(kernel_resources.zombie_list);

/* Traverse the per-cpu zombie list */
spin_lock(&ktcb_list->list_lock);
list_foreach_removable_struct(zombie, n,
&ktcb_list->list,
task_list)
/* Delete all zombies one by one */
tcb_delete(zombie);
spin_unlock(&ktcb_list->list_lock);
}


/*
* It's enough to lock list and thread without
* traversing the list, because we're only
* protecting against thread modification.
* Deletion is a single-threaded operation
*/
void tcb_remove(struct ktcb *task)
{
/* Lock list */
spin_lock(&curcont->ktcb_list.list_lock);
BUG_ON(list_empty(&new->task_list));
BUG_ON(list_empty(&task->task_list));
BUG_ON(--curcont->ktcb_list.count < 0);
list_remove_init(&new->task_list);
spin_lock(&task->thread_lock);

list_remove_init(&task->task_list);
spin_unlock(&curcont->ktcb_list.list_lock);
spin_unlock(&task->thread_lock);
}

void ktcb_list_remove(struct ktcb *new, struct ktcb_list *ktcb_list)
@@ -207,8 +299,7 @@ unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);
*/
void task_update_utcb(struct ktcb *task)
{
/* Update the KIP pointer */
kip.utcb = task->utcb_address;
arch_update_utcb(task->utcb_address);
}

/*
@@ -251,7 +342,7 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
if (current == task) {
/* Check own utcb, if not there, page it in */
if ((ret = check_access(task->utcb_address, UTCB_SIZE,
MAP_SVC_RW_FLAGS, page_in)) < 0)
MAP_KERN_RW, page_in)) < 0)
return -EFAULT;
else
return 0;
@@ -259,7 +350,7 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
/* Check another's utcb, but don't try to map in */
if ((ret = check_access_task(task->utcb_address,
UTCB_SIZE,
MAP_SVC_RW_FLAGS, 0,
MAP_KERN_RW, 0,
task)) < 0) {
return -EFAULT;
} else {
@@ -268,18 +359,18 @@ int tcb_check_and_lazy_map_utcb(struct ktcb *task, int page_in)
* unless they're identical
*/
if ((phys =
virt_to_phys_by_pgd(task->utcb_address,
TASK_PGD(task))) !=
virt_to_phys_by_pgd(task->utcb_address,
TASK_PGD(current))) {
virt_to_phys_by_pgd(TASK_PGD(task),
task->utcb_address)) !=
virt_to_phys_by_pgd(TASK_PGD(current),
task->utcb_address)) {
/*
* We have none or an old reference.
* Update it with privileged flags,
* so that only kernel can access.
*/
add_mapping_pgd(phys, task->utcb_address,
add_mapping_pgd(phys, page_align(task->utcb_address),
page_align_up(UTCB_SIZE),
MAP_SVC_RW_FLAGS,
MAP_KERN_RW,
TASK_PGD(current));
}
BUG_ON(!phys);
@@ -58,7 +58,7 @@ void update_system_time(void)
* TODO: Investigate: how do we make sure timer_irq is
* called SCHED_TICKS times per second?
*/
if (systime.thz == SCHED_TICKS) {
if (systime.thz == CONFIG_SCHED_TICKS) {
systime.thz = 0;
systime.sec++;
}
@@ -71,7 +71,7 @@ int sys_time(struct timeval *tv, int set)
int err;

if ((err = check_access((unsigned long)tv, sizeof(*tv),
MAP_USR_RW_FLAGS, 1)) < 0)
MAP_USR_RW, 1)) < 0)
return err;

/* Get time */
@@ -79,7 +79,7 @@ int sys_time(struct timeval *tv, int set)
while(retries > 0) {
systime.reader = 1;
tv->tv_sec = systime.sec;
tv->tv_usec = 1000000 * systime.thz / SCHED_TICKS;
tv->tv_usec = 1000000 * systime.thz / CONFIG_SCHED_TICKS;

retries--;
if (systime.reader)
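For illustration only, assuming a hypothetical CONFIG_SCHED_TICKS of 1000: a thz value of 250 would yield tv_usec = 1000000 * 250 / 1000 = 250000, i.e. a quarter of a second into the current second.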