Mirror of https://github.com/drasko/codezero.git
Initial commit
src/generic/SConscript (new file, 10 lines)
@@ -0,0 +1,10 @@
# Inherit global environment
Import('env')

# The set of source files associated with this SConscript file.
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c']

obj = env.Object(src_local)
Return('obj')
src/generic/irq.c (new file, 77 lines)
@@ -0,0 +1,77 @@
/*
 * Kernel irq handling (core irqs like the timer). Also hope to add
 * thread-level irq handling in the future.
 *
 * Copyright (C) 2007 Bahadir Balban
 *
 */
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/generic/platform.h>
#include <l4/generic/irq.h>
#include <l4/lib/mutex.h>
#include <l4/generic/scheduler.h>
#include <l4/lib/printk.h>
#include INC_PLAT(irq.h)
#include INC_ARCH(exception.h)

/* This enables the lower chip on the current chip, if such chaining exists. */
static inline void cascade_irq_chip(struct irq_chip *this_chip)
{
        if (this_chip->cascade >= 0) {
                BUG_ON(IRQ_CHIPS_MAX == 1);
                this_chip->ops.unmask(this_chip->cascade);
        }
}

void irq_controllers_init(void)
{
        struct irq_chip *this_chip;

        for (int i = 0; i < IRQ_CHIPS_MAX; i++) {
                this_chip = irq_chip_array + i;
                /* Initialise the irq chips (e.g. reset all registers) */
                this_chip->ops.init();
                /* Enable cascaded irqs if needed */
                cascade_irq_chip(this_chip);
        }
}

int global_irq_index(void)
{
        struct irq_chip *this_chip;
        int irq_index = 0;

        /* Loop over irq chips from top to bottom until
         * the actual irq on the lowest chip is found. */
        for (int i = 0; i < IRQ_CHIPS_MAX; i++) {
                this_chip = irq_chip_array + i;
                BUG_ON((irq_index = this_chip->ops.read_irq()) < 0);
                if (irq_index != this_chip->cascade) {
                        irq_index += this_chip->offset;
                        /* Found the real irq, return */
                        break;
                }
                /* Hit the cascading irq. Continue on the next irq chip. */
        }
        return irq_index;
}

void do_irq(void)
{
        int irq_index = global_irq_index();
        struct irq_desc *this_irq = irq_desc_array + irq_index;

        /* TODO: This could be done a few instructions quicker with an
         * immediate read/disable/enable_all(). We stick with this clear
         * implementation for now. */
        irq_disable(irq_index);
        enable_irqs();
        /* TODO: Call irq_thread_notify(irq_index) for threaded irqs. */
        BUG_ON(!this_irq->handler);
        if (this_irq->handler() != IRQ_HANDLED) {
                printk("Spurious or broken irq\n");
                BUG();
        }
        irq_enable(irq_index);
}
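The offset/cascade walk in global_irq_index() is easier to follow with concrete numbers. Below is a small standalone sketch, plain user-space C with a mock two-chip table whose values are invented rather than taken from any platform code, that resolves an irq the same way: the first chip reports its cascade input as pending, so the walk descends to the second chip and maps its local irq 3 to global irq 35.

/*
 * Standalone sketch (not kernel code): mimics the offset/cascade walk of
 * global_irq_index() over a mock two-chip array. All values are made up.
 */
#include <stdio.h>

#define CHIPS_MAX 2

struct mock_chip {
        int offset;     /* first global irq number served by this chip */
        int cascade;    /* local input wired to the next chip, -1 if none */
        int pending;    /* local irq this chip would report as pending */
};

static struct mock_chip chips[CHIPS_MAX] = {
        { .offset = 0,  .cascade = 10, .pending = 10 }, /* cascade input fired */
        { .offset = 32, .cascade = -1, .pending = 3  }, /* real source: local irq 3 */
};

static int mock_global_irq_index(void)
{
        int irq = 0;

        for (int i = 0; i < CHIPS_MAX; i++) {
                irq = chips[i].pending;
                if (irq != chips[i].cascade)
                        return irq + chips[i].offset;   /* found the real source */
                /* Hit the cascade input: descend to the next chip. */
        }
        return irq;
}

int main(void)
{
        printf("global irq = %d\n", mock_global_irq_index());  /* prints 35 */
        return 0;
}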
src/generic/kmalloc.c (new file, 101 lines)
@@ -0,0 +1,101 @@
/*
 * Memory pool based kmalloc.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/list.h>
#include <l4/lib/memcache.h>
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/generic/pgalloc.h>
#include INC_GLUE(memory.h)

/* Supports this many different kmalloc sizes */
#define KMALLOC_POOLS_MAX 5

struct kmalloc_mempool {
        int total;
        struct list_head pool_head[KMALLOC_POOLS_MAX];
};
struct kmalloc_mempool km_pool;

void init_kmalloc()
{
        for (int i = 0; i < KMALLOC_POOLS_MAX; i++)
                INIT_LIST_HEAD(&km_pool.pool_head[i]);
}

/*
 * Allocates memory from mem_caches that it generates on-the-fly,
 * for up to KMALLOC_POOLS_MAX different sizes.
 */
void *kmalloc(int size)
{
        struct mem_cache *cache, *n;
        int right_sized_pool_idx = -1;
        int index;

        /* Search all existing pools for this size, and if found, free bufs */
        for (int i = 0; i < km_pool.total; i++) {
                list_for_each_entry_safe(cache, n, &km_pool.pool_head[i], list) {
                        if (cache->struct_size == size) {
                                right_sized_pool_idx = i;
                                if (cache->free)
                                        return mem_cache_alloc(cache);
                                else
                                        continue;
                        } else
                                break;
                }
        }

        /*
         * No such pool list is available at hand, and we don't have room
         * for new pool lists.
         */
        if ((right_sized_pool_idx < 0) &&
            (km_pool.total == KMALLOC_POOLS_MAX - 1)) {
                printk("kmalloc: Too many types of pool sizes requested. "
                       "Giving up.\n");
                BUG();
        }

        if (right_sized_pool_idx >= 0)
                index = right_sized_pool_idx;
        else
                index = km_pool.total++;

        /* Only allow up to page size */
        BUG_ON(size >= PAGE_SIZE);
        BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
                                        size, 0)));
        list_add(&cache->list, &km_pool.pool_head[index]);
        return mem_cache_alloc(cache);
}

/* FIXME:
 * Horrible complexity O(n^2) because we don't know which cache
 * we're freeing from!!! But it's simple. ;-)
 */
int kfree(void *p)
{
        struct mem_cache *cache, *tmp;

        for (int i = 0; i < km_pool.total; i++)
                list_for_each_entry_safe(cache, tmp, &km_pool.pool_head[i], list)
                        if (!mem_cache_free(cache, p)) {
                                if (mem_cache_is_empty(cache)) {
                                        list_del(&cache->list);
                                        free_page(cache);
                                        /* Total remains the same. */
                                }
                                return 0;
                        }
        return -1;
}

void *kzalloc(int size)
{
        void *p = kmalloc(size);
        memset(p, 0, size);
        return p;
}
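To see how the pool bookkeeping above behaves over time, the following standalone sketch (user-space C; the counters and the growth rule are simplifications that ignore mem_cache header overhead) models one pool slot per object size and grows a pool by one page-sized cache whenever it runs dry, much as kmalloc() appends a fresh mem_cache to km_pool.pool_head[index].

/*
 * Standalone sketch (hypothetical, user-space): models the kmalloc pool
 * bookkeeping with plain counters instead of real mem_caches. One pool
 * slot per object size; each pool grows by whole "pages" when empty.
 */
#include <stdio.h>

#define POOLS_MAX       5
#define PAGE_SIZE       4096

struct mock_pool {
        int obj_size;   /* object size served by this pool, 0 = unused slot */
        int free_objs;  /* buffers left across all caches of this pool */
        int caches;     /* number of page-sized caches backing the pool */
};

static struct mock_pool pools[POOLS_MAX];
static int total;

/* Return the pool index serving 'size', claiming a new slot if needed. */
static int pool_for_size(int size)
{
        for (int i = 0; i < total; i++)
                if (pools[i].obj_size == size)
                        return i;
        /* New size: claim the next slot, as kmalloc() does with km_pool.total++ */
        pools[total].obj_size = size;
        return total++;
}

static void mock_kmalloc(int size)
{
        int i = pool_for_size(size);

        if (!pools[i].free_objs) {      /* grow by one page-backed cache */
                pools[i].caches++;
                pools[i].free_objs += PAGE_SIZE / size;
        }
        pools[i].free_objs--;
}

int main(void)
{
        for (int n = 0; n < 100; n++)
                mock_kmalloc(64);
        mock_kmalloc(256);

        /* 100 allocations of 64 bytes need two page-sized caches. */
        printf("64-byte pool:  %d cache(s), %d free\n",
               pools[0].caches, pools[0].free_objs);
        printf("256-byte pool: %d cache(s), %d free\n",
               pools[1].caches, pools[1].free_objs);
        return 0;
}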
src/generic/pgalloc.c (new file, 172 lines)
@@ -0,0 +1,172 @@
/*
 * Simple kernel memory allocator built on top of the memcache
 * implementation.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/memcache.h>
#include <l4/lib/list.h>
#include <l4/lib/string.h>
#include <l4/generic/space.h>
#include <l4/generic/kmalloc.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/physmem.h>
#include INC_GLUE(memory.h)

#define PGALLOC_PGD_CACHE       0
#define PGALLOC_PMD_CACHE       1
#define PGALLOC_PG_CACHE        2
#define PGALLOC_CACHE_TOTAL     3

/* The initial chunk of physical memory allocated before any pagers. */
#define PGALLOC_INIT_GRANT      SZ_1MB

/* Covers the 3 main types of memory needed by the kernel. */
struct pgalloc {
        struct list_head cache_list[PGALLOC_CACHE_TOTAL];
};
static struct pgalloc pgalloc;

void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
{
        INIT_LIST_HEAD(&cache->list);
        BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
        list_add(&cache->list, &pgalloc.cache_list[cidx]);
}

void calc_kmem_usage_per_grant(kmem_usage_per_grant_t *params)
{
        /* Pmds, pgds and pages, in numbers per grant */
        int pmds_per_task_avg = params->task_size_avg / PMD_MAP_SIZE;
        int pmds_per_kmem_grant = params->tasks_per_kmem_grant * pmds_per_task_avg;
        int pgds_per_kmem_grant = params->tasks_per_kmem_grant * 1;
        int pgs_per_kmem_grant = params->tasks_per_kmem_grant * 1;

        /* Now everything in bytes */
        params->pmd_total = pmds_per_kmem_grant * PMD_SIZE;
        params->pgd_total = pgds_per_kmem_grant * PGD_SIZE;
        params->pg_total = pgs_per_kmem_grant * PAGE_SIZE;
        params->extra = params->grant_size -
                        (params->pgd_total + params->pmd_total +
                         params->pg_total);
}

int pgalloc_add_new_grant(unsigned long pfn, int npages)
{
        unsigned long physical = __pfn_to_addr(pfn);
        void *virtual = (void *)phys_to_virt(physical);
        struct mem_cache *pgd_cache, *pmd_cache, *pg_cache;
        kmem_usage_per_grant_t params;

        /* First map the whole grant */
        add_mapping(physical, phys_to_virt(physical), __pfn_to_addr(npages),
                    MAP_SVC_RW_FLAGS);

        /* Calculate how to divide the buffer into the different caches */
        params.task_size_avg = TASK_AVERAGE_SIZE;
        params.grant_size = npages * PAGE_SIZE;

        /* Calculate pools for how many tasks are served by this much grant */
        params.tasks_per_kmem_grant = (__pfn(SZ_1MB) * TASKS_PER_1MB_GRANT) /
                                      __pfn(params.grant_size);
        calc_kmem_usage_per_grant(&params);

        /* Create the caches: pmds first, then pgds, then the page cache. */
        pmd_cache = mem_cache_init(virtual, params.pmd_total, PMD_SIZE, 1);
        virtual += params.pmd_total;
        pgd_cache = mem_cache_init(virtual, params.pgd_total, PGD_SIZE, 1);
        virtual += params.pgd_total;
        pg_cache = mem_cache_init(virtual, params.pg_total + params.extra,
                                  PAGE_SIZE, 1);

        /* Add the caches */
        pgalloc_add_new_cache(pgd_cache, PGALLOC_PGD_CACHE);
        pgalloc_add_new_cache(pmd_cache, PGALLOC_PMD_CACHE);
        pgalloc_add_new_cache(pg_cache, PGALLOC_PG_CACHE);

        return 0;
}

void init_pgalloc(void)
{
        int initial_grant = PGALLOC_INIT_GRANT;

        for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
                INIT_LIST_HEAD(&pgalloc.cache_list[i]);

        /* Grant ourselves an initial chunk of physical memory */
        physmem.free_cur = page_align_up(physmem.free_cur);
        set_page_map(physmem.free_cur, __pfn(initial_grant), 1);
        pgalloc_add_new_grant(__pfn(physmem.free_cur), __pfn(initial_grant));
        physmem.free_cur += initial_grant;

        /* Activate kmalloc */
        init_kmalloc();
}

void pgalloc_remove_cache(struct mem_cache *cache)
{
        list_del_init(&cache->list);
}

static inline void *pgalloc_from_cache(int cidx)
{
        struct mem_cache *cache, *n;

        list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
                if (mem_cache_total_empty(cache))
                        return mem_cache_zalloc(cache);
        return 0;
}

int kfree_to_cache(int cidx, void *virtual)
{
        struct mem_cache *cache, *n;

        list_for_each_entry_safe(cache, n, &pgalloc.cache_list[cidx], list)
                if (mem_cache_free(cache, virtual) == 0)
                        return 0;
        return -1;
}

void *alloc_page(void)
{
        return pgalloc_from_cache(PGALLOC_PG_CACHE);
}

void *alloc_pmd(void)
{
        pmd_table_t *pmd;

        if (!(pmd = alloc_boot_pmd()))
                pmd = pgalloc_from_cache(PGALLOC_PMD_CACHE);

        return pmd;
}

void *alloc_pgd(void)
{
        return pgalloc_from_cache(PGALLOC_PGD_CACHE);
}

int free_page(void *v)
{
        return kfree_to_cache(PGALLOC_PG_CACHE, v);
}

int free_pmd(void *v)
{
        return kfree_to_cache(PGALLOC_PMD_CACHE, v);
}

int free_pgd(void *v)
{
        return kfree_to_cache(PGALLOC_PGD_CACHE, v);
}

void *zalloc_page(void)
{
        void *p = alloc_page();
        memset(p, 0, PAGE_SIZE);
        return p;
}
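The split computed by calc_kmem_usage_per_grant() is easiest to see with concrete numbers. The sketch below reruns that arithmetic with assumed constants (ARM-style 16 KB pgds, 1 KB pmds, a 1 MB PMD_MAP_SIZE, 4 KB pages, an 8 MB average task and 4 tasks per 1 MB grant); none of these values are taken from the real headers. Whatever is left over is folded into the page cache, just as pgalloc_add_new_grant() does with params.extra.

/*
 * Standalone sketch: the grant-splitting arithmetic of
 * calc_kmem_usage_per_grant(), run with assumed constants. These values
 * are illustrative only and do not come from the actual kernel headers.
 */
#include <stdio.h>

#define PAGE_SIZE       4096
#define PGD_SIZE        (16 * 1024)
#define PMD_SIZE        1024
#define PMD_MAP_SIZE    (1024 * 1024)
#define TASK_SIZE_AVG   (8 * 1024 * 1024)
#define TASKS_PER_GRANT 4
#define GRANT_SIZE      (1024 * 1024)

int main(void)
{
        int pmds_per_task = TASK_SIZE_AVG / PMD_MAP_SIZE;               /* 8 */
        int pmd_total = TASKS_PER_GRANT * pmds_per_task * PMD_SIZE;     /* 32 KB */
        int pgd_total = TASKS_PER_GRANT * PGD_SIZE;                     /* 64 KB */
        int pg_total = TASKS_PER_GRANT * PAGE_SIZE;                     /* 16 KB */
        int extra = GRANT_SIZE - (pmd_total + pgd_total + pg_total);    /* 912 KB */

        printf("pmd cache: %d KB\n", pmd_total / 1024);
        printf("pgd cache: %d KB\n", pgd_total / 1024);
        printf("pg  cache: %d KB (+%d KB leftover folded in)\n",
               pg_total / 1024, extra / 1024);
        return 0;
}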
src/generic/physmem.c (new file, 94 lines)
@@ -0,0 +1,94 @@
/*
 * Global physical memory descriptions.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/physmem.h>
#include <l4/generic/pgalloc.h>
#include <l4/generic/tcb.h>
#include <l4/lib/list.h>
#include <l4/lib/spinlock.h>
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
#include INC_GLUE(memory.h)
#include INC_PLAT(offsets.h)
#include INC_PLAT(printascii.h)
#include INC_ARCH(linker.h)

struct page_bitmap page_map;

static void init_page_map(unsigned long start, unsigned long end)
{
        page_map.pfn_start = __pfn(start);
        page_map.pfn_end = __pfn(end);
        set_page_map(start, __pfn(end - start), 0);
}

/*
 * Marks pages in the global page_map as used or unused.
 *
 * @start = start page address to set, inclusive.
 * @numpages = number of pages to set.
 */
int set_page_map(unsigned long start, int numpages, int val)
{
        unsigned long pfn_start = __pfn(start);
        unsigned long pfn_end = __pfn(start) + numpages;
        unsigned long pfn_err = 0;

        if (page_map.pfn_start > pfn_start || page_map.pfn_end < pfn_start) {
                pfn_err = pfn_start;
                goto error;
        }
        if (page_map.pfn_end < pfn_end || page_map.pfn_start > pfn_end) {
                pfn_err = pfn_end;
                goto error;
        }

        if (val)
                for (int i = pfn_start; i < pfn_end; i++)
                        page_map.map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
        else
                for (int i = pfn_start; i < pfn_end; i++)
                        page_map.map[BITWISE_GETWORD(i)] &= ~BITWISE_GETBIT(i);
        return 0;
error:
        BUG_MSG("Given page area is out of system page_map range: 0x%lx\n",
                pfn_err << PAGE_BITS);
        return -1;
}

/* Describes the physical memory boundaries of the system. */
struct memdesc physmem;

/* Fills in the physmem structure with free physical memory information */
void physmem_init()
{
        unsigned long start = (unsigned long)_start_kernel;
        unsigned long end = (unsigned long)_end_kernel;

        /* Initialise the page map */
        init_page_map(PHYS_MEM_START, PHYS_MEM_END);

        /* Mark kernel areas as used */
        set_page_map(virt_to_phys(start), __pfn(end - start), 1);

        /* Mark the initial pgd area as used */
        start = (unsigned long)__pt_start;
        end = (unsigned long)__pt_end;
        set_page_map(virt_to_phys(current->pgd), __pfn(end - start), 1);

        physmem.start = PHYS_MEM_START;
        physmem.end = PHYS_MEM_END;

        physmem.free_cur = __svc_images_end;
        physmem.free_end = PHYS_MEM_END;
        physmem.numpages = (PHYS_MEM_END - PHYS_MEM_START) / PAGE_SIZE;
}

void memory_init()
{
        printascii("Initialising kernel memory allocator.\n");
        init_pgalloc();
}
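set_page_map() relies on BITWISE_GETWORD()/BITWISE_GETBIT() to locate one bit per page frame inside page_map.map[]. The kernel's actual macro definitions are not shown in this commit, so the sketch below uses assumed 32-bit-word versions purely to illustrate the word/bit arithmetic.

/*
 * Standalone sketch: how a pfn is plausibly mapped onto the page_map bitmap.
 * The macro definitions below are assumptions for illustration, not the
 * kernel's actual BITWISE_GETWORD()/BITWISE_GETBIT() implementations.
 */
#include <stdio.h>
#include <stdint.h>

#define WORD_BITS               32
#define BITWISE_GETWORD(pfn)    ((pfn) / WORD_BITS)
#define BITWISE_GETBIT(pfn)     (1u << ((pfn) % WORD_BITS))

static uint32_t map[64];        /* covers 64 * 32 = 2048 page frames */

int main(void)
{
        unsigned long pfn = 0x123;      /* example page frame number (291) */

        map[BITWISE_GETWORD(pfn)] |= BITWISE_GETBIT(pfn);       /* mark used */
        printf("pfn 0x%lx -> word %lu, bit %lu, used=%d\n", pfn,
               pfn / WORD_BITS, pfn % WORD_BITS,
               !!(map[BITWISE_GETWORD(pfn)] & BITWISE_GETBIT(pfn)));
        map[BITWISE_GETWORD(pfn)] &= ~BITWISE_GETBIT(pfn);      /* mark free */
        return 0;
}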
src/generic/scheduler.c (new file, 371 lines)
@@ -0,0 +1,371 @@
/*
 * A basic scheduler that does the job for now.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/lib/list.h>
#include <l4/lib/printk.h>
#include <l4/lib/string.h>
#include <l4/lib/mutex.h>
#include <l4/lib/bit.h>
#include <l4/lib/spinlock.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/preempt.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include <l4/api/kip.h>
#include INC_SUBARCH(mm.h)
#include INC_SUBARCH(mmu_ops.h)
#include INC_GLUE(init.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)

/* A very basic runqueue */
struct runqueue {
        struct spinlock lock;
        struct list_head task_list;
        unsigned int total;
};

static struct runqueue sched_rq[3];
static struct runqueue *rq_runnable, *rq_expired, *rq_pending;


/* This is incremented on each irq or voluntarily by preempt_disable() */
extern unsigned int current_irq_nest_count;

/* This ensures no scheduling occurs after a voluntary preempt_disable() */
static int voluntary_preempt = 0;

int preemptive()
{
        return current_irq_nest_count == 0;
}

int preempt_count()
{
        return current_irq_nest_count;
}

void preempt_enable(void)
{
        voluntary_preempt--;
        current_irq_nest_count--;

        /*
         * Even if the count increases after we check it, it will come back
         * to zero. This test really asks: "is this the outermost explicit
         * preempt_enable() that will actually re-enable context switching?"
         */
        if (current_irq_nest_count == 0) {
                /* Then give the scheduler a chance to check need_resched == 1 */
                schedule();
        }
}

/* A positive irq nest count implies the current context cannot be preempted. */
void preempt_disable(void)
{
        current_irq_nest_count++;
        voluntary_preempt++;
}

void sched_runqueue_init(void)
{
        for (int i = 0; i < 3; i++) {
                memset(&sched_rq[i], 0, sizeof(struct runqueue));
                INIT_LIST_HEAD(&sched_rq[i].task_list);
                spin_lock_init(&sched_rq[i].lock);
        }

        rq_runnable = &sched_rq[0];
        rq_expired = &sched_rq[1];
        rq_pending = &sched_rq[2];
}

/* Lock scheduler. Should only be used when scheduling. */
static inline void sched_lock(void)
{
        preempt_disable();
}

/* Sched unlock */
static inline void sched_unlock(void)
{
        /*
         * This is to make sure preempt_enable() does not
         * try to schedule since we're already scheduling.
         */
        need_resched = 0;
        preempt_enable();
}

/* Swaps the runnable and expired queues *if* the runnable queue is empty. */
static void sched_rq_swap_expired_runnable(void)
{
        struct runqueue *temp;

        if (list_empty(&rq_runnable->task_list) &&
            !list_empty(&rq_expired->task_list)) {

                /* Queues are swapped and the expired list becomes runnable */
                temp = rq_runnable;
                rq_runnable = rq_expired;
                rq_expired = temp;
        }
}

/* Helper for adding a new task to a runqueue */
static void sched_rq_add_task(struct ktcb *task, struct runqueue *rq, int front)
{
        BUG_ON(task->rq);

        /*
         * If the task is sinfully in a runqueue, this may still keep silent
         * upon a race condition, since its rq can't be locked in advance.
         */
        BUG_ON(!list_empty(&task->rq_list));

        if (front)
                list_add(&task->rq_list, &rq->task_list);
        else
                list_add_tail(&task->rq_list, &rq->task_list);
        rq->total++;
        task->rq = rq;
}

static inline void
sched_rq_add_task_front(struct ktcb *task, struct runqueue *rq)
{
        sched_rq_add_task(task, rq, 1);
}

static inline void
sched_rq_add_task_behind(struct ktcb *task, struct runqueue *rq)
{
        sched_rq_add_task(task, rq, 0);
}

/* Helper for removing a task from its runqueue. */
static inline void sched_rq_remove_task(struct ktcb *task)
{
        list_del_init(&task->rq_list);
        task->rq->total--;
        task->rq = 0;
}

static inline void sched_init_task(struct ktcb *task)
{
        INIT_LIST_HEAD(&task->rq_list);
        task->ticks_left = TASK_TIMESLICE_DEFAULT;
        task->state = TASK_INACTIVE;
        task->ts_need_resched = 0;
}

void sched_tell(struct ktcb *task, unsigned int fl)
{
        BUG_ON(!(SCHED_FL_MASK & fl));
        /* The last flag overrides all existing flags. */
        task->schedfl = fl;
}

void sched_yield()
{
        need_resched = 1;
        schedule();
}

/*
 * Any task that wants the scheduler's attention but is not in one of its
 * currently runnable realms would call this, e.g. dormant tasks, sleeping
 * tasks, newly created tasks, but not currently runnable tasks.
 */
void sched_add_pending_task(struct ktcb *task)
{
        BUG_ON(task->rq);
        spin_lock(&rq_pending->lock);
        sched_rq_add_task_behind(task, rq_pending);
        spin_unlock(&rq_pending->lock);
}

/* Tells the scheduler to remove the given runnable task from the runqueues */
void sched_notify_sleep(struct ktcb *task)
{
        sched_tell(task, SCHED_FL_SLEEP);
}

void sched_sleep_task(struct ktcb *task)
{
        sched_notify_sleep(task);
        if (task == current)
                sched_yield();
}

/* Tells the scheduler to remove the given runnable task from the runqueues */
void sched_notify_suspend(struct ktcb *task)
{
        sched_tell(task, SCHED_FL_SUSPEND);
}

void sched_suspend_task(struct ktcb *task)
{
        sched_notify_suspend(task);
        if (task == current)
                sched_yield();
}

/* Tells the scheduler to add the given task into the runqueues whenever possible */
void sched_notify_resume(struct ktcb *task)
{
        BUG_ON(current == task);
        sched_tell(task, SCHED_FL_RESUME);
        sched_add_pending_task(task);
}

/* NOTE: Might as well just set need_resched instead of a full yield.
 * That would work in irq context as well. */
/* Same as resume, but also yields. */
void sched_resume_task(struct ktcb *task)
{
        sched_notify_resume(task);
        sched_yield();
}

void sched_start_task(struct ktcb *task)
{
        sched_init_task(task);
        sched_resume_task(task);
}

/*
 * Checks the currently pending scheduling flags on the task and does two things:
 * 1) Modifies its state.
 * 2) Modifies its runqueues.
 *
 * An inactive/sleeping task that is pending-runnable would change state here.
 * A runnable task that is pending-inactive would also change state here.
 * Returns 1 if it has changed anything (e.g. task state, runqueues), and
 * 0 otherwise.
 */
static int sched_next_state(struct ktcb *task)
{
        unsigned int flags = task->schedfl;
        int ret = 0;

        switch (flags) {
        case 0:
                ret = 0;
                break;
        case SCHED_FL_SUSPEND:
                task->state = TASK_INACTIVE;
                ret = 1;
                break;
        case SCHED_FL_RESUME:
                task->state = TASK_RUNNABLE;
                ret = 1;
                break;
        case SCHED_FL_SLEEP:
                task->state = TASK_SLEEPING;
                ret = 1;
                break;
        default:
                BUG();
        }
        task->schedfl = 0;
        return ret;
}


extern void switch_to(struct ktcb *cur, struct ktcb *next);

static inline void context_switch(struct ktcb *next)
{
        struct ktcb *cur = current;

        // printk("(%d) to (%d)\n", cur->tid, next->tid);

        /* Flush caches and everything */
        arm_clean_invalidate_cache();
        arm_invalidate_tlb();
        arm_set_ttb(virt_to_phys(next->pgd));
        arm_invalidate_tlb();
        switch_to(cur, next);
        // printk("Returning from yield. Tid: (%d)\n", cur->tid);
}

void scheduler()
{
        struct ktcb *next = 0, *pending = 0, *n = 0;

        sched_lock();
        need_resched = 0;
        BUG_ON(current->tid < MIN_PREDEFINED_TID ||
               current->tid > MAX_PREDEFINED_TID);
        BUG_ON(current->rq != rq_runnable);

        /* Current task */
        sched_rq_remove_task(current);
        sched_next_state(current);

        if (current->state == TASK_RUNNABLE) {
                current->ticks_left += TASK_TIMESLICE_DEFAULT;
                BUG_ON(current->ticks_left <= 0);
                sched_rq_add_task_behind(current, rq_expired);
        }
        sched_rq_swap_expired_runnable();

        /* Runnable-pending tasks */
        spin_lock(&rq_pending->lock);
        list_for_each_entry_safe(pending, n, &rq_pending->task_list, rq_list) {
                sched_next_state(pending);
                sched_rq_remove_task(pending);
                if (pending->state == TASK_RUNNABLE)
                        sched_rq_add_task_front(pending, rq_runnable);
        }
        spin_unlock(&rq_pending->lock);

        /* Next task */
retry_next:
        if (rq_runnable->total > 0) {
                next = list_entry(rq_runnable->task_list.next, struct ktcb, rq_list);
                sched_next_state(next);
                if (next->state != TASK_RUNNABLE) {
                        sched_rq_remove_task(next);
                        sched_rq_swap_expired_runnable();
                        goto retry_next;
                }
        } else {
                printk("Idle task.\n");
                while (1);
        }

        disable_irqs();
        sched_unlock();
        context_switch(next);
}

void schedule(void)
{
        /* It's a royal bug to call schedule when preemption is disabled */
        BUG_ON(voluntary_preempt);

        if (need_resched)
                scheduler();
}

void scheduler_start()
{
        /* Initialise the runqueues */
        sched_runqueue_init();

        /* Initialise the init task as runnable for the first-ever scheduling */
        sched_init_task(current);
        current->state = TASK_RUNNABLE;
        sched_rq_add_task_front(current, rq_runnable);

        /* Start the timer */
        timer_start();
        switch_to_user(current);
}
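The runnable/expired rotation in scheduler() amounts to round robin with deferred re-queuing: a task that used up its timeslice is parked at the back of the expired queue, and the two queues swap roles once the runnable one drains. The toy below (plain user-space C, arrays instead of linked lists, with the locking, pending queue and timeslices stripped out) reproduces only that rotation and prints tasks 1, 2, 3 repeating in order.

/*
 * Standalone sketch: the runnable/expired rotation of scheduler(), reduced
 * to two integer arrays. No locking, no pending queue, no real timeslices.
 */
#include <stdio.h>
#include <string.h>

#define NTASKS 3

int main(void)
{
        int runnable[NTASKS] = { 1, 2, 3 };     /* task ids, head at index 0 */
        int expired[NTASKS];
        int nr_run = NTASKS, nr_exp = 0;

        for (int round = 0; round < 10; round++) {
                /* sched_rq_swap_expired_runnable(): swap once runnable drains */
                if (nr_run == 0) {
                        memcpy(runnable, expired, sizeof(int) * nr_exp);
                        nr_run = nr_exp;
                        nr_exp = 0;
                }
                /* Pick the head of the runnable queue and "run" it for a slice */
                int next = runnable[0];
                memmove(runnable, runnable + 1, sizeof(int) * (nr_run - 1));
                nr_run--;
                /* sched_rq_add_task_behind(): park it on the expired queue */
                expired[nr_exp++] = next;
                printf("round %d: run task %d\n", round, next);
        }
        return 0;
}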
src/generic/tcb.c (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
 * Some ktcb related data
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/lib/idpool.h>

/* ID pools for threads and spaces. */
struct id_pool *thread_id_pool;
struct id_pool *space_id_pool;

/* Global list of all existing tasks */
struct list_head global_task_list;

/* Offsets for ktcb fields that are accessed from assembler */
unsigned int need_resched_offset = offsetof(struct ktcb, ts_need_resched);
unsigned int syscall_regs_offset = offsetof(struct ktcb, syscall_regs);


#if 0
int task_suspend(struct ktcb *task)
{
        task->flags |= SCHED_FLAG_SUSPEND;

        return 0;
}

int task_resume(struct ktcb *task)
{
        task->flags &= ~SCHED_FLAG_SUSPEND;
        return sched_enqueue_task(task);
}
#endif
src/generic/time.c (new file, 96 lines)
@@ -0,0 +1,96 @@
/*
 * Time.
 *
 * Copyright (C) 2007 Bahadir Balban
 *
 */
#include <l4/types.h>
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/irq.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/time.h>
#include INC_ARCH(exception.h)

/* TODO:
 * 1) Add RTC support.
 * 2) Need to calculate time since EPOCH.
 * 3) Jiffies must be initialised to a reasonable value.
 */

volatile u32 jiffies;

static inline void increase_jiffies(void)
{
        jiffies++;
}


static int noticks_noresched = 0;

/*
 * Check preemption anomalies:
 *
 * This checks how many times no rescheduling has occurred even though ticks
 * reached zero. This suggests that preemption was enabled for more than a timer
 * interval. Normally, even if a preemption irq occurred during a non-preemptive
 * state, preemption is *guaranteed* to occur before the next irq, provided that
 * the non-preemptive period is less than a timer irq interval (and it must be).
 *
 * Time:
 *
 * |-|---------------------|-|-------------------->
 * | V                     | V
 * | Preemption irq()      | Next irq.
 * V                       V
 * preempt_disabled()      preempt_enabled() && preemption;
 */
void check_noticks_noresched(void)
{
        if (!current->ticks_left)
                noticks_noresched++;

        if (noticks_noresched >= 2) {
                printk("Warning, no ticks and yet no rescheduling "
                       "for %d times.\n", noticks_noresched);
                printk("Spending more than a timer period"
                       " as nonpreemptive!!!\n");
        }
}

void update_process_times(void)
{
        struct ktcb *cur = current;

        BUG_ON(cur->ticks_left < 0);

        /*
         * If preemption is disabled we stop reducing ticks when it reaches 0,
         * but set need_resched so that scheduling occurs as soon as preemption
         * is re-enabled.
         */
        if (cur->ticks_left == 0) {
                need_resched = 1;
                // check_noticks_noresched();
                return;
        }
        // noticks_noresched = 0;

        if (in_kernel())
                cur->kernel_time++;
        else
                cur->user_time++;

        cur->ticks_left--;
        if (!cur->ticks_left)
                need_resched = 1;
}


int do_timer_irq(void)
{
        increase_jiffies();
        update_process_times();
        return IRQ_HANDLED;
}
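Since jiffies simply counts timer interrupts, wall-clock conversions follow directly from the tick rate. The sketch below assumes a hypothetical 100 Hz timer (the real rate lives in the platform timer code, which is not part of this commit) and shows the jiffies-to-seconds arithmetic.

/*
 * Standalone sketch: jiffies-to-seconds conversion under an assumed 100 Hz
 * timer. The real tick rate comes from the platform timer, not this file.
 */
#include <stdio.h>

#define HZ 100  /* assumed timer interrupts per second */

static volatile unsigned int jiffies;

static void mock_timer_tick(void)
{
        jiffies++;      /* what do_timer_irq() contributes per tick */
}

int main(void)
{
        for (int i = 0; i < 250; i++)
                mock_timer_tick();

        /* The %02u fraction prints hundredths only because HZ is 100 here. */
        printf("%u ticks = %u.%02u seconds at %d Hz\n",
               jiffies, jiffies / HZ, jiffies % HZ, HZ);        /* 2.50 s */
        return 0;
}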