Code that compiles and works up to initializing the first memcache.

Author: Bahadir Balban
Date:   2009-07-29 19:08:29 +03:00
Parent: dd8f773f10
Commit: 723cf7bde9

12 changed files with 230 additions and 197 deletions

View File

@@ -7,6 +7,34 @@
#define MUTEX_CONTROL_LOCK L4_MUTEX_LOCK
#define MUTEX_CONTROL_UNLOCK L4_MUTEX_UNLOCK
#include <l4/lib/wait.h>
#include <l4/lib/list.h>
#include <l4/lib/mutex.h>
struct mutex_queue {
unsigned long physical;
struct link list;
struct waitqueue_head wqh_contenders;
struct waitqueue_head wqh_holders;
};
/*
* Mutex queue head keeps the list of all userspace mutexes.
*
* Here, mutex_control_mutex is a single lock for:
* (1) Mutex_queue create/deletion
* (2) List add/removal.
* (3) Wait synchronization:
* - Both waitqueue spinlocks need to be acquired for
* rendezvous inspection to occur atomically. Currently
* it's not done since we rely on this mutex for that.
*/
struct mutex_queue_head {
struct link list;
struct mutex mutex_control_mutex;
int count;
} mutex_queue_head;
void init_mutex_queue_head(void);
#endif
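
This header now exposes the user-mutex bookkeeping that previously lived in mutex.c (removed in a later hunk). A minimal lookup sketch, assuming the kernel's mutex_lock()/mutex_unlock() from l4/lib/mutex.h and the list_foreach_struct() iterator used elsewhere in this tree; mutex_queue_find() itself is a hypothetical helper, not a quote from the source:

/* Look up a userspace mutex by the physical address of its word;
 * mutex_control_mutex serializes list walks against create/delete. */
struct mutex_queue *mutex_queue_find(struct mutex_queue_head *mqhead,
                                     unsigned long physical)
{
        struct mutex_queue *mq;

        mutex_lock(&mqhead->mutex_control_mutex);
        list_foreach_struct(mq, &mqhead->list, list) {
                if (mq->physical == physical) {
                        mutex_unlock(&mqhead->mutex_control_mutex);
                        return mq;
                }
        }
        mutex_unlock(&mqhead->mutex_control_mutex);
        return 0;       /* not found */
}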

View File

@@ -7,6 +7,8 @@
#define __V5_MM_H__
/* ARM specific definitions */
#define VIRT_MEM_START 0
#define VIRT_MEM_END 0xFFFFFFFF
#define ARM_SECTION_SIZE SZ_1MB
#define ARM_SECTION_MASK (ARM_SECTION_SIZE - 1)
#define ARM_SECTION_BITS 20
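
ARM_SECTION_SIZE, ARM_SECTION_MASK and ARM_SECTION_BITS describe the 1MB first-level sections of the ARMv5 page table. A quick sketch of how such masks split a virtual address, assuming SZ_1MB == 0x100000:

/* Top 12 bits select the section, low 20 bits offset into it */
unsigned long section_base(unsigned long vaddr)
{
        return vaddr & ~ARM_SECTION_MASK;
}

unsigned long section_offset(unsigned long vaddr)
{
        return vaddr & ARM_SECTION_MASK;
}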

View File

@@ -44,11 +44,12 @@ struct container {
* Threads, address spaces, mutex queues, cpu share ...
* Pagers possess these capabilities.
*/
struct capability caps[5] /* threadpool, spacepool, mutexpool, cpupool, mempool */
struct capability caps[5]; /* threadpool, spacepool, mutexpool, cpupool, mempool */
};
/* The array of containers present on the system */
extern struct container container[];
#define CONFIG_MAX_CAPS_USED 11
#define CONFIG_MAX_PAGERS_USED 2
/* Compact, raw capability structure */
struct cap_info {
@@ -78,7 +79,7 @@ struct pager_info {
* One or more virtmem caps,
* Zero or more umutex caps,
*/
struct cap_info caps[];
struct cap_info caps[CONFIG_MAX_CAPS_USED];
};
/*
@@ -86,9 +87,9 @@ struct pager_info {
* used to create run-time containers
*/
struct container_info {
char *name;
char name[64];
int npagers;
struct pager_info pagers[];
struct pager_info pager[CONFIG_MAX_PAGERS_USED];
};
extern struct container_info cinfo[];
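
The switch from flexible array members (caps[], pagers[]) to fixed-size arrays is what makes the statically initialized cinfo[] table (later in this commit) legal: ISO C forbids a struct that ends in a flexible array member from being used as an array element or as a member of another struct, which is exactly what cinfo[] needs. An illustrative fragment:

struct flex  { int n; int v[]; };
/* struct flex table[2];        -- invalid: array of flexible structs */
struct fixed { int n; int v[4]; };
struct fixed table[2] = { { 1, { 2 } }, { 3, { 4 } } };   /* fine */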

View File

@@ -2,7 +2,7 @@
#define __RESOURCES_H__
/* Number of containers defined at compile-time */
#define CONTAINERS_TOTAL 1
#define TOTAL_CONTAINERS 1
#include <l4/generic/capability.h>
@@ -13,6 +13,7 @@ struct boot_resources {
int nthreads;
int nspaces;
int npmds;
int nmutex;
/* Kernel resource usage */
int nkpmds;
@@ -38,14 +39,13 @@ struct kernel_container {
struct mem_cache *pmd_cache;
struct mem_cache *ktcb_cache;
struct mem_cache *address_space_cache;
struct mem_cache *umutex_cache;
struct mem_cache *mutex_cache;
struct mem_cache *cap_cache;
struct mem_cache *cont_cache;
};
extern struct kernel_container kernel_container;
int init_system_resources(struct kernel_container *kcont,
struct boot_resources *bootres);
int init_system_resources(struct kernel_container *kcont);
#endif /* __RESOURCES_H__ */

View File

@@ -36,6 +36,24 @@ static inline struct ktcb *current_task(void)
#define current current_task()
#define need_resched (current->ts_need_resched)
#define SCHED_RQ_TOTAL 2
/* A basic runqueue */
struct runqueue {
struct spinlock lock; /* Lock */
struct link task_list; /* List of tasks in rq */
unsigned int total; /* Total tasks */
};
/* Contains per-container scheduling structures */
struct scheduler {
struct runqueue sched_rq[SCHED_RQ_TOTAL];
struct runqueue *rq_runnable;
struct runqueue *rq_expired;
/* Total priority of all tasks in container */
int prio_total;
};
void sched_init_task(struct ktcb *task, int priority);
void sched_prepare_sleep(void);
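
These structures move the scheduler state out of the file-static globals in sched.c (removed in a later hunk) so that each container can carry its own scheduler. A minimal setup sketch; sched_init(), sched_rq_swap() and spin_lock_init() are assumed names, not quotes from the tree:

void sched_init(struct scheduler *sched)
{
        for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
                spin_lock_init(&sched->sched_rq[i].lock);
                link_init(&sched->sched_rq[i].task_list);
                sched->sched_rq[i].total = 0;
        }
        sched->rq_runnable = &sched->sched_rq[0];
        sched->rq_expired = &sched->sched_rq[1];
        sched->prio_total = 0;
}

/* When the runnable queue drains, expired tasks become runnable */
static inline void sched_rq_swap(struct scheduler *sched)
{
        struct runqueue *tmp = sched->rq_runnable;
        sched->rq_runnable = sched->rq_expired;
        sched->rq_expired = tmp;
}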

View File

@@ -16,31 +16,6 @@
#include INC_ARCH(exception.h)
#include INC_GLUE(memory.h)
struct mutex_queue {
unsigned long physical;
struct link list;
struct waitqueue_head wqh_contenders;
struct waitqueue_head wqh_holders;
};
/*
* Mutex queue head keeps the list of all userspace mutexes.
*
* Here, mutex_control_mutex is a single lock for:
* (1) Mutex_queue create/deletion
* (2) List add/removal.
* (3) Wait synchronization:
* - Both waitqueue spinlocks need to be acquired for
* rendezvous inspection to occur atomically. Currently
* it's not done since we rely on this mutex for that.
*/
struct mutex_queue_head {
struct link list;
struct mutex mutex_control_mutex;
int count;
} mutex_queue_head;
void init_mutex_queue_head(void)
{
memset(&mutex_queue_head, 0, sizeof (mutex_queue_head));

View File

@@ -4,7 +4,7 @@
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c']
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c', 'container.c']
obj = env.Object(src_local)
Return('obj')

View File

@@ -3,89 +3,99 @@
*
* Copyright (C) 2009 Bahadir Balban
*/
#include <l4/generic/container.h>
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include INC_GLUE(memory.h)
/*
* FIXME:
* Add irqs, exceptions
*/
struct container_info cinfo[] = {
[0] = {
.name = "Codezero POSIX Services",
.npagers = 1,
.pagers = {
.[0] = {
.pager_lma = 0x38000,
.pager_vma = 0xE0000000,
.pager_size = 0x96000,
.pager = {
[0] = {
.pager_lma = __pfn(0x38000),
.pager_vma = __pfn(0xE0000000),
.pager_size = __pfn(0x96000),
.ncaps = 11,
.caps = {
.[0] = {
[0] = {
.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
.access = CAP_MAP_READ | CAP_MAP_WRITE
| CAP_MAP_EXEC | CAP_MAP_UNMAP,
.access = 0,
.start = 0xE0000000,
.end = 0xF0000000,
.size = 0x10000000,
.start = __pfn(0xE0000000),
.end = __pfn(0xF0000000),
.size = __pfn(0x10000000),
},
.[1] = {
[1] = {
.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
.access = CAP_MAP_READ | CAP_MAP_WRITE
| CAP_MAP_EXEC | CAP_MAP_UNMAP,
.start = 0x10000000,
.end = 0x20000000,
.size = 0x10000000,
.start = __pfn(0x10000000),
.end = __pfn(0x20000000),
.size = __pfn(0x10000000),
},
.[2] = {
[2] = {
.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
.access = CAP_MAP_READ | CAP_MAP_WRITE
| CAP_MAP_EXEC | CAP_MAP_UNMAP,
.access = 0,
.start = 0x20000000,
.end = 0x30000000,
.size = 0x10000000,
.start = __pfn(0x20000000),
.end = __pfn(0x30000000),
.size = __pfn(0x10000000),
},
.[3] = {
[3] = {
.type = CAP_TYPE_MAP | CAP_RTYPE_PHYSMEM,
.access = CAP_MAP_CACHED | CAP_MAP_UNCACHED
| CAP_MAP_READ | CAP_MAP_WRITE
| CAP_MAP_EXEC | CAP_MAP_UNMAP,
.start = 0x38000,
.end = 0x1000000, /* 16 MB */
.start = __pfn(0x38000),
.end = __pfn(0x1000000), /* 16 MB for all posix services */
},
.[4] = {
[4] = {
.type = CAP_TYPE_IPC | CAP_RTYPE_CONTAINER,
.access = CAP_IPC_SEND | CAP_IPC_RECV
| CAP_IPC_FULL | CAP_IPC_SHORT
| CAP_IPC_EXTENDED,
.start = 0, .end = 0, .size = 0,
},
.[5] = {
[5] = {
.type = CAP_TYPE_TCTRL | CAP_RTYPE_CONTAINER,
.access = CAP_TCTRL_CREATE | CAP_TCTRL_DESTROY
| CAP_TCTRL_SUSPEND | CAP_TCTRL_RESUME
| CAP_TCTRL_RECYCLE,
.start = 0, .end = 0, .size = 0,
},
.[6] = {
[6] = {
.type = CAP_TYPE_EXREGS | CAP_RTYPE_CONTAINER,
.access = CAP_EXREGS_RW_PAGER
| CAP_EXREGS_RW_UTCB | CAP_EXREGS_RW_SP
| CAP_EXREGS_RW_PC | CAP_EXREGS_RW_REGS,
.start = 0, .end = 0, .size = 0,
},
.[7] = {
[7] = {
.type = CAP_TYPE_QUANTITY
| CAP_RTYPE_THREADPOOL,
.access = 0, .start = 0, .end = 0,
.size = 64,
},
.[8] = {
[8] = {
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_SPACEPOOL,
.access = 0, .start = 0, .end = 0,
.size = 64,
},
.[9] = {
[9] = {
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_CPUPOOL,
.access = 0, .start = 0, .end = 0,
.size = 50, /* Percentage */
},
.[10] = {
[10] = {
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_MUTEXPOOL,
.access = 0, .start = 0, .end = 0,
.size = 100,
@@ -93,5 +103,6 @@ struct container_info cinfo[] = {
},
},
},
},
};
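
The initializer above now stores page frame numbers rather than raw addresses. A sketch of the conversion macros, assuming 4KB pages (PAGE_BITS == 12) in the glue-layer memory.h:

#define PAGE_BITS            12
#define __pfn(addr)          ((unsigned long)(addr) >> PAGE_BITS)
#define __pfn_to_addr(pfn)   ((unsigned long)(pfn) << PAGE_BITS)

/* e.g. __pfn(0xE0000000) == 0xE0000, so cap->end - cap->start
 * and cap->size now compare page counts, not bytes. */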

View File

@@ -5,8 +5,12 @@
*/
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/container.h>
#include <l4/lib/list.h>
#include <l4/generic/resource.h>
#include <l4/generic/bootmem.h>
#include <l4/lib/math.h>
#include <l4/lib/memcache.h>
#include INC_GLUE(memory.h)
#include INC_ARCH(linker.h)
@@ -18,62 +22,12 @@ void cap_list_init(struct cap_list *clist)
link_init(&clist->caps);
}
void cap_list_add(struct cap_list *clist, struct capability *cap)
void cap_list_insert(struct capability *cap, struct cap_list *clist)
{
list_add(&cap->list, &clist->caps);
list_insert(&cap->list, &clist->caps);
clist->ncaps++;
}
/*
* Initializes kernel caplists, and sets up total of physical
* and virtual memory as single capabilities of the kernel.
* They will then get split into caps of different lengths
* during the traversal of container capabilities.
*/
void setup_kernel_container(struct kernel_container *kcont)
{
struct capability *physmem, *virtmem, *kernel_area;
/* Initialize kernel capability lists */
cap_list_init(&kcont->physmem_used);
cap_list_init(&kcont->physmem_free);
cap_list_init(&kcont->virtmem_used);
cap_list_init(&kcont->virtmem_free);
cap_list_init(&kcont->devmem_used);
cap_list_init(&kcont->devmem_free);
/* Set up total physical memory as single capability */
physmem = alloc_bootmem(sizeof(*physmem));
physmem->start = __pfn(PHYS_MEM_START);
physmem->end = __pfn(PHYS_MEM_END);
link_init(&physmem->list);
cap_list_add(&kcont->physmem_free, physmem);
/* Set up total virtual memory as single capability */
virtmem = alloc_bootmem(sizeof(*virtmem));
virtmem->start = __pfn(VIRT_MEM_START);
virtmem->end = __pfn(VIRT_MEM_END);
link_init(&virtmem->list);
cap_list_add(&kcont->virtmem_free, virtmem);
/* Set up kernel used area as a single capability */
kernel_area = alloc_bootmem(sizeof(*physmem));
kernel_area->start = __pfn(virt_to_phys(_start_kernel));
kernel_area->end = __pfn(virt_to_phys(_end_kernel));
link_init(&kernel_area->list);
list_add(&kcont->physmem_used, kernel_area);
/* Unmap kernel used area from free physical memory capabilities */
memcap_unmap(&kcont->physmem_free, kernel_area->start,
kernel_area->end);
/* TODO:
* Add all virtual memory areas used by the kernel
* e.g. kernel virtual area, syscall page, kip page,
* vectors page, timer, sysctl and uart device pages
*/
}
/*
* This splits a capability, splitter region must be in
* the *middle* of original capability
@@ -85,7 +39,7 @@ int memcap_split(struct capability *cap, struct cap_list *cap_list,
struct capability *new;
/* Allocate a capability first */
new = alloc_bootmem(sizeof(*new));
new = alloc_bootmem(sizeof(*new), 0);
/*
* Some sanity checks to show that splitter range does end up
@@ -100,7 +54,7 @@ int memcap_split(struct capability *cap, struct cap_list *cap_list,
new->access = cap->access;
/* Add new one next to original cap */
cap_list_add(new, cap_list);
cap_list_insert(new, cap_list);
return 0;
}
@@ -126,8 +80,8 @@ int memcap_shrink(struct capability *cap, struct cap_list *cap_list,
int memcap_unmap_range(struct capability *cap,
struct cap_list *cap_list,
const unsigned long pfn_start,
const unsigned long pfn_end)
const unsigned long start,
const unsigned long end)
{
/* Split needed? */
if (cap->start < start && cap->end > end)
@@ -162,16 +116,16 @@ int memcap_unmap(struct cap_list *cap_list,
/* Check for intersection */
if (set_intersection(unmap_start, unmap_end,
cap->start, cap->end)) {
if ((err = memcap_unmap_range(cap, cap_list
if ((err = memcap_unmap_range(cap, cap_list,
unmap_start,
unmap_end))) {
return err;
}
/* Return 1 to indicate unmap occured */
return 1;
return 0;
}
}
return 0
/* Return 1 to indicate unmap didn't occur */
return 1;
}
/*
@@ -197,17 +151,18 @@ int process_cap_info(struct cap_info *cap,
bootres->nmutex += cap->size;
break;
case CAP_RTYPE_VIRTMEM:
/* Area size in pages divided by mapsize in pages */
bootres->npmds +=
cap->size / PMD_MAP_SIZE;
cap->size / __pfn(PMD_MAP_SIZE);
if ((ret = memcap_unmap(&kcont->virtmem_free,
cap->start, cap->end))) {
if (ret < 0)
printk("FATAL: Insufficient boot memory "
"to split capability\n");
if (ret > 0)
printf("FATAL: Memory capability range "
printk("FATAL: Memory capability range "
"overlaps with another one. "
"start=0x%x, end=0x%x\n",
"start=0x%lx, end=0x%lx\n",
__pfn_to_addr(cap->start),
__pfn_to_addr(cap->end));
BUG();
@@ -220,9 +175,9 @@ int process_cap_info(struct cap_info *cap,
printk("FATAL: Insufficient boot memory "
"to split capability\n");
if (ret > 0)
printf("FATAL: Memory capability range "
printk("FATAL: Memory capability range "
"overlaps with another one. "
"start=0x%x, end=0x%x\n",
"start=0x%lx, end=0x%lx\n",
__pfn_to_addr(cap->start),
__pfn_to_addr(cap->end));
BUG();
@@ -239,35 +194,44 @@ void migrate_boot_resources(struct boot_resources *bootres,
struct kernel_container *kcont)
{
/* Migrate boot page tables to new caches */
migrate_page_tables(kcont);
// migrate_page_tables(kcont);
/* Migrate all boot-allocated capabilities */
migrate_boot_caps(kcont);
// migrate_boot_caps(kcont);
}
/* Delete all boot memory and add it to physical memory pool. */
int free_boot_memory(struct kernel_container *kcont,
struct boot_resources *bootres)
int free_boot_memory(struct boot_resources *bootres,
struct kernel_container *kcont)
{
/* Trim kernel used memory memcap */
memcap_unmap(&kcont->physical_used, _bootmem_start, _bootmem_end);
/* Trim kernel used memory cap */
memcap_unmap(&kcont->physmem_used, (unsigned long)_start_init,
(unsigned long)_end_init);
/* Add it to unused physical memory */
memcap_map(&kcont->physical_unused, _bootmem_start, _bootmem_end);
/* Add it to unused physical memory */
// memcap_map(&kcont->physmem_free, (unsigned long)_start_init,
// (unsigned long)_end_init);
/*
* Freed physical area will be unmapped from virtual
* by not mapping it in the task page tables.
*/
return 0;
}
struct mem_cache *init_resource_cache(struct boot_resources *bootres,
struct kernel_container *kcont)
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
struct kernel_container *kcont,
int aligned)
{
struct capability *cap;
unsigned long bufsize;
/* In all unused physical memory regions */
list_foreach_struct(cap, &kcont->physical_unused, list) {
list_foreach_struct(cap, &kcont->physmem_free.caps, list) {
/* Get buffer size needed for cache */
bufsize = mem_cache_bufsize(__pfn_to_addr(cap->start),
PGD_SIZE, bootres->nspaces,
bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
struct_size, nstruct,
aligned);
/*
* Check if memcap region size is enough to cover
@@ -290,19 +254,70 @@ struct mem_cache *init_resource_cache(struct boot_resources *bootres,
MAP_SVC_RW_FLAGS);
}
/* Unmap area from memcap */
memcap_unmap_range(cap, &kcont->physical_unused,
memcap_unmap_range(cap, &kcont->physmem_free,
cap->start, cap->start +
__pfn(page_align_up((bufsize))));
/* TODO: Manipulate memcaps for virtual range??? */
/* Initialize the cache */
return mem_cache_init(virtual, bufsize, PGD_SIZE, 1);
return mem_cache_init((void *)virtual, bufsize,
PGD_SIZE, 1);
}
}
return 0;
}
/*
* Initializes kernel caplists, and sets up total of physical
* and virtual memory as single capabilities of the kernel.
* They will then get split into caps of different lengths
* during the traversal of container capabilities.
*/
void init_kernel_container(struct kernel_container *kcont)
{
struct capability *physmem, *virtmem, *kernel_area;
/* Initialize kernel capability lists */
cap_list_init(&kcont->physmem_used);
cap_list_init(&kcont->physmem_free);
cap_list_init(&kcont->virtmem_used);
cap_list_init(&kcont->virtmem_free);
cap_list_init(&kcont->devmem_used);
cap_list_init(&kcont->devmem_free);
/* Set up total physical memory as single capability */
physmem = alloc_bootmem(sizeof(*physmem), 0);
physmem->start = __pfn(PHYS_MEM_START);
physmem->end = __pfn(PHYS_MEM_END);
link_init(&physmem->list);
cap_list_insert(physmem, &kcont->physmem_free);
/* Set up total virtual memory as single capability */
virtmem = alloc_bootmem(sizeof(*virtmem), 0);
virtmem->start = __pfn(VIRT_MEM_START);
virtmem->end = __pfn(VIRT_MEM_END);
link_init(&virtmem->list);
cap_list_insert(virtmem, &kcont->virtmem_free);
/* Set up kernel used area as a single capability */
kernel_area = alloc_bootmem(sizeof(*physmem), 0);
kernel_area->start = __pfn(virt_to_phys(_start_kernel));
kernel_area->end = __pfn(virt_to_phys(_end_kernel));
link_init(&kernel_area->list);
cap_list_insert(kernel_area, &kcont->physmem_used);
/* Unmap kernel used area from free physical memory capabilities */
memcap_unmap(&kcont->physmem_free, kernel_area->start,
kernel_area->end);
/* TODO:
* Add all virtual memory areas used by the kernel
* e.g. kernel virtual area, syscall page, kip page,
* vectors page, timer, sysctl and uart device pages
*/
}
void create_containers(struct boot_resources *bootres,
struct kernel_container *kcont)
{
@@ -327,50 +342,38 @@ void create_capabilities(struct boot_resources *bootres,
void init_resource_allocators(struct boot_resources *bootres,
struct kernel_container *kcont)
{
struct mem_cache *cache;
/* Initialise PGD cache */
cache = init_resource_cache(bootres->nspaces,
PGD_SIZE, kcont, 1);
kcont->pgd_cache = cache;
kcont->pgd_cache = init_resource_cache(bootres->nspaces,
PGD_SIZE, kcont, 1);
/* Initialise PMD cache */
cache = init_resource_cache(bootres->npmds,
PMD_SIZE, kcont, 1);
cache->pmd_cache = cache;
kcont->pmd_cache = init_resource_cache(bootres->npmds,
PMD_SIZE, kcont, 1);
/* Initialise struct address_space cache */
cache = init_resource_cache(bootres->nspaces,
kcont->address_space_cache =
init_resource_cache(bootres->nspaces,
sizeof(struct address_space),
kcont, 0);
cache->address_space_cache = cache;
/* Initialise ktcb cache */
cache = init_resource_cache(bootres->nthreads,
PAGE_SIZE, kcont, 1);
cache->ktcb_cache = cache;
kcont->ktcb_cache = init_resource_cache(bootres->nthreads,
PAGE_SIZE, kcont, 1);
/* Initialise umutex cache */
cache = init_resource_cache(bootres->numutex,
sizeof(struct mutex_queue),
kcont, 0);
cache->umutex_cache = cache;
kcont->mutex_cache = init_resource_cache(bootres->nmutex,
sizeof(struct mutex_queue),
kcont, 0);
/* TODO: Initialize ID cache */
/* # of capabilities are now constant, create capabilities cache */
/* Initialise capability cache */
cache = init_resource_cache(bootres->ncaps, /* FIXME: Count correctly */
sizeof(struct capability),
kcont, 0);
cache->cap_cache = cache;
kcont->cap_cache = init_resource_cache(bootres->ncaps, /* FIXME: Count correctly */
sizeof(struct capability),
kcont, 0);
/* Initialise container cache */
cache = init_resource_cache(bootres->ncont,
sizeof(struct container),
kcont, 0);
cache->cont_cache = cache;
kcont->cont_cache = init_resource_cache(bootres->nconts,
sizeof(struct container),
kcont, 0);
/* Create system containers */
create_containers(bootres, kcont);
@@ -379,30 +382,29 @@ void init_resource_allocators(struct boot_resources *bootres,
create_capabilities(bootres, kcont);
}
int init_boot_resources(struct boot_resources *bootres, struct kernel_container *kcont)
int init_boot_resources(struct boot_resources *bootres,
struct kernel_container *kcont)
{
struct cap_info *cap;
struct pager_info *pgr;
struct container_info *cont;
setup_kernel_container(kcont);
init_kernel_container(kcont);
/* Number of containers known at compile-time */
bootres->nconts = ncontainers;
bootres->nconts = TOTAL_CONTAINERS;
/* Traverse all containers */
for (int i = 0; i < bootres->nconts; i++) {
/* Traverse all pagers */
for (int j = 0; j < container[i]->npagers; j++) {
int ncaps = container[i].pager[j].ncaps;
for (int j = 0; j < cinfo[i].npagers; j++) {
int ncaps = cinfo[i].pager[j].ncaps;
/* Count all capabilities */
bootres->ncaps += ncaps;
/* Count all resources */
for (int k = 0; k < ncaps; k++) {
cap = container[i].pager[j].caps[k];
proces_cap_info(cap);
cap = &cinfo[i].pager[j].caps[k];
process_cap_info(cap, bootres, kcont);
}
}
}
@@ -419,11 +421,15 @@ int init_system_resources(struct kernel_container *kcont)
struct boot_resources bootres;
init_boot_resources(&bootres, &kcont);
memset(&bootres, 0, sizeof(bootres));
init_resource_allocators(&bootres, &kcont);
init_boot_resources(&bootres, kcont);
free_boot_memory(bootres, kcont);
init_resource_allocators(&bootres, kcont);
free_boot_memory(&bootres, kcont);
return 0;
}
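
One subtle change in this file is that memcap_unmap() flips its return convention. A reading of the diff, not spelled out anywhere in the tree:

/*   < 0  alloc_bootmem() failed while splitting a capability
 *     0  the range intersected a capability and was carved out
 *     1  nothing in the list intersected -- for a free list this
 *        means the range was already claimed by someone else
 * so process_cap_info() treats any nonzero result as fatal: */
int ret = memcap_unmap(&kcont->virtmem_free, cap->start, cap->end);
if (ret)
        BUG();  /* after reporting which of the two cases occurred */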

View File

@@ -23,14 +23,6 @@
#include INC_ARCH(exception.h)
/* A basic runqueue */
struct runqueue {
struct spinlock lock; /* Lock */
struct link task_list; /* List of tasks in rq */
unsigned int total; /* Total tasks */
};
#define SCHED_RQ_TOTAL 2
static struct runqueue sched_rq[SCHED_RQ_TOTAL];
static struct runqueue *rq_runnable, *rq_expired;
static int prio_total; /* Total priority of all tasks */

View File

@@ -382,7 +382,7 @@ void start_kernel(void)
// relocate_page_tables();
/* Evaluate system resources and set up resource pools */
init_system_resources();
init_system_resources(&kernel_container);
/* Initialise memory allocators */
paging_init();

View File

@@ -98,7 +98,7 @@ out:
int mem_cache_bufsize(void *start, int struct_size, int nstructs, int aligned)
{
unsigned long start_address = (unsigned long)start;
int total_bytes, bwords;
int total_bytes, bwords, bitmap_size;
/* Word alignment requirement */
start_address = align_up(start_address, sizeof(int));
@@ -121,7 +121,7 @@ int mem_cache_bufsize(void *start, int struct_size, int nstructs, int aligned)
/* Check alignment requirement */
if (aligned)
start_address += align_up(start_address, struct_size);
start_address = align_up(start_address, struct_size);
return start_address - (unsigned long)start;
}
@@ -143,7 +143,7 @@ struct mem_cache *mem_cache_init(void *bufstart,
start = (void *)align_up(bufstart, sizeof(int));
cache_size -= (int)start - (int)bufstart;
mem_cache = start;
cache = start;
if ((struct_size < 0) || (cache_size < 0) ||
((unsigned long)start == ~(0))) {
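
Taken together, the two corrected routines are used the way init_resource_cache() drives them; a sketch with a made-up buffer address and count:

void *buf = (void *)0x400000;   /* some free, already-mapped region */

/* Bytes needed for 64 page-aligned ktcbs plus cache header and bitmap */
int bufsize = mem_cache_bufsize(buf, PAGE_SIZE, 64, 1);

/* Lay the cache out in the buffer */
struct mem_cache *ktcb_cache = mem_cache_init(buf, bufsize, PAGE_SIZE, 1);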