mirror of https://github.com/drasko/codezero.git
Code that compiles until initialization of containers and pagers.
@@ -33,9 +33,10 @@ struct mutex_queue_head {
struct link list;
struct mutex mutex_control_mutex;
int count;
} mutex_queue_head;
};

void init_mutex_queue_head(struct mutex_queue_head *mqhead);

void init_mutex_queue_head(void);
#endif

#define L4_MUTEX_LOCK 0

@@ -148,7 +148,8 @@ int copy_user_tables(struct address_space *new, struct address_space *orig);
pgd_table_t *copy_page_tables(pgd_table_t *from);
void remap_as_pages(void *vstart, void *vend);

void relocate_page_tables(void);
int pgd_count_pmds(pgd_table_t *pgd);
pgd_table_t *realloc_page_tables(void);
void remove_section_mapping(unsigned long vaddr);

void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,

@@ -32,8 +32,10 @@
#define CAP_RTYPE_CPUPOOL (1 << 23)
#define CAP_RTYPE_THREADPOOL (1 << 24)
#define CAP_RTYPE_SPACEPOOL (1 << 25)
#define CAP_RTYPE_MUTEXPOOL (1 << 27)
#define CAP_RTYPE_MEMPOOL (1 << 26) /* Do we need this ??? */
#define CAP_RTYPE_MUTEXPOOL (1 << 26)
#define CAP_RTYPE_MAPPOOL (1 << 27) /* For pmd spending */
#define CAP_RTYPE_CAPPOOL (1 << 28) /* For new cap generation */

/*
* Access permissions
*/
@@ -61,6 +63,7 @@
#define CAP_MAP_CACHED (1 << 3)
#define CAP_MAP_UNCACHED (1 << 4)
#define CAP_MAP_UNMAP (1 << 5)
#define CAP_MAP_UTCB (1 << 6)

/* Ipc capability */
#define CAP_IPC_SEND (1 << 0)

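A note on how these bits are used: the CAP_RTYPE_* resource-type bits share the single type word with the CAP_TYPE_* capability-type bits (the cinfo tables later in this commit use values such as CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM), so code that classifies a capability masks the word first. A minimal sketch, following the pattern used in resource.c further down in this commit:

	/* Classify a capability by its targeted resource type */
	switch (cap->type & CAP_RTYPE_MASK) {
	case CAP_RTYPE_THREADPOOL:
		bootres->nthreads += cap->size;	/* quantitative cap: size is a count */
		break;
	case CAP_RTYPE_VIRTMEM:
		/* memory cap: start/end describe a pfn range */
		break;
	}
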
@@ -14,21 +14,23 @@
*
* In this structure:
*
* The capid denotes the unique capability ID. The resid denotes the unique ID
* of targeted resource. The owner denotes the unique ID of capability owner.
* This is almost always a thread ID.
* The capid denotes the unique capability ID.
* The resid denotes the unique ID of targeted resource.
* The owner denotes the unique ID of the one and only capability owner. This is
* almost always a thread ID.
*
* The type field contains two types: The capability type, and the targeted
* resource type. The targeted resource type denotes what type of resource the
* capability is allowed to operate on. For example a thread, a thread group,
* an address space or a memory can be of this type.
* The type field contains two types:
* - The capability type,
* - The targeted resource type.
*
* The targeted resource type denotes what type of resource the capability is
* allowed to operate on. For example a thread, a thread group, an address space
* or a memory can be of this type.
*
* The capability type defines the general set of operations allowed on a
* particular resource. The resource type defines the type of resource that
* the capability is targeting. For example a capability type may be
* thread_control, exchange_registers, ipc, or map operations. A resource type
* may be such as a thread, a thread group, a virtual or physical memory
* region.
* particular resource. For example a capability type may be thread_control,
* exchange_registers, ipc, or map operations. A resource type may be such as a
* thread, a thread group, a virtual or physical memory region.
*
* There are also quantitative capability types. While their names denote
* quantitative objects such as memory, threads, and address spaces, these
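As a reading aid for the description above, here is a minimal sketch of a capability structure carrying the fields the comment names (capid, resid, owner, the combined type word) plus the access/start/end/size fields that copy_pager_info() fills in later in this commit. The exact field types and ordering are assumptions, not the actual codezero definition:

	struct capability {
		struct link list;		/* link into a struct cap_list */
		l4id_t capid;			/* unique capability id */
		l4id_t resid;			/* unique id of the targeted resource */
		l4id_t owner;			/* owner id, almost always a thread id */
		unsigned int type;		/* CAP_TYPE_* | CAP_RTYPE_* */
		unsigned int access;		/* allowed operations, e.g. CAP_MAP_READ */
		unsigned long start, end;	/* pfn range for memory resources */
		unsigned long size;		/* quantity for pool (quantitative) types */
	};
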
@@ -64,6 +66,9 @@ struct cap_list {
struct link caps;
};

void capability_init(struct capability *cap);
struct capability *capability_create(void);

#if 0
/* Virtual memory space allocated to container */
struct capability cap_virtmap = {

@@ -9,34 +9,47 @@
#include <l4/generic/scheduler.h>
#include <l4/generic/space.h>
#include <l4/generic/capability.h>
#include <l4/generic/resource.h>
#include <l4/generic/tcb.h>
#include <l4/lib/idpool.h>
#include <l4/api/mutex.h>
#include <l4/lib/list.h>
#include <l4/lib/idpool.h>

#define curcont (current->container)

#define CONFIG_CONTAINER_NAMESIZE 64
#define CONFIG_MAX_CAPS_USED 14
#define CONFIG_MAX_PAGERS_USED 2

/* Container macro. No locks needed! */
#define this_container (current->container)

struct pager {
struct ktcb *tcb;
unsigned long start_lma;
unsigned long start_vma;
unsigned long start_address;
unsigned long stack_address;
unsigned long memsize;
struct cap_list cap_list;
};

struct container {
/* Unique container id */
l4id_t cid;
l4id_t cid; /* Unique container id */
int npagers; /* # of pagers */
struct link list; /* List ref for containers */
struct address_space_list space_list; /* List of address spaces */
char name[CONFIG_CONTAINER_NAMESIZE]; /* Name of container */
struct ktcb_list ktcb_list; /* List of threads */
struct link pager_list; /* List of pagers */

/* List of address spaces */
struct address_space_list space_list;

/* List of threads */
struct ktcb_list ktcb_list;

/* ID pools for threads and spaces */
struct id_pool *thread_id_pool;
struct id_pool *thread_id_pool; /* Id pools for thread/spaces */
struct id_pool *space_id_pool;

/* Scheduling structs */
struct scheduler scheduler;
struct scheduler scheduler; /* Scheduling structs */

/* Mutex list for all userspace mutexes */
struct mutex_queue_head mutex_queue_head;
struct mutex_queue_head mutex_queue_head; /* Userspace mutex list */

/*
* Capabilities that apply to this container
@@ -44,13 +57,10 @@ struct container {
* Threads, address spaces, mutex queues, cpu share ...
* Pagers possess these capabilities.
*/
struct capability caps[5]; /* threadpool, spacepool, mutexpool, cpupool, mempool */
/* threadpool, spacepool, mutexpool, cpupool, mempool */
struct pager pager[CONFIG_MAX_PAGERS_USED];
};

#define CONFIG_MAX_CAPS_USED 11
#define CONFIG_MAX_PAGERS_USED 2

/* Compact, raw capability structure */
struct cap_info {
unsigned int type;
@@ -60,10 +70,13 @@ struct cap_info {
unsigned long size;
};

struct pager_info {
unsigned long pager_lma;
unsigned long pager_vma;
unsigned long pager_size;
unsigned long start_address;
unsigned long stack_address;

/* Number of capabilities defined */
int ncaps;
@@ -87,12 +100,22 @@ struct pager_info {
* used to create run-time containers
*/
struct container_info {
char name[64];
char name[CONFIG_CONTAINER_NAMESIZE];
int npagers;
struct pager_info pager[CONFIG_MAX_PAGERS_USED];
};

extern struct container_info cinfo[];

void kcont_insert_container(struct container *c,
struct kernel_container *kcont);

struct container *container_create(void);

int container_init_pagers(struct kernel_container *kcont,
pgd_table_t *current_pgd);

int init_containers(struct kernel_container *kcont);

#endif /* __CONTAINER_H__ */

@@ -1,15 +0,0 @@
#ifndef __PGALLOC_H__
#define __PGALLOC_H__

void *zalloc_page(void);
void *alloc_page(void);
void *alloc_pmd(void);
void *alloc_pgd(void);
int free_page(void *);
int free_pmd(void *);
int free_pgd(void *);

int pgalloc_add_new_grant(unsigned long pfn, int npages);
void init_pgalloc();

#endif /* __PGALLOC_H__ */
@@ -1,10 +1,18 @@
/*
* Description of resources on the system
*
* Copyright (C) 2009 Bahadir Balban
*/

#ifndef __RESOURCES_H__
#define __RESOURCES_H__

/* Number of containers defined at compile-time */
#define TOTAL_CONTAINERS 1
#define CONFIG_TOTAL_CONTAINERS 1

#include <l4/generic/capability.h>
#include <l4/lib/idpool.h>
#include INC_SUBARCH(mm.h)

struct boot_resources {
int nconts;
@@ -21,8 +29,30 @@ struct boot_resources {
int nkmemcaps;
};

/* List of containers */
struct container_head {
int ncont;
struct link list;
};

/*
* Everything on the platform is described and stored
* in the structure below.
*/
struct kernel_container {
l4id_t cid;

/* System id pools */
struct id_pool space_ids;
struct id_pool ktcb_ids;
struct id_pool resource_ids;
struct id_pool container_ids;
struct id_pool mutex_ids;
struct id_pool capability_ids;

/* List of all containers */
struct container_head containers;

/* Physical memory caps, used/unused */
struct cap_list physmem_used;
struct cap_list physmem_free;
@@ -38,7 +68,7 @@ struct kernel_container {
struct mem_cache *pgd_cache;
struct mem_cache *pmd_cache;
struct mem_cache *ktcb_cache;
struct mem_cache *address_space_cache;
struct mem_cache *space_cache;
struct mem_cache *mutex_cache;
struct mem_cache *cap_cache;
struct mem_cache *cont_cache;
@@ -46,6 +76,22 @@ struct kernel_container {

extern struct kernel_container kernel_container;

void free_pgd(void *addr);
void free_pmd(void *addr);
void free_space(void *addr);
void free_ktcb(void *addr);
void free_capability(void *addr);
void free_container(void *addr);
void free_user_mutex(void *addr);

pgd_table_t *alloc_pgd(void);
pmd_table_t *alloc_pmd(void);
struct address_space *alloc_space(void);
struct ktcb *alloc_ktcb(void);
struct capability *alloc_capability(void);
struct container *alloc_container(void);
struct mutex_queue *alloc_user_mutex(void);

int init_system_resources(struct kernel_container *kcont);

#endif /* __RESOURCES_H__ */

@@ -45,6 +45,7 @@ struct runqueue {
|
||||
struct link task_list; /* List of tasks in rq */
|
||||
unsigned int total; /* Total tasks */
|
||||
};
|
||||
|
||||
/* Contains per-container scheduling structures */
|
||||
struct scheduler {
|
||||
struct runqueue sched_rq[SCHED_RQ_TOTAL];
|
||||
@@ -55,6 +56,7 @@ struct scheduler {
|
||||
int prio_total;
|
||||
};
|
||||
|
||||
void sched_init_runqueue(struct runqueue *rq);
|
||||
void sched_init_task(struct ktcb *task, int priority);
|
||||
void sched_prepare_sleep(void);
|
||||
void sched_suspend_sync(void);
|
||||
@@ -63,5 +65,6 @@ void sched_resume_sync(struct ktcb *task);
|
||||
void sched_resume_async(struct ktcb *task);
|
||||
void scheduler_start(void);
|
||||
void schedule(void);
|
||||
void sched_init(struct scheduler *scheduler);
|
||||
|
||||
#endif /* __SCHEDULER_H__ */
|
||||
|
||||
@@ -54,7 +54,7 @@ void address_space_add(struct address_space *space);
|
||||
void address_space_remove(struct address_space *space);
|
||||
void address_space_reference_lock();
|
||||
void address_space_reference_unlock();
|
||||
void init_address_space_list(void);
|
||||
void init_address_space_list(struct address_space_list *space_list);
|
||||
int check_access(unsigned long vaddr, unsigned long size,
|
||||
unsigned int flags, int page_in);
|
||||
#endif
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
#include <l4/lib/mutex.h>
|
||||
#include <l4/lib/spinlock.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
#include INC_GLUE(syscall.h)
|
||||
@@ -42,6 +42,8 @@ struct task_ids {
|
||||
l4id_t tgid;
|
||||
};
|
||||
|
||||
struct container;
|
||||
|
||||
struct ktcb {
|
||||
/* User context */
|
||||
task_context_t context;
|
||||
@@ -94,6 +96,9 @@ struct ktcb {
|
||||
/* Page table information */
|
||||
struct address_space *space;
|
||||
|
||||
/* Container */
|
||||
struct container *container;
|
||||
|
||||
/* Fields for ipc rendezvous */
|
||||
struct waitqueue_head wqh_recv;
|
||||
struct waitqueue_head wqh_send;
|
||||
@@ -121,7 +126,6 @@ union ktcb_union {
|
||||
char kstack[PAGE_SIZE];
|
||||
};
|
||||
|
||||
|
||||
/* Hash table for all existing tasks */
|
||||
struct ktcb_list {
|
||||
struct link list;
|
||||
@@ -157,7 +161,7 @@ void tcb_init(struct ktcb *tcb);
|
||||
struct ktcb *tcb_alloc_init(void);
|
||||
void tcb_delete(struct ktcb *tcb);
|
||||
|
||||
void init_ktcb_list(void);
|
||||
void init_ktcb_list(struct ktcb_list *ktcb_list);
|
||||
void task_update_utcb(struct ktcb *cur, struct ktcb *next);
|
||||
int tcb_check_and_lazy_map_utcb(struct ktcb *task);
|
||||
|
||||
|
||||
@@ -121,5 +121,8 @@ pte_t virt_to_pte(unsigned long virtual);
|
||||
pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd);
|
||||
unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd);
|
||||
|
||||
struct ktcb;
|
||||
void task_init_registers(struct ktcb *task, unsigned long pc);
|
||||
|
||||
#endif /* __GLUE_ARM_MEMORY_H__ */
|
||||
|
||||
|
||||
@@ -4,7 +4,17 @@
#include <l4/lib/bit.h>
#include <l4/lib/spinlock.h>

/* One page size minus the structure fields */
#define CONFIG_MAX_SYSTEM_IDS (1023*32)
#define SYSTEM_IDS_MAX (CONFIG_MAX_SYSTEM_IDS >> 5)

struct id_pool {
struct spinlock lock;
int nwords;
u32 bitmap[SYSTEM_IDS_MAX];
};

struct id_pool_variable {
struct spinlock lock;
int nwords;
u32 bitmap[];

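A quick check on the sizing above, assuming 4-byte u32 words and a 4 KB page: CONFIG_MAX_SYSTEM_IDS = 1023 * 32 = 32736 ids, so SYSTEM_IDS_MAX = 32736 >> 5 = 1023 bitmap words, i.e. 1023 * 4 = 4092 bytes of bitmap. Together with the spinlock and the nwords field that comes to roughly one page, which is what the "One page size minus the structure fields" comment refers to.
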
@@ -7,6 +7,7 @@
|
||||
#include <l4/lib/mutex.h>
|
||||
#include <l4/lib/printk.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/container.h>
|
||||
#include <l4/generic/kmalloc.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/api/kip.h>
|
||||
@@ -16,21 +17,22 @@
|
||||
#include INC_ARCH(exception.h)
|
||||
#include INC_GLUE(memory.h)
|
||||
|
||||
void init_mutex_queue_head(void)
|
||||
void init_mutex_queue_head(struct mutex_queue_head *mqhead)
|
||||
{
|
||||
memset(&mutex_queue_head, 0, sizeof (mutex_queue_head));
|
||||
link_init(&mutex_queue_head.list);
|
||||
mutex_init(&mutex_queue_head.mutex_control_mutex);
|
||||
}
|
||||
void mutex_queue_head_lock()
|
||||
{
|
||||
mutex_lock(&mutex_queue_head.mutex_control_mutex);
|
||||
memset(mqhead, 0, sizeof(*mqhead));
|
||||
link_init(&mqhead->list);
|
||||
mutex_init(&mqhead->mutex_control_mutex);
|
||||
}
|
||||
|
||||
void mutex_queue_head_unlock()
|
||||
void mutex_queue_head_lock(struct mutex_queue_head *mqhead)
|
||||
{
|
||||
mutex_lock(&mqhead->mutex_control_mutex);
|
||||
}
|
||||
|
||||
void mutex_queue_head_unlock(struct mutex_queue_head *mqhead)
|
||||
{
|
||||
/* Async unlock because in some cases preemption may be disabled here */
|
||||
mutex_unlock_async(&mutex_queue_head.mutex_control_mutex);
|
||||
mutex_unlock_async(&mqhead->mutex_control_mutex);
|
||||
}
|
||||
|
||||
|
||||
@@ -44,27 +46,28 @@ void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
|
||||
waitqueue_head_init(&mq->wqh_contenders);
|
||||
}
|
||||
|
||||
void mutex_control_add(struct mutex_queue *mq)
|
||||
void mutex_control_add(struct mutex_queue_head *mqhead, struct mutex_queue *mq)
|
||||
{
|
||||
BUG_ON(!list_empty(&mq->list));
|
||||
|
||||
list_insert(&mq->list, &mutex_queue_head.list);
|
||||
mutex_queue_head.count++;
|
||||
list_insert(&mq->list, &mqhead->list);
|
||||
mqhead->count++;
|
||||
}
|
||||
|
||||
void mutex_control_remove(struct mutex_queue *mq)
|
||||
void mutex_control_remove(struct mutex_queue_head *mqhead, struct mutex_queue *mq)
|
||||
{
|
||||
list_remove_init(&mq->list);
|
||||
mutex_queue_head.count--;
|
||||
mqhead->count--;
|
||||
}
|
||||
|
||||
/* Note, this has ptr/negative error returns instead of ptr/zero. */
|
||||
struct mutex_queue *mutex_control_find(unsigned long mutex_physical)
|
||||
struct mutex_queue *mutex_control_find(struct mutex_queue_head *mqhead,
|
||||
unsigned long mutex_physical)
|
||||
{
|
||||
struct mutex_queue *mutex_queue;
|
||||
|
||||
/* Find the mutex queue with this key */
|
||||
list_foreach_struct(mutex_queue, &mutex_queue_head.list, list)
|
||||
list_foreach_struct(mutex_queue, &mqhead->list, list)
|
||||
if (mutex_queue->physical == mutex_physical)
|
||||
return mutex_queue;
|
||||
|
||||
@@ -109,21 +112,22 @@ void mutex_control_delete(struct mutex_queue *mq)
|
||||
* until a wake up event occurs. If there is already an asleep
|
||||
* lock holder (i.e. an unlocker), it is woken up and we return.
|
||||
*/
|
||||
int mutex_control_lock(unsigned long mutex_address)
|
||||
int mutex_control_lock(struct mutex_queue_head *mqhead,
|
||||
unsigned long mutex_address)
|
||||
{
|
||||
struct mutex_queue *mutex_queue;
|
||||
|
||||
mutex_queue_head_lock();
|
||||
mutex_queue_head_lock(mqhead);
|
||||
|
||||
/* Search for the mutex queue */
|
||||
if (!(mutex_queue = mutex_control_find(mutex_address))) {
|
||||
if (!(mutex_queue = mutex_control_find(mqhead, mutex_address))) {
|
||||
/* Create a new one */
|
||||
if (!(mutex_queue = mutex_control_create(mutex_address))) {
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Add the queue to mutex queue list */
|
||||
mutex_control_add(mutex_queue);
|
||||
mutex_control_add(mqhead, mutex_queue);
|
||||
} else {
|
||||
/* See if there is a lock holder */
|
||||
if (mutex_queue->wqh_holders.sleepers) {
|
||||
@@ -134,11 +138,11 @@ int mutex_control_lock(unsigned long mutex_address)
|
||||
wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);
|
||||
|
||||
/* Since no one is left, delete the mutex queue */
|
||||
mutex_control_remove(mutex_queue);
|
||||
mutex_control_remove(mqhead, mutex_queue);
|
||||
mutex_control_delete(mutex_queue);
|
||||
|
||||
/* Release lock and return */
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -150,7 +154,7 @@ int mutex_control_lock(unsigned long mutex_address)
|
||||
wait_on_prepare(&mutex_queue->wqh_contenders, &wq);
|
||||
|
||||
/* Release lock */
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
|
||||
/* Initiate prepared wait */
|
||||
return wait_on_prepared_wait();
|
||||
@@ -170,23 +174,24 @@ int mutex_control_lock(unsigned long mutex_address)
|
||||
* to acquire the mutex, waking up all of them increases the
|
||||
* chances that some thread may acquire it.
|
||||
*/
|
||||
int mutex_control_unlock(unsigned long mutex_address)
|
||||
int mutex_control_unlock(struct mutex_queue_head *mqhead,
|
||||
unsigned long mutex_address)
|
||||
{
|
||||
struct mutex_queue *mutex_queue;
|
||||
|
||||
mutex_queue_head_lock();
|
||||
mutex_queue_head_lock(mqhead);
|
||||
|
||||
/* Search for the mutex queue */
|
||||
if (!(mutex_queue = mutex_control_find(mutex_address))) {
|
||||
if (!(mutex_queue = mutex_control_find(mqhead, mutex_address))) {
|
||||
|
||||
/* No such mutex, create one and sleep on it */
|
||||
if (!(mutex_queue = mutex_control_create(mutex_address))) {
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Add the queue to mutex queue list */
|
||||
mutex_control_add(mutex_queue);
|
||||
mutex_control_add(mqhead, mutex_queue);
|
||||
|
||||
/* Prepare to wait on the lock holders queue */
|
||||
CREATE_WAITQUEUE_ON_STACK(wq, current);
|
||||
@@ -195,7 +200,7 @@ int mutex_control_unlock(unsigned long mutex_address)
|
||||
wait_on_prepare(&mutex_queue->wqh_holders, &wq);
|
||||
|
||||
/* Release lock first */
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
|
||||
/* Initiate prepared wait */
|
||||
return wait_on_prepared_wait();
|
||||
@@ -209,11 +214,11 @@ int mutex_control_unlock(unsigned long mutex_address)
|
||||
wake_up(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
|
||||
|
||||
/* Since no one is left, delete the mutex queue */
|
||||
mutex_control_remove(mutex_queue);
|
||||
mutex_control_remove(mqhead, mutex_queue);
|
||||
mutex_control_delete(mutex_queue);
|
||||
|
||||
/* Release lock and return */
|
||||
mutex_queue_head_unlock();
|
||||
mutex_queue_head_unlock(mqhead);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -238,15 +243,17 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
|
||||
/* Find and check physical address for virtual mutex address */
|
||||
if (!(mutex_physical =
|
||||
virt_to_phys_by_pgd(mutex_address,
|
||||
TASK_PGD(current))))
|
||||
TASK_PGD(current))))
|
||||
return -EINVAL;
|
||||
|
||||
switch (mutex_op) {
|
||||
case MUTEX_CONTROL_LOCK:
|
||||
ret = mutex_control_lock(mutex_physical);
|
||||
ret = mutex_control_lock(&curcont->mutex_queue_head,
|
||||
mutex_physical);
|
||||
break;
|
||||
case MUTEX_CONTROL_UNLOCK:
|
||||
ret = mutex_control_unlock(mutex_physical);
|
||||
ret = mutex_control_unlock(&curcont->mutex_queue_head,
|
||||
mutex_physical);
|
||||
break;
|
||||
default:
|
||||
printk("%s: Invalid operands\n", __FUNCTION__);
|
||||
@@ -256,4 +263,3 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
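To summarize the shape of the change in this file: the single global mutex_queue_head is replaced by a per-container one, and every helper now takes the queue head explicitly. A condensed sketch of the resulting call path, simplified from the hunks above (locking and error paths trimmed; not the verbatim kernel code):

	int sys_mutex_control(unsigned long mutex_address, int mutex_op)
	{
		struct mutex_queue_head *mqhead = &curcont->mutex_queue_head;
		unsigned long physical;

		/* Translate the caller's virtual mutex address to its physical key */
		if (!(physical = virt_to_phys_by_pgd(mutex_address, TASK_PGD(current))))
			return -EINVAL;

		switch (mutex_op) {
		case MUTEX_CONTROL_LOCK:
			return mutex_control_lock(mqhead, physical);
		case MUTEX_CONTROL_UNLOCK:
			return mutex_control_unlock(mqhead, physical);
		default:
			return -EINVAL;
		}
	}
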
@@ -7,7 +7,7 @@
|
||||
#include <l4/lib/printk.h>
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/api/space.h>
|
||||
@@ -201,7 +201,7 @@ int sys_kmem_control(unsigned long pfn, int npages, int grant)
|
||||
return -EINVAL;
|
||||
|
||||
/* Add the granted pages to the allocator */
|
||||
if (pgalloc_add_new_grant(pfn, npages))
|
||||
// if (pgalloc_add_new_grant(pfn, npages))
|
||||
BUG();
|
||||
} else /* Reclaim not implemented yet */
|
||||
BUG();
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#include <l4/lib/idpool.h>
|
||||
#include <l4/lib/mutex.h>
|
||||
#include <l4/lib/wait.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include INC_ARCH(asm.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
|
||||
@@ -334,7 +334,7 @@ int thread_create(struct task_ids *ids, unsigned int flags)
|
||||
|
||||
out_err:
|
||||
/* Pre-mature tcb needs freeing by free_page */
|
||||
free_page(new);
|
||||
free_ktcb(new);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <l4/generic/scheduler.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/bootmem.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/api/errno.h>
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include INC_SUBARCH(mmu_ops.h)
|
||||
@@ -535,6 +536,16 @@ out_error:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int pgd_count_pmds(pgd_table_t *pgd)
|
||||
{
|
||||
int npmd = 0;
|
||||
|
||||
for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
|
||||
if ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)
|
||||
npmd++;
|
||||
return npmd;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocates and copies all levels of page tables from one task to another.
|
||||
* Useful when forking.
|
||||
@@ -600,66 +611,56 @@ out_error:
|
||||
|
||||
extern pmd_table_t *pmd_array;
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* Moves the section mapped kspace that resides far apart from kernel as close
|
||||
* as possible to the kernel image, and unmaps the old 1MB kspace section which
|
||||
* is really largely unused.
|
||||
* Jumps from boot page tables to tables allocated from the cache.
|
||||
*/
|
||||
void relocate_page_tables(void)
|
||||
pgd_table_t *realloc_page_tables(void)
|
||||
{
|
||||
/* Adjust the end of kernel address to page table alignment. */
|
||||
unsigned long pt_new = align_up(_end_kernel, sizeof(pgd_table_t));
|
||||
unsigned long reloc_offset = (unsigned long)_start_kspace - pt_new;
|
||||
unsigned long pt_area_size = (unsigned long)_end_kspace -
|
||||
(unsigned long)_start_kspace;
|
||||
pgd_table_t *pgd_new = alloc_pgd();
|
||||
pgd_table_t *pgd_old = &init_pgd;
|
||||
pmd_table_t *orig, *pmd;
|
||||
|
||||
BUG_ON(reloc_offset & (SZ_1K - 1))
|
||||
/* Copy whole pgd entries */
|
||||
memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));
|
||||
|
||||
/* Map the new page table area into the current pgd table */
|
||||
add_mapping(virt_to_phys(pt_new), pt_new, pt_area_size,
|
||||
MAP_IO_DEFAULT_FLAGS);
|
||||
/* Allocate and copy all pmds */
|
||||
for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
|
||||
/* Detect a pmd entry */
|
||||
if ((pgd_old->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
|
||||
/* Allocate new pmd */
|
||||
if (!(pmd = alloc_pmd())) {
|
||||
printk("FATAL: PMD allocation "
|
||||
"failed during system initialization\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
/* Copy the entire kspace area, i.e. the pgd + static pmds. */
|
||||
memcpy((void *)pt_new, _start_kspace, pt_area_size);
|
||||
/* Find original pmd */
|
||||
orig = (pmd_table_t *)
|
||||
phys_to_virt((pgd_old->entry[i] &
|
||||
PGD_COARSE_ALIGN_MASK));
|
||||
|
||||
/* Update the only reference to current pgd table */
|
||||
TASK_PGD(current) = (pgd_table_t *)pt_new;
|
||||
/* Copy original to new */
|
||||
memcpy(pmd, orig, sizeof(pmd_table_t));
|
||||
|
||||
/*
|
||||
* Since pmd's are also moved, update the pmd references in pgd by
|
||||
* subtracting the relocation offset from each valid pmd entry.
|
||||
* TODO: This would be best done within a helper function.
|
||||
*/
|
||||
for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
|
||||
/* If there's a coarse 2nd level entry */
|
||||
if ((TASK_PGD(current)->entry[i] & PGD_TYPE_MASK)
|
||||
== PGD_TYPE_COARSE)
|
||||
TASK_PGD(current)->entry[i] -= reloc_offset;
|
||||
|
||||
/* Update the pmd array pointer. */
|
||||
pmd_array = (pmd_table_t *)((unsigned long)_start_pmd - reloc_offset);
|
||||
/* Replace original pmd entry in pgd with new */
|
||||
pgd_new->entry[i] = (pgd_t)virt_to_phys(pmd);
|
||||
pgd_new->entry[i] |= PGD_TYPE_COARSE;
|
||||
}
|
||||
}
|
||||
|
||||
/* Switch the virtual memory system into new area */
|
||||
arm_clean_invalidate_cache();
|
||||
arm_drain_writebuffer();
|
||||
arm_invalidate_tlb();
|
||||
arm_set_ttb(virt_to_phys(TASK_PGD(current)));
|
||||
arm_set_ttb(virt_to_phys(pgd_new));
|
||||
arm_invalidate_tlb();
|
||||
|
||||
/* Unmap the old page table area */
|
||||
remove_section_mapping((unsigned long)&kspace);
|
||||
|
||||
/* Update the page table markers to the new area. Any references would
|
||||
* go to these markers. */
|
||||
__pt_start = pt_new;
|
||||
__pt_end = pt_new + pt_area_size;
|
||||
|
||||
printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
|
||||
__KERNELNAME__, virt_to_phys(&kspace),
|
||||
virt_to_phys(TASK_PGD(current)));
|
||||
__KERNELNAME__, virt_to_phys(pgd_old),
|
||||
virt_to_phys(pgd_new));
|
||||
|
||||
return pgd_new;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Useful for upgrading to page-grained control over a section mapping:
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
Import('env')
|
||||
|
||||
# The set of source files associated with this SConscript file.
|
||||
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'pgalloc.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c', 'container.c']
|
||||
src_local = ['physmem.c', 'irq.c', 'scheduler.c', 'time.c', 'tcb.c', 'kmalloc.c', 'space.c', 'bootm.c', 'resource.c', 'container.c', 'capability.c']
|
||||
|
||||
obj = env.Object(src_local)
|
||||
Return('obj')
|
||||
|
||||
@@ -3,5 +3,21 @@
|
||||
*
|
||||
* Copyright (C) 2009 Bahadir Balban
|
||||
*/
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/capability.h>
|
||||
|
||||
struct capability *capability_create(void)
|
||||
{
|
||||
struct capability *cap = alloc_capability();
|
||||
|
||||
capability_init(cap);
|
||||
|
||||
return cap;
|
||||
}
|
||||
|
||||
void capability_init(struct capability *cap)
|
||||
{
|
||||
cap->capid = id_new(&kernel_container.capability_ids);
|
||||
link_init(&cap->list);
|
||||
}
|
||||
|
||||
|
||||
@@ -4,8 +4,10 @@
|
||||
* Copyright (C) 2009 Bahadir Balban
|
||||
*/
|
||||
#include <l4/generic/container.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/capability.h>
|
||||
#include <l4/generic/cap-types.h>
|
||||
#include <l4/api/errno.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
|
||||
/*
|
||||
@@ -22,7 +24,7 @@ struct container_info cinfo[] = {
|
||||
.pager_lma = __pfn(0x38000),
|
||||
.pager_vma = __pfn(0xE0000000),
|
||||
.pager_size = __pfn(0x96000),
|
||||
.ncaps = 11,
|
||||
.ncaps = 14,
|
||||
.caps = {
|
||||
[0] = {
|
||||
.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
|
||||
@@ -51,6 +53,16 @@ struct container_info cinfo[] = {
|
||||
.size = __pfn(0x10000000),
|
||||
},
|
||||
[3] = {
|
||||
.type = CAP_TYPE_MAP | CAP_RTYPE_VIRTMEM,
|
||||
.access = CAP_MAP_READ | CAP_MAP_WRITE
|
||||
| CAP_MAP_EXEC | CAP_MAP_UNMAP
|
||||
| CAP_MAP_UTCB,
|
||||
.start = __pfn(0xF8000000),
|
||||
.end = __pfn(0xF9000000),
|
||||
.size = __pfn(0x1000000),
|
||||
},
|
||||
|
||||
[4] = {
|
||||
.type = CAP_TYPE_MAP | CAP_RTYPE_PHYSMEM,
|
||||
.access = CAP_MAP_CACHED | CAP_MAP_UNCACHED
|
||||
| CAP_MAP_READ | CAP_MAP_WRITE
|
||||
@@ -58,51 +70,244 @@ struct container_info cinfo[] = {
|
||||
.start = __pfn(0x38000),
|
||||
.end = __pfn(0x1000000), /* 16 MB for all posix services */
|
||||
},
|
||||
[4] = {
|
||||
[5] = {
|
||||
.type = CAP_TYPE_IPC | CAP_RTYPE_CONTAINER,
|
||||
.access = CAP_IPC_SEND | CAP_IPC_RECV
|
||||
| CAP_IPC_FULL | CAP_IPC_SHORT
|
||||
| CAP_IPC_EXTENDED,
|
||||
.start = 0, .end = 0, .size = 0,
|
||||
},
|
||||
[5] = {
|
||||
[6] = {
|
||||
.type = CAP_TYPE_TCTRL | CAP_RTYPE_CONTAINER,
|
||||
.access = CAP_TCTRL_CREATE | CAP_TCTRL_DESTROY
|
||||
| CAP_TCTRL_SUSPEND | CAP_TCTRL_RESUME
|
||||
| CAP_TCTRL_RECYCLE,
|
||||
.start = 0, .end = 0, .size = 0,
|
||||
},
|
||||
[6] = {
|
||||
[7] = {
|
||||
.type = CAP_TYPE_EXREGS | CAP_RTYPE_CONTAINER,
|
||||
.access = CAP_EXREGS_RW_PAGER
|
||||
| CAP_EXREGS_RW_UTCB | CAP_EXREGS_RW_SP
|
||||
| CAP_EXREGS_RW_PC | CAP_EXREGS_RW_REGS,
|
||||
.start = 0, .end = 0, .size = 0,
|
||||
},
|
||||
[7] = {
|
||||
[8] = {
|
||||
.type = CAP_TYPE_QUANTITY
|
||||
| CAP_RTYPE_THREADPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
.size = 64,
|
||||
},
|
||||
[8] = {
|
||||
[9] = {
|
||||
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_SPACEPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
.size = 64,
|
||||
},
|
||||
[9] = {
|
||||
[10] = {
|
||||
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_CPUPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
.size = 50, /* Percentage */
|
||||
},
|
||||
[10] = {
|
||||
[11] = {
|
||||
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_MUTEXPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
.size = 100,
|
||||
},
|
||||
[12] = {
|
||||
/* For pmd accounting */
|
||||
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_MAPPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
/* Function of mem regions, nthreads etc. */
|
||||
.size = (64 * 30 + 100),
|
||||
},
|
||||
[13] = {
|
||||
/* For cap splitting, creating, etc. */
|
||||
.type = CAP_TYPE_QUANTITY | CAP_RTYPE_CAPPOOL,
|
||||
.access = 0, .start = 0, .end = 0,
|
||||
/* This may be existing caps X 2 etc. */
|
||||
.size = 30,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
int container_init(struct container *c)
|
||||
{
|
||||
/* Allocate new container id */
|
||||
c->cid = id_new(&kernel_container.container_ids);
|
||||
|
||||
/* Init data structures */
|
||||
link_init(&c->pager_list);
|
||||
init_address_space_list(&c->space_list);
|
||||
init_ktcb_list(&c->ktcb_list);
|
||||
init_mutex_queue_head(&c->mutex_queue_head);
|
||||
|
||||
/* Init scheduler */
|
||||
sched_init(&c->scheduler);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct container *container_create(void)
|
||||
{
|
||||
struct container *c = alloc_container();
|
||||
|
||||
container_init(c);
|
||||
|
||||
return c;
|
||||
}
|
||||
|
||||
void kcont_insert_container(struct container *c,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
list_insert(&c->list, &kcont->containers.list);
|
||||
kcont->containers.ncont++;
|
||||
}
|
||||
|
||||
void task_setup_utcb(struct ktcb *task, struct pager *pager)
|
||||
{
|
||||
struct capability *cap;
|
||||
|
||||
/* Find a virtual memory capability with UTCB map permissions */
|
||||
list_foreach_struct(cap, &pager->cap_list.caps, list) {
|
||||
if (((cap->type & CAP_RTYPE_MASK) ==
|
||||
CAP_RTYPE_VIRTMEM) &&
|
||||
(cap->access & CAP_MAP_UTCB)) {
|
||||
/* Use first address slot as pager's utcb */
|
||||
task->utcb_address = __pfn_to_addr(cap->start);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
*
|
||||
* Create a purer address_space_create that takes
|
||||
* flags for extra ops such as copying kernel tables,
|
||||
* user tables of an existing pgd etc.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The first pager initialization is a special-case
|
||||
* since it uses the current kernel pgd.
|
||||
*/
|
||||
int init_first_pager(struct pager *pager,
|
||||
struct container *cont,
|
||||
pgd_table_t *current_pgd)
|
||||
{
|
||||
struct ktcb *task = tcb_alloc_init();
|
||||
struct address_space *space;
|
||||
|
||||
/* Initialize ktcb */
|
||||
task_init_registers(task, pager->start_vma);
|
||||
task_setup_utcb(task, pager);
|
||||
|
||||
/* Allocate space structure */
|
||||
if (!(space = alloc_space()))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Set up space id */
|
||||
space->spid = id_new(&kernel_container.space_ids);
|
||||
|
||||
/* Initialize space structure */
|
||||
link_init(&space->list);
|
||||
mutex_init(&space->lock);
|
||||
space->pgd = current_pgd;
|
||||
|
||||
task->space = space;
|
||||
task->container = cont;
|
||||
|
||||
/* Map the task's space */
|
||||
add_mapping_pgd(pager->start_lma, pager->start_vma,
|
||||
page_align_up(pager->memsize),
|
||||
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
|
||||
|
||||
printk("Mapping %lu pages from 0x%lx to 0x%lx for %s\n",
|
||||
__pfn(page_align_up(pager->memsize)),
|
||||
pager->start_lma, pager->start_vma, cont->name);
|
||||
|
||||
/* Initialize task scheduler parameters */
|
||||
sched_init_task(task, TASK_PRIO_PAGER);
|
||||
|
||||
/* Give it a kick-start tick and make runnable */
|
||||
task->ticks_left = 1;
|
||||
sched_resume_async(task);
|
||||
|
||||
/* Container list that keeps all tasks */
|
||||
tcb_add(task);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Inspects pager parameters defined in the container,
|
||||
* and sets up an execution environment for the pager.
|
||||
*
|
||||
* This involves setting up pager's ktcb, space, utcb,
|
||||
* all ids, registers, and mapping its (perhaps) first
|
||||
* few pages in order to make it runnable.
|
||||
*/
|
||||
int init_pager(struct pager *pager, struct container *cont)
|
||||
{
|
||||
struct ktcb *task = tcb_alloc_init();
|
||||
|
||||
task_init_registers(task, pager->start_vma);
|
||||
|
||||
task_setup_utcb(task, pager);
|
||||
|
||||
task->space = address_space_create(0);
|
||||
|
||||
task->container = cont;
|
||||
|
||||
add_mapping_pgd(pager->start_lma, pager->start_vma,
|
||||
page_align_up(pager->memsize),
|
||||
MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
|
||||
|
||||
printk("Mapping %lu pages from 0x%lx to 0x%lx for %s\n",
|
||||
__pfn(page_align_up(pager->memsize)),
|
||||
pager->start_lma, pager->start_vma, cont->name);
|
||||
|
||||
/* Initialize task scheduler parameters */
|
||||
sched_init_task(task, TASK_PRIO_PAGER);
|
||||
|
||||
/* Give it a kick-start tick and make runnable */
|
||||
task->ticks_left = 1;
|
||||
sched_resume_async(task);
|
||||
|
||||
/* Container list that keeps all tasks */
|
||||
tcb_add(task);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize all containers with their initial set of tasks,
|
||||
* spaces, scheduler parameters such that they can be started.
|
||||
*/
|
||||
int container_init_pagers(struct kernel_container *kcont,
|
||||
pgd_table_t *current_pgd)
|
||||
{
|
||||
struct container *cont;
|
||||
struct pager *pager;
|
||||
int pgidx = 0;
|
||||
|
||||
list_foreach_struct(cont, &kcont->containers.list, list) {
|
||||
for (int i = 0; i < cont->npagers; i++) {
|
||||
pager = &cont->pager[i];
|
||||
|
||||
/* First pager initializes specially */
|
||||
if (pgidx == 0)
|
||||
init_first_pager(pager, cont,
|
||||
current_pgd);
|
||||
else
|
||||
init_pager(pager, cont);
|
||||
pgidx++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
*/
|
||||
#include <l4/lib/list.h>
|
||||
#include <l4/lib/memcache.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
|
||||
/* Supports this many different kmalloc sizes */
|
||||
@@ -36,117 +36,18 @@ void init_kmalloc()
|
||||
mutex_init(&km_pool.kmalloc_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* KMALLOC implementation:
|
||||
*
|
||||
* Allocates memory from mem_caches that it generates on-the-fly,
|
||||
* for up to KMALLOC_POOLS_MAX different sizes.
|
||||
*/
|
||||
void *__kmalloc(int size)
|
||||
{
|
||||
struct mem_cache *cache;
|
||||
int right_sized_pool_idx = -1;
|
||||
int index;
|
||||
|
||||
BUG_ON(!size); /* It is a kernel bug if size is 0 */
|
||||
|
||||
for (int i = 0; i < km_pool.total; i++) {
|
||||
/* Check if this pool has right size */
|
||||
if (km_pool.pool_head[i].cache_size == size) {
|
||||
right_sized_pool_idx = i;
|
||||
/*
|
||||
* Found the pool, now see if any
|
||||
* cache has available slots
|
||||
*/
|
||||
list_foreach_struct(cache, &km_pool.pool_head[i].cache_list,
|
||||
list) {
|
||||
if (cache->free)
|
||||
return mem_cache_alloc(cache);
|
||||
else
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* All pools are allocated and none has requested size
|
||||
*/
|
||||
if ((right_sized_pool_idx < 0) &&
|
||||
(km_pool.total == KMALLOC_POOLS_MAX - 1)) {
|
||||
printk("kmalloc: Too many types of pool sizes requested. "
|
||||
"Giving up.\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
/* A pool exists with given size? (But no cache in it is free) */
|
||||
if (right_sized_pool_idx >= 0)
|
||||
index = right_sized_pool_idx;
|
||||
else /* No pool of this size, allocate new by incrementing total */
|
||||
index = km_pool.total++;
|
||||
|
||||
/* Only allow up to page size */
|
||||
BUG_ON(size >= PAGE_SIZE);
|
||||
BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
|
||||
size, 0)));
|
||||
// printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
|
||||
list_insert(&cache->list, &km_pool.pool_head[index].cache_list);
|
||||
km_pool.pool_head[index].occupied = 1;
|
||||
km_pool.pool_head[index].total_caches++;
|
||||
km_pool.pool_head[index].cache_size = size;
|
||||
return mem_cache_alloc(cache);
|
||||
}
|
||||
|
||||
void *kmalloc(int size)
|
||||
{
|
||||
void *p;
|
||||
|
||||
mutex_lock(&km_pool.kmalloc_mutex);
|
||||
p = __kmalloc(size);
|
||||
mutex_unlock(&km_pool.kmalloc_mutex);
|
||||
return p;
|
||||
}
|
||||
|
||||
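A usage note for the allocator above: kmalloc() serializes on km_pool.kmalloc_mutex and serves each request from an exact-size cache, creating a new page-backed cache the first time a given size (which must stay below PAGE_SIZE) is requested. A minimal, hypothetical caller (struct foo is illustrative only, not a type from this commit):

	struct foo *f = kmalloc(sizeof(struct foo));
	/* ... use f ... */
	kfree(f);	/* walks every pool/cache to find the owner, see the FIXME below */
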
/* FIXME:
|
||||
* Horrible complexity O(n^2) because we don't know which cache
|
||||
* we're freeing from!!! But it's simple. ;-)
|
||||
*/
|
||||
int __kfree(void *p)
|
||||
{
|
||||
struct mem_cache *cache, *tmp;
|
||||
|
||||
for (int i = 0; i < km_pool.total; i++)
|
||||
list_foreach_removable_struct(cache, tmp,
|
||||
&km_pool.pool_head[i].cache_list,
|
||||
list) {
|
||||
if (!mem_cache_free(cache, p)) {
|
||||
if (mem_cache_is_empty(cache)) {
|
||||
km_pool.pool_head[i].total_caches--;
|
||||
list_remove(&cache->list);
|
||||
free_page(cache);
|
||||
/*
|
||||
* Total remains the same but slot
|
||||
* may have no caches left.
|
||||
*/
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kfree(void *p)
|
||||
{
|
||||
int ret;
|
||||
mutex_lock(&km_pool.kmalloc_mutex);
|
||||
ret = __kfree(p);
|
||||
mutex_unlock(&km_pool.kmalloc_mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *kzalloc(int size)
|
||||
{
|
||||
void *p = kmalloc(size);
|
||||
memset(p, 0, size);
|
||||
return p;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
/*
|
||||
* Simple kernel memory allocator built on top of memcache
|
||||
* implementation.
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/lib/memcache.h>
|
||||
#include <l4/lib/list.h>
|
||||
#include <l4/generic/space.h>
|
||||
#include <l4/generic/kmalloc.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/physmem.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
|
||||
/* FIXME:
|
||||
*
|
||||
* mem_cache_alloc() now has an interruptible mutex.
|
||||
* All routines defined here should check returned errors.
|
||||
*/
|
||||
|
||||
#define PGALLOC_PGD_CACHE 0
|
||||
#define PGALLOC_PMD_CACHE 1
|
||||
#define PGALLOC_PG_CACHE 2
|
||||
#define PGALLOC_CACHE_TOTAL 3
|
||||
|
||||
/* The initial chunk of physical memory allocated before any pagers. */
|
||||
#define PGALLOC_INIT_GRANT SZ_1MB
|
||||
|
||||
/* Covers 3 main types of memory needed by the kernel. */
|
||||
struct pgalloc {
|
||||
struct link cache_list[3];
|
||||
};
|
||||
static struct pgalloc pgalloc;
|
||||
|
||||
void pgalloc_add_new_cache(struct mem_cache *cache, int cidx)
|
||||
{
|
||||
link_init(&cache->list);
|
||||
BUG_ON(cidx >= PGALLOC_CACHE_TOTAL || cidx < 0);
|
||||
list_insert(&cache->list, &pgalloc.cache_list[cidx]);
|
||||
}
|
||||
|
||||
void print_kmem_grant_params(grant_kmem_usage_t *params)
|
||||
{
|
||||
printk("%s: %lu bytes physical memory granted.\n", __KERNELNAME__, params->total_size);
|
||||
printk("%s: Possible kmem usage on this memory grant:\n", __KERNELNAME__);
|
||||
printk("%s: PGDs: %lu, PMDs: %lu, TCBs: %lu, Extra: %lu bytes.\n", __KERNELNAME__,
|
||||
params->total_pgds, params->total_pmds, params->total_tcbs,
|
||||
params->extra);
|
||||
}
|
||||
|
||||
#define TASK_AVERAGE_SIZE SZ_16MB
|
||||
#define TASK_AVERAGE_PMDS TASK_AVERAGE_SIZE / PMD_MAP_SIZE
|
||||
|
||||
void calc_grant_kmem_usage(grant_kmem_usage_t *params, unsigned long total_size)
|
||||
{
|
||||
/* Kmem usage per task */
|
||||
unsigned long task_avg_kmem_usage = PGD_SIZE + PMD_SIZE * 16 + PAGE_SIZE;
|
||||
unsigned long total_tasks = total_size / task_avg_kmem_usage;
|
||||
unsigned long extra = total_size - total_tasks * task_avg_kmem_usage;
|
||||
|
||||
params->total_size = total_size;
|
||||
params->total_tasks = total_tasks;
|
||||
params->total_pgds = total_tasks;
|
||||
params->total_pmds = total_tasks * 16;
|
||||
params->total_tcbs = total_tasks;
|
||||
params->extra = extra;
|
||||
|
||||
print_kmem_grant_params(params);
|
||||
}
|
||||
|
||||
|
||||
int pgalloc_add_new_grant(unsigned long pfn, int npages)
|
||||
{
|
||||
unsigned long physical = __pfn_to_addr(pfn);
|
||||
void *virtual = (void *)phys_to_virt(physical);
|
||||
struct mem_cache *pgd_cache, *pmd_cache, *pg_cache;
|
||||
grant_kmem_usage_t params;
|
||||
|
||||
/* First map the whole grant */
|
||||
add_mapping(physical, phys_to_virt(physical), __pfn_to_addr(npages),
|
||||
MAP_SVC_RW_FLAGS);
|
||||
|
||||
/* Calculate how to divide buffer into different caches */
|
||||
calc_grant_kmem_usage(¶ms, __pfn_to_addr(npages));
|
||||
|
||||
/* Create the caches, least alignment-needing, most, then others. */
|
||||
pmd_cache = mem_cache_init(virtual, params.total_pmds * PMD_SIZE,
|
||||
PMD_SIZE, 1);
|
||||
virtual += params.total_pmds * PMD_SIZE;
|
||||
|
||||
pgd_cache = mem_cache_init(virtual, params.total_pgds * PGD_SIZE,
|
||||
PGD_SIZE, 1);
|
||||
virtual += params.total_pgds * PGD_SIZE;
|
||||
|
||||
pg_cache = mem_cache_init(virtual, params.total_tcbs * PAGE_SIZE
|
||||
+ params.extra, PAGE_SIZE, 1);
|
||||
|
||||
/* Add the caches */
|
||||
pgalloc_add_new_cache(pgd_cache, PGALLOC_PGD_CACHE);
|
||||
pgalloc_add_new_cache(pmd_cache, PGALLOC_PMD_CACHE);
|
||||
pgalloc_add_new_cache(pg_cache, PGALLOC_PG_CACHE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void init_pgalloc(void)
|
||||
{
|
||||
int initial_grant = PGALLOC_INIT_GRANT;
|
||||
|
||||
for (int i = 0; i < PGALLOC_CACHE_TOTAL; i++)
|
||||
link_init(&pgalloc.cache_list[i]);
|
||||
|
||||
/* Grant ourselves with an initial chunk of physical memory */
|
||||
physmem.free_cur = page_align_up(physmem.free_cur);
|
||||
set_page_map(physmem.free_cur, __pfn(initial_grant), 1);
|
||||
pgalloc_add_new_grant(__pfn(physmem.free_cur), __pfn(initial_grant));
|
||||
physmem.free_cur += initial_grant;
|
||||
|
||||
/* Activate kmalloc */
|
||||
init_kmalloc();
|
||||
}
|
||||
|
||||
void pgalloc_remove_cache(struct mem_cache *cache)
|
||||
{
|
||||
list_remove_init(&cache->list);
|
||||
}
|
||||
|
||||
static inline void *pgalloc_from_cache(int cidx)
|
||||
{
|
||||
struct mem_cache *cache, *n;
|
||||
|
||||
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
|
||||
if (mem_cache_total_empty(cache))
|
||||
return mem_cache_zalloc(cache);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kfree_to_cache(int cidx, void *virtual)
|
||||
{
|
||||
struct mem_cache *cache, *n;
|
||||
|
||||
list_foreach_removable_struct(cache, n, &pgalloc.cache_list[cidx], list)
|
||||
if (mem_cache_free(cache, virtual) == 0)
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
void *alloc_page(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PG_CACHE);
|
||||
}
|
||||
|
||||
void *alloc_pmd(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PMD_CACHE);
|
||||
}
|
||||
|
||||
void *alloc_pgd(void)
|
||||
{
|
||||
return pgalloc_from_cache(PGALLOC_PGD_CACHE);
|
||||
}
|
||||
|
||||
int free_page(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PG_CACHE, v);
|
||||
}
|
||||
|
||||
int free_pmd(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PMD_CACHE, v);
|
||||
}
|
||||
|
||||
int free_pgd(void *v)
|
||||
{
|
||||
return kfree_to_cache(PGALLOC_PGD_CACHE, v);
|
||||
}
|
||||
|
||||
void *zalloc_page(void)
|
||||
{
|
||||
void *p;
|
||||
|
||||
if (!(p = alloc_page()))
|
||||
return 0;
|
||||
|
||||
memset(p, 0, PAGE_SIZE);
|
||||
return p;
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <l4/generic/physmem.h>
|
||||
#include <l4/generic/pgalloc.h>
|
||||
#include <l4/generic/resource.h>
|
||||
#include <l4/generic/tcb.h>
|
||||
#include <l4/lib/list.h>
|
||||
#include <l4/lib/spinlock.h>
|
||||
@@ -88,6 +88,6 @@ void physmem_init()
|
||||
|
||||
void memory_init()
|
||||
{
|
||||
init_pgalloc();
|
||||
//init_pgalloc();
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
*
|
||||
* Copyright (C) 2009 Bahadir Balban
|
||||
*/
|
||||
|
||||
#include <l4/generic/capability.h>
|
||||
#include <l4/generic/cap-types.h>
|
||||
#include <l4/generic/container.h>
|
||||
@@ -16,6 +15,76 @@
|
||||
|
||||
struct kernel_container kernel_container;
|
||||
|
||||
pgd_table_t *alloc_pgd(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.pgd_cache);
|
||||
}
|
||||
|
||||
pmd_table_t *alloc_pmd(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.pmd_cache);
|
||||
}
|
||||
|
||||
struct address_space *alloc_space(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.space_cache);
|
||||
}
|
||||
|
||||
struct ktcb *alloc_ktcb(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.ktcb_cache);
|
||||
}
|
||||
|
||||
struct capability *alloc_capability(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.cap_cache);
|
||||
}
|
||||
|
||||
struct container *alloc_container(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.cont_cache);
|
||||
}
|
||||
|
||||
struct mutex_queue *alloc_user_mutex(void)
|
||||
{
|
||||
return mem_cache_zalloc(kernel_container.mutex_cache);
|
||||
}
|
||||
|
||||
void free_pgd(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.pgd_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_pmd(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.pmd_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_space(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.space_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_ktcb(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.ktcb_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_capability(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.cap_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_container(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.cont_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void free_user_mutex(void *addr)
|
||||
{
|
||||
BUG_ON(mem_cache_free(kernel_container.mutex_cache, addr) < 0);
|
||||
}
|
||||
|
||||
void cap_list_init(struct cap_list *clist)
|
||||
{
|
||||
clist->ncaps = 0;
|
||||
@@ -78,6 +147,10 @@ int memcap_shrink(struct capability *cap, struct cap_list *cap_list,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a single memory cap (that definitely overlaps) removes
|
||||
* the portion of pfns specified by start/end.
|
||||
*/
|
||||
int memcap_unmap_range(struct capability *cap,
|
||||
struct cap_list *cap_list,
|
||||
const unsigned long start,
|
||||
@@ -128,65 +201,6 @@ int memcap_unmap(struct cap_list *cap_list,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do all system accounting for this capability info
|
||||
* structure that belongs to a container, such as
|
||||
* count its resource requirements, remove its portion
|
||||
* from global kernel capabilities etc.
|
||||
*/
|
||||
int process_cap_info(struct cap_info *cap,
|
||||
struct boot_resources *bootres,
|
||||
struct kernel_container *kcont)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (cap->type & CAP_RTYPE_MASK) {
|
||||
case CAP_RTYPE_THREADPOOL:
|
||||
bootres->nthreads += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_SPACEPOOL:
|
||||
bootres->nspaces += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_MUTEXPOOL:
|
||||
bootres->nmutex += cap->size;
|
||||
break;
|
||||
case CAP_RTYPE_VIRTMEM:
|
||||
/* Area size in pages divided by mapsize in pages */
|
||||
bootres->npmds +=
|
||||
cap->size / __pfn(PMD_MAP_SIZE);
|
||||
if ((ret = memcap_unmap(&kcont->virtmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
case CAP_RTYPE_PHYSMEM:
|
||||
if ((ret = memcap_unmap(&kcont->physmem_free,
|
||||
cap->start, cap->end))) {
|
||||
if (ret < 0)
|
||||
printk("FATAL: Insufficient boot memory "
|
||||
"to split capability\n");
|
||||
if (ret > 0)
|
||||
printk("FATAL: Memory capability range "
|
||||
"overlaps with another one. "
|
||||
"start=0x%lx, end=0x%lx\n",
|
||||
__pfn_to_addr(cap->start),
|
||||
__pfn_to_addr(cap->end));
|
||||
BUG();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Migrate any boot allocations to their relevant caches.
|
||||
*/
|
||||
@@ -219,65 +233,28 @@ int free_boot_memory(struct boot_resources *bootres,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
|
||||
struct kernel_container *kcont,
|
||||
int aligned)
|
||||
{
|
||||
struct capability *cap;
|
||||
unsigned long bufsize;
|
||||
|
||||
/* In all unused physical memory regions */
|
||||
list_foreach_struct(cap, &kcont->physmem_free.caps, list) {
|
||||
/* Get buffer size needed for cache */
|
||||
bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
|
||||
struct_size, nstruct,
|
||||
aligned);
|
||||
/*
|
||||
* Check if memcap region size is enough to cover
|
||||
* resource allocation
|
||||
*/
|
||||
if (__pfn_to_addr(cap->end - cap->start) >= bufsize) {
|
||||
unsigned long virtual =
|
||||
phys_to_virt(__pfn_to_addr(cap->start));
|
||||
/*
|
||||
* Map the buffer as boot mapping if pmd caches
|
||||
* are not initialized
|
||||
*/
|
||||
if (!kcont->pmd_cache) {
|
||||
add_boot_mapping(__pfn_to_addr(cap->start),
|
||||
virtual, bufsize,
|
||||
MAP_SVC_RW_FLAGS);
|
||||
} else {
|
||||
add_mapping(__pfn_to_addr(cap->start),
|
||||
virtual, bufsize,
|
||||
MAP_SVC_RW_FLAGS);
|
||||
}
|
||||
			/* Unmap area from memcap */
			memcap_unmap_range(cap, &kcont->physmem_free,
					cap->start, cap->start +
					__pfn(page_align_up((bufsize))));

			/* TODO: Manipulate memcaps for virtual range??? */

			/* Initialize the cache */
			return mem_cache_init((void *)virtual, bufsize,
					PGD_SIZE, 1);
		}
	}
	return 0;
}

/*
 * Initializes kernel caplists, and sets up total of physical
 * and virtual memory as single capabilities of the kernel.
 * They will then get split into caps of different lengths
 * during the traversal of container capabilities.
 * during the traversal of container capabilities, and memcache
 * allocations.
 */
void init_kernel_container(struct kernel_container *kcont)
{
	struct capability *physmem, *virtmem, *kernel_area;

	/* Initialize system id pools */
	kcont->space_ids.nwords = SYSTEM_IDS_MAX;
	kcont->ktcb_ids.nwords = SYSTEM_IDS_MAX;
	kcont->resource_ids.nwords = SYSTEM_IDS_MAX;
	kcont->container_ids.nwords = SYSTEM_IDS_MAX;
	kcont->mutex_ids.nwords = SYSTEM_IDS_MAX;
	kcont->capability_ids.nwords = SYSTEM_IDS_MAX;

	/* Get first container id for itself */
	kcont->cid = id_new(&kcont->container_ids);

	/* Initialize kernel capability lists */
	cap_list_init(&kcont->physmem_used);
	cap_list_init(&kcont->physmem_free);
@@ -318,40 +295,164 @@ void init_kernel_container(struct kernel_container *kcont)
	 */
}

void create_containers(struct boot_resources *bootres,
			struct kernel_container *kcont)
/*
 * Copies cinfo structures to real capabilities for each pager.
 *
 * FIXME: Check if pager has enough resources to create its caps.
 */
int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
{
	struct capability *cap;
	struct cap_info *cap_info;

	pager->start_lma = pinfo->pager_lma;
	pager->start_vma = pinfo->pager_vma;
	pager->memsize = pinfo->pager_size;

	/* Copy all cinfo structures into real capabilities */
	for (int i = 0; i < pinfo->ncaps; i++) {
		cap = capability_create();

		cap_info = &pinfo->caps[i];

		cap->type = cap_info->type;
		cap->access = cap_info->access;
		cap->start = cap_info->start;
		cap->end = cap_info->end;
		cap->size = cap_info->size;

		cap_list_insert(cap, &pager->cap_list);
	}
	return 0;
}

void create_capabilities(struct boot_resources *bootres,
			struct kernel_container *kcont)
/*
 * Copies container info from a given compact container descriptor to
 * a real container
 */
int copy_container_info(struct container *c, struct container_info *cinfo)
{
	strncpy(c->name, cinfo->name, CONFIG_CONTAINER_NAMESIZE);
	c->npagers = cinfo->npagers;

	/* Copy capabilities */
	for (int i = 0; i < c->npagers; i++)
		copy_pager_info(&c->pager[i], &cinfo->pager[i]);
	return 0;
}

/*
 * Create real containers from compile-time created cinfo structures
 */
void setup_containers(struct boot_resources *bootres,
		      struct kernel_container *kcont)
{
	struct container *container;
	pgd_table_t *current_pgd;

	/*
	 * Move to real page tables, accounted by
	 * pgds and pmds provided from the caches
	 */
	current_pgd = realloc_page_tables();

	/* Create all containers but leave pagers */
	for (int i = 0; i < bootres->nconts; i++) {
		/* Allocate & init container */
		container = container_create();

		/* Fill in its information */
		copy_container_info(container, &cinfo[i]);

		/* Add it to kernel container list */
		kcont_insert_container(container, kcont);
	}

	/* Initialize pagers */
	container_init_pagers(kcont, current_pgd);
}

void setup_capabilities(struct boot_resources *bootres,
			struct kernel_container *kcont)
{

}

/*
 * Make sure to count boot pmds, and kernel capabilities
 * created in boot memory.
 * Given a structure size and numbers, it initializes a memory cache
 * using free memory available from free kernel memory capabilities.
 */
struct mem_cache *init_resource_cache(int nstruct, int struct_size,
				      struct kernel_container *kcont,
				      int aligned)
{
	struct capability *cap;
	unsigned long bufsize;

	/* In all unused physical memory regions */
	list_foreach_struct(cap, &kcont->physmem_free.caps, list) {
		/* Get buffer size needed for cache */
		bufsize = mem_cache_bufsize((void *)__pfn_to_addr(cap->start),
					    struct_size, nstruct,
					    aligned);
		/*
		 * Check if memcap region size is enough to cover
		 * resource allocation
		 */
		if (__pfn_to_addr(cap->end - cap->start) >= bufsize) {
			unsigned long virtual =
				phys_to_virt(__pfn_to_addr(cap->start));
			/*
			 * Map the buffer as boot mapping if pmd caches
			 * are not initialized
			 */
			if (!kcont->pmd_cache) {
				add_boot_mapping(__pfn_to_addr(cap->start),
						 virtual,
						 page_align_up(bufsize),
						 MAP_SVC_RW_FLAGS);
			} else {
				add_mapping(__pfn_to_addr(cap->start),
					    virtual, page_align_up(bufsize),
					    MAP_SVC_RW_FLAGS);
			}
			/* Unmap area from memcap */
			memcap_unmap_range(cap, &kcont->physmem_free,
					   cap->start, cap->start +
					   __pfn(page_align_up((bufsize))));

			/* TODO: Manipulate memcaps for virtual range??? */

			/* Initialize the cache */
			return mem_cache_init((void *)virtual, bufsize,
					      struct_size, 1);
		}
	}
	return 0;
}

/*
 * TODO: Initialize ID cache
 *
 * Also total capabilities in the system + number of
 * capabilities containers are allowed to create dynamically.
 *
 * Count the extra pgd + space needed in case all containers quit
 * Given a kernel container and the set of boot resources required,
 * initializes all memory caches for allocations. Once caches are
 * initialized, earlier boot allocations are migrated to caches.
 */
void init_resource_allocators(struct boot_resources *bootres,
			      struct kernel_container *kcont)
{
	/*
	 * An extra space reserved for kernel
	 * in case all containers quit
	 */
	bootres->nspaces++;

	/* Initialise PGD cache */
	kcont->pgd_cache = init_resource_cache(bootres->nspaces,
					       PGD_SIZE, kcont, 1);

	/* Initialise PMD cache */
	kcont->pmd_cache = init_resource_cache(bootres->npmds,
					       PMD_SIZE, kcont, 1);

	/* Initialise struct address_space cache */
	kcont->address_space_cache =
	kcont->space_cache =
		init_resource_cache(bootres->nspaces,
				    sizeof(struct address_space),
				    kcont, 0);
@@ -364,33 +465,122 @@ void init_resource_allocators(struct boot_resources *bootres,
	kcont->mutex_cache = init_resource_cache(bootres->nmutex,
						 sizeof(struct mutex_queue),
						 kcont, 0);
	/* TODO: Initialize ID cache */

	/* Initialise capability cache */
	kcont->cap_cache = init_resource_cache(bootres->ncaps, /* FIXME: Count correctly */
					       sizeof(struct capability),
					       kcont, 0);
	/* Initialise container cache */
	kcont->cont_cache = init_resource_cache(bootres->nconts,
						sizeof(struct container),
						kcont, 0);

	/* Create system containers */
	create_containers(bootres, kcont);
	/*
	 * Add all caps used by the kernel + two extra in case
	 * more memcaps get split after cap cache init below.
	 */
	bootres->ncaps += kcont->virtmem_used.ncaps +
			  kcont->virtmem_free.ncaps +
			  kcont->physmem_used.ncaps +
			  kcont->physmem_free.ncaps + 2;

	/* Create capabilities */
	create_capabilities(bootres, kcont);
	/* Initialise capability cache */
	kcont->cap_cache = init_resource_cache(bootres->ncaps,
					       sizeof(struct capability),
					       kcont, 0);

	/* Count boot pmds used so far and add them */
	bootres->npmds += pgd_count_pmds(&init_pgd);

	/*
	 * Calculate maximum possible pmds
	 * that may be used during this pmd
	 * cache init and add them.
	 */
	bootres->npmds += ((bootres->npmds * PMD_SIZE) / PMD_MAP_SIZE);
	if (!is_aligned(bootres->npmds * PMD_SIZE, PMD_MAP_SIZE))
		bootres->npmds++;

	/* Initialise PMD cache */
	kcont->pmd_cache = init_resource_cache(bootres->npmds,
					       PMD_SIZE, kcont, 1);

}
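
/*
 * Editor's note (not part of the commit): a worked example of the pmd
 * padding arithmetic above, as a standalone sketch. The sizes are
 * hypothetical ARM-style values (PMD_SIZE = 1 KB of page-table memory
 * covering PMD_MAP_SIZE = 1 MB of virtual space); the real constants
 * come from the architecture headers. The point is that the buffer
 * holding npmds pmds may itself need extra pmds to be mapped, so that
 * overhead is added up front, rounding up for a partially covered span.
 */
static inline int pmds_with_mapping_overhead_sketch(int npmds)
{
	const unsigned long pmd_size = 1024;		/* hypothetical PMD_SIZE */
	const unsigned long pmd_map_size = 0x100000;	/* hypothetical PMD_MAP_SIZE */

	/* Extra pmds needed to map the pmd cache buffer itself */
	npmds += (npmds * pmd_size) / pmd_map_size;

	/* A partially covered map-size chunk still needs one more pmd */
	if ((npmds * pmd_size) % pmd_map_size)
		npmds++;

	return npmds;	/* e.g. 300 pmds -> 300 + 0 + 1 = 301 */
}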

/*
 * Do all system accounting for a given capability info
 * structure that belongs to a container, such as
 * count its resource requirements, remove its portion
 * from global kernel resource capabilities etc.
 */
int process_cap_info(struct cap_info *cap,
		     struct boot_resources *bootres,
		     struct kernel_container *kcont)
{
	int ret;

	switch (cap->type & CAP_RTYPE_MASK) {
	case CAP_RTYPE_THREADPOOL:
		bootres->nthreads += cap->size;
		break;
	case CAP_RTYPE_SPACEPOOL:
		bootres->nspaces += cap->size;
		break;
	case CAP_RTYPE_MUTEXPOOL:
		bootres->nmutex += cap->size;
		break;
	case CAP_RTYPE_MAPPOOL:
		/* Specifies how many pmds can be mapped */
		bootres->npmds += cap->size;
		break;
	case CAP_RTYPE_CAPPOOL:
		/* Specifies how many new caps can be created */
		bootres->ncaps += cap->size;
		break;

	case CAP_RTYPE_VIRTMEM:
		if ((ret = memcap_unmap(&kcont->virtmem_free,
					cap->start, cap->end))) {
			if (ret < 0)
				printk("FATAL: Insufficient boot memory "
				       "to split capability\n");
			if (ret > 0)
				printk("FATAL: Memory capability range "
				       "overlaps with another one. "
				       "start=0x%lx, end=0x%lx\n",
				       __pfn_to_addr(cap->start),
				       __pfn_to_addr(cap->end));
			BUG();
		}
		break;
	case CAP_RTYPE_PHYSMEM:
		if ((ret = memcap_unmap(&kcont->physmem_free,
					cap->start, cap->end))) {
			if (ret < 0)
				printk("FATAL: Insufficient boot memory "
				       "to split capability\n");
			if (ret > 0)
				printk("FATAL: Memory capability range "
				       "overlaps with another one. "
				       "start=0x%lx, end=0x%lx\n",
				       __pfn_to_addr(cap->start),
				       __pfn_to_addr(cap->end));
			BUG();
		}
		break;
	}
	return ret;
}
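
/*
 * Editor's note (not part of the commit): a sketch of the kind of
 * compile-time cap_info entries process_cap_info() is meant to account
 * for. Field names and the __pfn() page-frame conversion follow the
 * usage above; the array name, concrete numbers and the bare resource
 * types (without an OR'd operation type) are hypothetical.
 */
static struct cap_info example_caps_sketch[] = {
	{
		/* Pool cap: lets the container create up to 64 threads */
		.type = CAP_RTYPE_THREADPOOL,
		.size = 64,
	},
	{
		/* Memory cap: a 16 MB virtual region carved out of virtmem_free */
		.type = CAP_RTYPE_VIRTMEM,
		.start = __pfn(0x40000000),
		.end = __pfn(0x41000000),
	},
};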

int init_boot_resources(struct boot_resources *bootres,
			struct kernel_container *kcont)
/*
 * Initializes the kernel container by describing both virtual
 * and physical memory. Then traverses cap_info structures
 * to figure out resource requirements of containers.
 */
int setup_boot_resources(struct boot_resources *bootres,
			 struct kernel_container *kcont)
{
	struct cap_info *cap;

	init_kernel_container(kcont);

	/* Number of containers known at compile-time */
	bootres->nconts = TOTAL_CONTAINERS;
	bootres->nconts = CONFIG_TOTAL_CONTAINERS;

	/* Traverse all containers */
	for (int i = 0; i < bootres->nconts; i++) {
@@ -409,29 +599,34 @@ int init_boot_resources(struct boot_resources *bootres,
		}
	}

	/* TODO: Count all ids needed to represent all */
	return 0;
}

/*
 * FIXME: Add error handling
 *
 * Initializes all system resources and handling of those
 * resources. First descriptions are done by allocating from
 * boot memory, once memory caches are initialized, boot
 * memory allocations are migrated over to caches.
 */
int init_system_resources(struct kernel_container *kcont)
{

	struct boot_resources bootres;

	memset(&bootres, 0, sizeof(bootres));

	init_boot_resources(&bootres, kcont);
	setup_boot_resources(&bootres, kcont);

	init_resource_allocators(&bootres, kcont);

	free_boot_memory(&bootres, kcont);
	/* Create system containers */
	setup_containers(&bootres, kcont);

	/* Create capabilities */
	setup_capabilities(&bootres, kcont);

	return 0;
}


@@ -11,6 +11,8 @@
#include <l4/lib/bit.h>
#include <l4/lib/spinlock.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/resource.h>
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/generic/irq.h>
#include <l4/generic/tcb.h>
@@ -23,9 +25,8 @@
#include INC_ARCH(exception.h)


static struct runqueue sched_rq[SCHED_RQ_TOTAL];
static struct runqueue *rq_runnable, *rq_expired;
static int prio_total; /* Total priority of all tasks */
//static struct runqueue *rq_runnable, *rq_expired;
//static int prio_total; /* Total priority of all tasks */

/* This is incremented on each irq or voluntarily by preempt_disable() */
extern unsigned int current_irq_nest_count;
@@ -35,14 +36,14 @@ static int voluntary_preempt = 0;

void sched_lock_runqueues(void)
{
	spin_lock(&sched_rq[0].lock);
	spin_lock(&sched_rq[1].lock);
	spin_lock(&curcont->scheduler.sched_rq[0].lock);
	spin_lock(&curcont->scheduler.sched_rq[1].lock);
}

void sched_unlock_runqueues(void)
{
	spin_unlock(&sched_rq[0].lock);
	spin_unlock(&sched_rq[1].lock);
	spin_unlock(&curcont->scheduler.sched_rq[0].lock);
	spin_unlock(&curcont->scheduler.sched_rq[1].lock);
}

int preemptive()
@@ -105,17 +106,21 @@ void idle_task(void)
	while(1);
}

void sched_init_runqueues(void)
void sched_init_runqueue(struct runqueue *rq)
{
	for (int i = 0; i < SCHED_RQ_TOTAL; i++) {
		memset(&sched_rq[i], 0, sizeof(struct runqueue));
		link_init(&sched_rq[i].task_list);
		spin_lock_init(&sched_rq[i].lock);
	}
	memset(rq, 0, sizeof(struct runqueue));
	link_init(&rq->task_list);
	spin_lock_init(&rq->lock);
}

	rq_runnable = &sched_rq[0];
	rq_expired = &sched_rq[1];
	prio_total = 0;
void sched_init(struct scheduler *scheduler)
{
	for (int i = 0; i < SCHED_RQ_TOTAL; i++)
		sched_init_runqueue(&scheduler->sched_rq[i]);

	scheduler->rq_runnable = &scheduler->sched_rq[0];
	scheduler->rq_expired = &scheduler->sched_rq[1];
	scheduler->prio_total = 0;
}
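
/*
 * Editor's note (not part of the commit): the per-container scheduler
 * state assumed by sched_init() and the curcont->scheduler accesses in
 * this file. The actual definition lives in the scheduler header; this
 * sketch only records the shape implied by the code above.
 */
struct scheduler_sketch {
	struct runqueue sched_rq[SCHED_RQ_TOTAL];	/* replaces the old static sched_rq[] */
	struct runqueue *rq_runnable;			/* queue of currently runnable tasks */
	struct runqueue *rq_expired;			/* tasks that used up their ticks */
	int prio_total;					/* total priority of runnable tasks */
};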

/* Swap runnable and expired runqueues. */
@@ -123,13 +128,14 @@ static void sched_rq_swap_runqueues(void)
{
	struct runqueue *temp;

	BUG_ON(list_empty(&rq_expired->task_list));
	BUG_ON(rq_expired->total == 0);
	BUG_ON(list_empty(&curcont->scheduler.rq_expired->task_list));
	BUG_ON(curcont->scheduler.rq_expired->total == 0);

	/* Queues are swapped and expired list becomes runnable */
	temp = rq_runnable;
	rq_runnable = rq_expired;
	rq_expired = temp;
	temp = curcont->scheduler.rq_runnable;
	curcont->scheduler.rq_runnable =
		curcont->scheduler.rq_expired;
	curcont->scheduler.rq_expired = temp;
}

/* Set policy on where to add tasks in the runqueue */
@@ -202,7 +208,9 @@ void sched_resume_sync(struct ktcb *task)
{
	BUG_ON(task == current);
	task->state = TASK_RUNNABLE;
	sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
	sched_rq_add_task(task,
			  curcont->scheduler.rq_runnable,
			  RQ_ADD_FRONT);
	schedule();
}

@@ -215,7 +223,7 @@ void sched_resume_sync(struct ktcb *task)
void sched_resume_async(struct ktcb *task)
{
	task->state = TASK_RUNNABLE;
	sched_rq_add_task(task, rq_runnable, RQ_ADD_FRONT);
	sched_rq_add_task(task, curcont->scheduler.rq_runnable, RQ_ADD_FRONT);
}

/*
@@ -228,8 +236,8 @@ void sched_suspend_sync(void)
	sched_rq_remove_task(current);
	current->state = TASK_INACTIVE;
	current->flags &= ~TASK_SUSPENDING;
	prio_total -= current->priority;
	BUG_ON(prio_total <= 0);
	curcont->scheduler.prio_total -= current->priority;
	BUG_ON(curcont->scheduler.prio_total <= 0);
	preempt_enable();

	/* Async wake up any waiters */
@@ -243,8 +251,8 @@ void sched_suspend_async(void)
	sched_rq_remove_task(current);
	current->state = TASK_INACTIVE;
	current->flags &= ~TASK_SUSPENDING;
	prio_total -= current->priority;
	BUG_ON(prio_total <= 0);
	curcont->scheduler.prio_total -= current->priority;
	BUG_ON(curcont->scheduler.prio_total <= 0);

	/* This will make sure we yield soon */
	preempt_enable();
@@ -338,9 +346,13 @@ void schedule()
	if (current->state == TASK_RUNNABLE) {
		sched_rq_remove_task(current);
		if (current->ticks_left)
			sched_rq_add_task(current, rq_runnable, RQ_ADD_BEHIND);
			sched_rq_add_task(current,
					  curcont->scheduler.rq_runnable,
					  RQ_ADD_BEHIND);
		else
			sched_rq_add_task(current, rq_expired, RQ_ADD_BEHIND);
			sched_rq_add_task(current,
					  curcont->scheduler.rq_expired,
					  RQ_ADD_BEHIND);
	}

	/*
@@ -352,14 +364,16 @@ void schedule()
	wake_up_task(current, WAKEUP_INTERRUPT);

	/* Determine the next task to be run */
	if (rq_runnable->total > 0) {
		next = link_to_struct(rq_runnable->task_list.next,
				      struct ktcb, rq_list);
	if (curcont->scheduler.rq_runnable->total > 0) {
		next = link_to_struct(
			curcont->scheduler.rq_runnable->task_list.next,
			struct ktcb, rq_list);
	} else {
		if (rq_expired->total > 0) {
		if (curcont->scheduler.rq_expired->total > 0) {
			sched_rq_swap_runqueues();
			next = link_to_struct(rq_runnable->task_list.next,
					      struct ktcb, rq_list);
			next = link_to_struct(
				curcont->scheduler.rq_runnable->task_list.next,
				struct ktcb, rq_list);
		} else {
			idle_task();
		}
@@ -367,7 +381,7 @@ void schedule()

	/* New tasks affect runqueue total priority. */
	if (next->flags & TASK_RESUMING) {
		prio_total += next->priority;
		curcont->scheduler.prio_total += next->priority;
		next->flags &= ~TASK_RESUMING;
	}

@@ -378,7 +392,7 @@ void schedule()
	 * becomes runnable rather than all at once. It is done
	 * every runqueue swap
	 */
	sched_recalc_ticks(next, prio_total);
	sched_recalc_ticks(next, curcont->scheduler.prio_total);
	next->ticks_left = next->ticks_assigned;
	}

@@ -392,25 +406,11 @@ void schedule()
}

/*
 * Initialise pager as runnable for first-ever scheduling,
 * and start the scheduler.
 * Start the timer and switch to current task
 * for first-ever scheduling.
 */
void scheduler_start()
{
	/* Initialise runqueues */
	sched_init_runqueues();

	/* Initialise scheduler fields of pager */
	sched_init_task(current, TASK_PRIO_PAGER);

	/* Add task to runqueue first */
	sched_rq_add_task(current, rq_runnable, RQ_ADD_FRONT);

	/* Give it a kick-start tick and make runnable */
	current->ticks_left = 1;
	current->state = TASK_RUNNABLE;

	/* Start the timer and switch */
	timer_start();
	switch_to_user(current);
}

@@ -8,6 +8,7 @@
#include INC_ARCH(exception.h)
#include INC_SUBARCH(mm.h)
#include <l4/generic/space.h>
#include <l4/generic/container.h>
#include <l4/generic/tcb.h>
#include <l4/generic/kmalloc.h>
#include <l4/api/space.h>
@@ -16,25 +17,23 @@
#include <l4/lib/idpool.h>


static struct address_space_list address_space_list;

void init_address_space_list(void)
void init_address_space_list(struct address_space_list *space_list)
{
	memset(&address_space_list, 0, sizeof(address_space_list));
	memset(space_list, 0, sizeof(*space_list));

	mutex_init(&address_space_list.ref_lock);
	spin_lock_init(&address_space_list.list_lock);
	link_init(&address_space_list.list);
	mutex_init(&space_list->ref_lock);
	spin_lock_init(&space_list->list_lock);
	link_init(&space_list->list);
}

void address_space_reference_lock()
{
	mutex_lock(&address_space_list.ref_lock);
	mutex_lock(&curcont->space_list.ref_lock);
}

void address_space_reference_unlock()
{
	mutex_unlock(&address_space_list.ref_lock);
	mutex_unlock(&curcont->space_list.ref_lock);
}

void address_space_attach(struct ktcb *tcb, struct address_space *space)
@@ -47,33 +46,33 @@ struct address_space *address_space_find(l4id_t spid)
{
	struct address_space *space;

	spin_lock(&address_space_list.list_lock);
	list_foreach_struct(space, &address_space_list.list, list) {
	spin_lock(&curcont->space_list.list_lock);
	list_foreach_struct(space, &curcont->space_list.list, list) {
		if (space->spid == spid) {
			spin_unlock(&address_space_list.list_lock);
			spin_unlock(&curcont->space_list.list_lock);
			return space;
		}
	}
	spin_unlock(&address_space_list.list_lock);
	spin_unlock(&curcont->space_list.list_lock);
	return 0;
}

void address_space_add(struct address_space *space)
{
	spin_lock(&address_space_list.list_lock);
	spin_lock(&curcont->space_list.list_lock);
	BUG_ON(!list_empty(&space->list));
	list_insert(&space->list, &address_space_list.list);
	BUG_ON(!++address_space_list.count);
	spin_unlock(&address_space_list.list_lock);
	list_insert(&space->list, &curcont->space_list.list);
	BUG_ON(!++curcont->space_list.count);
	spin_unlock(&curcont->space_list.list_lock);
}

void address_space_remove(struct address_space *space)
{
	spin_lock(&address_space_list.list_lock);
	spin_lock(&curcont->space_list.list_lock);
	BUG_ON(list_empty(&space->list));
	BUG_ON(--address_space_list.count < 0);
	BUG_ON(--curcont->space_list.count < 0);
	list_remove_init(&space->list);
	spin_unlock(&address_space_list.list_lock);
	spin_unlock(&curcont->space_list.list_lock);
}

/* Assumes address space reflock is already held */
@@ -98,12 +97,12 @@ struct address_space *address_space_create(struct address_space *orig)
	int err;

	/* Allocate space structure */
	if (!(space = kzalloc(sizeof(*space))))
	if (!(space = alloc_space()))
		return PTR_ERR(-ENOMEM);

	/* Allocate pgd */
	if (!(pgd = alloc_pgd())) {
		kfree(space);
		free_space(space);
		return PTR_ERR(-ENOMEM);
	}

@@ -120,7 +119,7 @@ struct address_space *address_space_create(struct address_space *orig)
	 * is not allowed since spid field is used to indicate the space to
	 * copy from.
	 */
	space->spid = id_new(space_id_pool);
	space->spid = id_new(&kernel_container.space_ids);

	/* If an original space is supplied */
	if (orig) {

@@ -6,6 +6,7 @@
#include <l4/generic/tcb.h>
#include <l4/generic/space.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/container.h>
#include <l4/generic/preempt.h>
#include <l4/generic/space.h>
#include <l4/lib/idpool.h>
@@ -20,17 +21,18 @@ struct id_pool *thread_id_pool;
struct id_pool *space_id_pool;


static struct ktcb_list ktcb_list;

void init_ktcb_list(void)
void init_ktcb_list(struct ktcb_list *ktcb_list)
{
	memset(&ktcb_list, 0, sizeof(ktcb_list));
	spin_lock_init(&ktcb_list.list_lock);
	link_init(&ktcb_list.list);
	memset(ktcb_list, 0, sizeof(*ktcb_list));
	spin_lock_init(&ktcb_list->list_lock);
	link_init(&ktcb_list->list);
}

void tcb_init(struct ktcb *new)
{
	new->tid = id_new(&kernel_container.ktcb_ids);
	new->tgid = new->tid;

	link_init(&new->task_list);
	mutex_init(&new->thread_control_lock);

@@ -46,7 +48,7 @@ void tcb_init(struct ktcb *new)

struct ktcb *tcb_alloc(void)
{
	return zalloc_page();
	return alloc_ktcb();
}

struct ktcb *tcb_alloc_init(void)
@@ -93,21 +95,21 @@ void tcb_delete(struct ktcb *tcb)
	id_del(thread_id_pool, tcb->tid);

	/* Free the tcb */
	free_page(tcb);
	free_ktcb(tcb);
}

struct ktcb *tcb_find_by_space(l4id_t spid)
{
	struct ktcb *task;

	spin_lock(&ktcb_list.list_lock);
	list_foreach_struct(task, &ktcb_list.list, task_list) {
	spin_lock(&curcont->ktcb_list.list_lock);
	list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
		if (task->space->spid == spid) {
			spin_unlock(&ktcb_list.list_lock);
			spin_unlock(&curcont->ktcb_list.list_lock);
			return task;
		}
	}
	spin_unlock(&ktcb_list.list_lock);
	spin_unlock(&curcont->ktcb_list.list_lock);
	return 0;
}

@@ -115,33 +117,33 @@ struct ktcb *tcb_find(l4id_t tid)
{
	struct ktcb *task;

	spin_lock(&ktcb_list.list_lock);
	list_foreach_struct(task, &ktcb_list.list, task_list) {
	spin_lock(&curcont->ktcb_list.list_lock);
	list_foreach_struct(task, &curcont->ktcb_list.list, task_list) {
		if (task->tid == tid) {
			spin_unlock(&ktcb_list.list_lock);
			spin_unlock(&curcont->ktcb_list.list_lock);
			return task;
		}
	}
	spin_unlock(&ktcb_list.list_lock);
	spin_unlock(&curcont->ktcb_list.list_lock);
	return 0;
}

void tcb_add(struct ktcb *new)
{
	spin_lock(&ktcb_list.list_lock);
	spin_lock(&curcont->ktcb_list.list_lock);
	BUG_ON(!list_empty(&new->task_list));
	BUG_ON(!++ktcb_list.count);
	list_insert(&new->task_list, &ktcb_list.list);
	spin_unlock(&ktcb_list.list_lock);
	BUG_ON(!++curcont->ktcb_list.count);
	list_insert(&new->task_list, &curcont->ktcb_list.list);
	spin_unlock(&curcont->ktcb_list.list_lock);
}

void tcb_remove(struct ktcb *new)
{
	spin_lock(&ktcb_list.list_lock);
	spin_lock(&curcont->ktcb_list.list_lock);
	BUG_ON(list_empty(&new->task_list));
	BUG_ON(--ktcb_list.count < 0);
	BUG_ON(--curcont->ktcb_list.count < 0);
	list_remove_init(&new->task_list);
	spin_unlock(&ktcb_list.list_lock);
	spin_unlock(&curcont->ktcb_list.list_lock);
}

/* Offsets for ktcb fields that are accessed from assembler */

@@ -15,6 +15,7 @@
#include <l4/generic/tcb.h>
#include <l4/generic/bootmem.h>
#include <l4/generic/resource.h>
#include <l4/generic/container.h>
#include INC_ARCH(linker.h)
#include INC_ARCH(asm.h)
#include INC_ARCH(bootdesc.h)
@@ -256,6 +257,7 @@ void switch_to_user(struct ktcb *task)
	jump(task);
}

#if 0
/*
 * Initialize the pager in the system.
 *
@@ -328,6 +330,7 @@ void init_pager(char *name, struct task_ids *ids)

	/* Scheduler initialises the very first task itself */
}
#endif

void init_tasks()
{
@@ -341,9 +344,9 @@ void init_tasks()
	ids.tgid = ids.tid;

	/* Initialise the global task and address space lists */
	init_ktcb_list();
	init_address_space_list();
	init_mutex_queue_head();
	//init_ktcb_list();
	//init_address_space_list();
	//init_mutex_queue_head();

	printk("%s: Initialized. Starting %s as pager.\n",
	       __KERNELNAME__, __PAGERNAME__);
@@ -351,7 +354,7 @@ void init_tasks()
	 * This must come last so that other tasks can copy its pgd before it
	 * modifies it for its own specifics.
	 */
	init_pager(__PAGERNAME__, &ids);
	// init_pager(__PAGERNAME__, &ids);
}

void start_kernel(void)
@@ -381,20 +384,14 @@ void start_kernel(void)
	/* Move the initial pgd into a more convenient place, mapped as pages. */
	// relocate_page_tables();

	/* Evaluate system resources and set up resource pools */
	init_system_resources(&kernel_container);

	/* Initialise memory allocators */
	paging_init();

	/* Initialise kip and map for userspace access */
	kip_init();

	/* Initialise system call page */
	syscall_init();

	/* Setup inittask's ktcb and push it to scheduler runqueue */
	init_tasks();
	/* Evaluate system resources and set up resource pools */
	init_system_resources(&kernel_container);

	/* Start the scheduler with available tasks in the runqueue */
	scheduler_start();

@@ -39,6 +39,12 @@ unsigned int space_flags_to_ptflags(unsigned int flags)
	BUG(); return 0;
}

void task_init_registers(struct ktcb *task, unsigned long pc)
{
	task->context.pc = (u32)pc;
	task->context.spsr = ARM_MODE_USR;
}

/* Sets up struct page array and the physical memory descriptor. */
void paging_init(void)
{

@@ -126,6 +126,7 @@ int mem_cache_bufsize(void *start, int struct_size, int nstructs, int aligned)
	return start_address - (unsigned long)start;
}

#if 0
struct mem_cache *mem_cache_init(void *bufstart,
				 int cache_size,
				 int struct_size,
@@ -214,5 +215,92 @@ struct mem_cache *mem_cache_init(void *bufstart,

	return cache;
}
#endif

struct mem_cache *mem_cache_init(void *bufstart, int cache_size,
				 int struct_size, unsigned int aligned)
{
	void *start;
	struct mem_cache *cache;
	unsigned int area_start;
	unsigned int *bitmap;
	int bwords, total, bsize;

	/* Align to nearest word boundary */
	start = (void *)align_up(bufstart, sizeof(int));
	cache_size -= (int)start - (int)bufstart;
	cache = start;

	if ((struct_size < 0) || (cache_size < 0) ||
	    ((unsigned long)start == ~(0))) {
		printk("Invalid parameters.\n");
		return 0;
	}

	/*
	 * The cache definition itself is at the beginning.
	 * Skipping it to get to start of free memory. i.e. the cache.
	 */
	area_start = (unsigned long)start + sizeof(struct mem_cache);
	cache_size -= sizeof(struct mem_cache);

	if (cache_size < struct_size) {
		printk("Cache too small for given struct_size\n");
		return 0;
	}

	/* Get how much bitmap words occupy */
	total = cache_size / struct_size;
	bwords = total >> 5; /* Divide by 32 */
	if (total & 0x1F) { /* Remainder? */
		bwords++; /* Add one more word for remainder */
	}
	bsize = bwords * 4;

	/* Reduce bitmap bytes from cache size */
	cache_size -= bsize;

	/* Recalculate total - it may or may not have changed */
	total = cache_size / struct_size;

	/* This should always catch too small caches */
	if (total <= 0) {
		printk("Cache too small for given struct_size\n");
		return 0;
	}
	if (cache_size <= 0) {
		printk("Cache too small for given struct_size\n");
		return 0;
	}

	bitmap = (unsigned int *)area_start;
	area_start = (unsigned int)(bitmap + bwords);

	if (aligned) {
		unsigned int addr = area_start;
		unsigned int addr_aligned = align_up(area_start, struct_size);
		unsigned int diff = addr_aligned - addr;

		BUG_ON(diff >= struct_size);
		cache_size -= diff;

		/* Recalculate total */
		total = cache_size / struct_size;
		area_start = addr_aligned;
	}

	link_init(&cache->list);
	cache->start = area_start;
	cache->end = area_start + cache_size;
	cache->total = total;
	cache->free = cache->total;
	cache->struct_size = struct_size;
	cache->bitmap = bitmap;

	mutex_init(&cache->mutex);
	memset(cache->bitmap, 0, bwords*SZ_WORD);

	return cache;
}
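
/*
 * Editor's note (not part of the commit): the bitmap sizing rule used by
 * mem_cache_init() above, pulled out as a standalone sketch. One bit
 * tracks one object, so the word count is total/32 rounded up; for
 * example, 100 objects need 4 words (16 bytes) of bitmap.
 */
static inline int cache_bitmap_words_sketch(int total_objects)
{
	int bwords = total_objects >> 5;	/* divide by 32 bits per word */

	if (total_objects & 0x1F)		/* any remainder needs one more word */
		bwords++;

	return bwords;
}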