Mirror of https://github.com/drasko/codezero.git (synced 2026-01-11 18:33:16 +01:00)
Added preliminary support for execve(). Updated clone, fork, exit and task handling.
It turned out we were using one copy of kmalloc behind malloc() and a different one behind kfree(); now fixed. Added a parent-child relationship to tasks. Handling of CLONE_PARENT and CLONE_THREAD still needs polish.
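For context, a minimal sketch of the fix's intent (illustrative only, not the repository's actual lib/malloc.c; the declarations follow the lib/malloc.h hunk further down): both userspace wrappers must route through the same kmalloc/kfree pair, otherwise blocks handed out by one allocator copy get returned to the other and corrupt its free lists.

#include <stddef.h>

/* Single allocator pair shared by both wrappers (see lib/malloc.h below) */
void *kmalloc(size_t size);
void kfree(void *blk);

void *malloc(size_t size)
{
	/* allocate from the one and only kmalloc instance */
	return kmalloc(size);
}

void free(void *blk)
{
	/* and return blocks to the matching kfree */
	kfree(blk);
}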
3
LICENSE
@@ -4,7 +4,8 @@ license below and this version only, unless it is stated otherwise as
part of the file. To be more precise, for every source file where it
says: "Copyright (C) 2007, 2008 Bahadir Balban" or a similar wording
(capitalisation, years or the format of the name may change), the below
license has effect. For questions please contact me on bbalban@b-labs.co.uk
license has effect. Any other source file may or may not be inclusive.
For questions please contact me on bbalban@b-labs.co.uk

Bahadir Balban
@@ -1,18 +1,7 @@
#ifndef __API_SPACE_H__
#define __API_SPACE_H__

#define UNMAP_ALL_SPACE 0xFFFFFFFF

enum space_control_opcode {
	SPCCTRL_SHM = 0
};

#if 0
struct shm_kdata {
	l4id_t creator;
	unsigned long npages;
	unsigned long server_pfn;
	unsigned long client_pfn;
};
#endif

#endif /* __API_SPACE_H__ */
@@ -117,6 +117,7 @@ void add_mapping(unsigned int paddr, unsigned int vaddr,
		 unsigned int size, unsigned int flags);
int remove_mapping(unsigned long vaddr);
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd);
int remove_mapping_pgd_all_user(pgd_table_t *pgd);
void prealloc_phys_pagedesc(void);

int check_mapping_pgd(unsigned long vaddr, unsigned long size,
@@ -7,6 +7,7 @@
|
||||
#include INC_API(syscall.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include <l4/api/errno.h>
|
||||
#include <l4/api/space.h>
|
||||
|
||||
/* NOTE:
|
||||
* For lazy mm switching, a list of newly created mappings that are common to
|
||||
@@ -58,6 +59,16 @@ int sys_unmap(syscall_context_t *regs)
|
||||
else if (!(target = find_task(tid)))
|
||||
return -ESRCH;
|
||||
|
||||
/*
|
||||
* These special values mean unmap all the mappings
|
||||
* from task space except the kernel mappings
|
||||
*/
|
||||
if (virtual == UNMAP_ALL_SPACE &&
|
||||
npages == UNMAP_ALL_SPACE) {
|
||||
remove_mapping_pgd_all_user(target->pgd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < npages; i++) {
|
||||
ret = remove_mapping_pgd(virtual + i * PAGE_SIZE, target->pgd);
|
||||
if (ret)
|
||||
|
||||
@@ -76,6 +76,10 @@ int thread_destroy(struct task_ids *ids)
|
||||
*/
|
||||
BUG_ON(task->wqh_pager.sleepers > 0);
|
||||
|
||||
/*
|
||||
* FIXME: We need to free the pgd and any thread specific pmds!!!
|
||||
*/
|
||||
|
||||
/* We can now safely delete the task */
|
||||
free_page(task);
|
||||
|
||||
|
||||
@@ -300,6 +300,57 @@ int __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell if a pgd index is a common kernel index. This is used to distinguish
|
||||
* common kernel entries in a pgd, when copying page tables.
|
||||
*/
|
||||
int is_kern_pgdi(int i)
|
||||
{
|
||||
if ((i >= PGD_INDEX(KERNEL_AREA_START) && i < PGD_INDEX(KERNEL_AREA_END)) ||
|
||||
(i >= PGD_INDEX(IO_AREA_START) && i < PGD_INDEX(IO_AREA_END)) ||
|
||||
(i == PGD_INDEX(USER_KIP_PAGE)) ||
|
||||
(i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
|
||||
(i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
|
||||
(i == PGD_INDEX(USERSPACE_UART_BASE)))
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Removes all userspace mappings from a pgd. Frees any pmds that it
|
||||
* detects to be user pmds
|
||||
*/
|
||||
int remove_mapping_pgd_all_user(pgd_table_t *pgd)
|
||||
{
|
||||
pmd_table_t *pmd;
|
||||
|
||||
/* Traverse through all pgd entries */
|
||||
for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
|
||||
|
||||
/* Detect a pgd entry that is not a kernel entry */
|
||||
if (!is_kern_pgdi(i)) {
|
||||
|
||||
/* Detect a pmd entry */
|
||||
if (((pgd->entry[i] & PGD_TYPE_MASK)
|
||||
== PGD_TYPE_COARSE)) {
|
||||
|
||||
/* Obtain the user pmd handle */
|
||||
pmd = (pmd_table_t *)
|
||||
phys_to_virt((pgd->entry[i] &
|
||||
PGD_COARSE_ALIGN_MASK));
|
||||
/* Free it */
|
||||
free_pmd(pmd);
|
||||
}
|
||||
|
||||
/* Clear the pgd entry */
|
||||
pgd->entry[i] = PGD_TYPE_FAULT;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
|
||||
{
|
||||
pgd_t pgd_i = PGD_INDEX(vaddr);
|
||||
@@ -367,23 +418,6 @@ int remove_mapping(unsigned long vaddr)
|
||||
return remove_mapping_pgd(vaddr, current->pgd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell if a pgd index is a common kernel index. This is used to distinguish
|
||||
* common kernel entries in a pgd, when copying page tables.
|
||||
*/
|
||||
int is_kern_pgdi(int i)
|
||||
{
|
||||
if ((i >= PGD_INDEX(KERNEL_AREA_START) && i < PGD_INDEX(KERNEL_AREA_END)) ||
|
||||
(i >= PGD_INDEX(IO_AREA_START) && i < PGD_INDEX(IO_AREA_END)) ||
|
||||
(i == PGD_INDEX(USER_KIP_PAGE)) ||
|
||||
(i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
|
||||
(i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
|
||||
(i == PGD_INDEX(USERSPACE_UART_BASE)))
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocates and copies all levels of page tables from one task to another.
|
||||
* Useful when forking.
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include <stdio.h>
|
||||
#include <fs.h>
|
||||
#include <task.h>
|
||||
#include <vfs.h>
|
||||
|
||||
const char *pathdata_next_component(struct pathdata *pdata)
|
||||
{
|
||||
@@ -90,8 +91,11 @@ struct pathdata *pathdata_parse(const char *pathname,
|
||||
comp->str = VFS_STR_ROOTDIR;
|
||||
list_add_tail(&comp->list, &pdata->list);
|
||||
|
||||
/* Lookup start vnode is root vnode */
|
||||
pdata->vstart = task->fs_data->rootdir;
|
||||
if (task)
|
||||
/* Lookup start vnode is root vnode */
|
||||
pdata->vstart = task->fs_data->rootdir;
|
||||
else /* If no task, we use the root mountpoint pivot vnode */
|
||||
pdata->vstart = vfs_root.pivot;
|
||||
|
||||
/* Otherwise start from current directory */
|
||||
} else {
|
||||
|
||||
@@ -1,435 +0,0 @@
|
||||
/*
|
||||
* Simple linked-list based kernel memory allocator.
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <l4/config.h>
|
||||
#include <l4/macros.h>
|
||||
#include <l4/types.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
#include INC_GLUE(memlayout.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include <l4lib/arch/syscalls.h>
|
||||
#include <l4lib/arch/syslib.h>
|
||||
#include <l4/lib/list.h>
|
||||
#include <kmalloc/kmalloc.h>
|
||||
#include <mm/alloc_page.h>
|
||||
|
||||
/* Initial free area descriptor.
|
||||
*
|
||||
* Basic description of how free areas are tracked:
|
||||
*
|
||||
* A km_area marked with pg_alloc_pages means it is located at the beginning
|
||||
* of a new page allocation, and it is the first struct to describe those
|
||||
* allocated page(s).
|
||||
*
|
||||
* If, for all subpage_areas, pg_alloc_pages = {SA, SB, ..., SZ}, and `fragments
|
||||
* of pg_alloc_pages' = {sa(n), sb(n), ..., sz(n)} where n is the sequence number
|
||||
* of that fragment, and for each SX, SX = sx(1), and "->" denotes "next"
|
||||
* pointer relationship, on a random occasion, the areas could look like this:
|
||||
*
|
||||
* SA->sa(2)->sa(3)->SB->sb(2)->SC->SD->SE->se(2)->se(3)->se(4)
|
||||
*
|
||||
* With regard to all alloc/free functions defined below, in this example's
|
||||
* context, sa(1..3) can merge if any adjacent pair of them are free. Whereas if
|
||||
* adjacent(SC,SD) were true, SC and SD cannot be merged even if they are both
|
||||
* free, because they are pg_alloc_pages. Also, for each SX, it can be freed IFF
|
||||
* it is the only element in SX, and it is free. For instance, each of SC or SD
|
||||
* can be individually freed, provided they are marked unused.
|
||||
*
|
||||
* We could have used a bucket for each, e.g:
|
||||
*
|
||||
* SA->sa(2)->sa(3)
|
||||
* |
|
||||
* v
|
||||
* SB->sb(2)->sb(3)
|
||||
* |
|
||||
* v
|
||||
* SC
|
||||
* |
|
||||
* v
|
||||
* SD
|
||||
*
|
||||
* etc. But the original is simple enough for now and does the job.
|
||||
*
|
||||
*/
|
||||
|
||||
struct list_head km_area_start;
|
||||
|
||||
/*
|
||||
* Initialises a km_area descriptor according to the free area parameters
|
||||
* supplied along with it. @ppage = pointer to start of free memory.
|
||||
* @npages = number of pages the region contains. @km_areas = head of the list
|
||||
* of km_areas on the system that belongs to kmalloc.
|
||||
*/
|
||||
void kmalloc_add_new_pages(void *ppage, int npages, struct list_head *km_areas)
|
||||
{
|
||||
struct km_area *new = (struct km_area *)ppage;
|
||||
|
||||
new->vaddr = (unsigned long)ppage + sizeof(struct km_area);
|
||||
new->size = (npages * PAGE_SIZE) - sizeof(struct km_area);
|
||||
new->used = 0;
|
||||
new->pg_alloc_pages = npages;
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
|
||||
/*
|
||||
* The first entry is a pg_alloc_pages. Adding the new pg_alloc_pages
|
||||
* in tail ensures each pg_alloc_pages are adjacent, and their
|
||||
* children are never intermixed.
|
||||
*/
|
||||
list_add_tail(&new->list, km_areas);
|
||||
}
|
||||
|
||||
#define KM_INIT_PAGES 3
|
||||
void kmalloc_init()
|
||||
{
|
||||
/* Initially allocated pages with one big free km_area */
|
||||
void *ppage = l4_map_helper(alloc_page(KM_INIT_PAGES),
|
||||
KM_INIT_PAGES);
|
||||
struct km_area *new = (struct km_area *)ppage;
|
||||
|
||||
BUG_ON(!new);
|
||||
new->vaddr = (unsigned long)ppage + sizeof(struct km_area);
|
||||
new->size = (KM_INIT_PAGES * PAGE_SIZE)
|
||||
- sizeof(struct km_area);
|
||||
new->used = 0;
|
||||
new->pg_alloc_pages = KM_INIT_PAGES;
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
INIT_LIST_HEAD(&km_area_start);
|
||||
|
||||
/* Add the first area to the global list head */
|
||||
list_add(&new->list, &km_area_start);
|
||||
|
||||
/* NOTE: If needed, initialise mutex here */
|
||||
}
|
||||
|
||||
static struct km_area *
|
||||
find_free_km_area(int size, struct list_head *km_areas)
|
||||
{
|
||||
struct km_area *new;
|
||||
struct km_area *area;
|
||||
const unsigned long max = SZ_WORD - 1;
|
||||
int used= 0, unused = 0;
|
||||
|
||||
/* The minimum size needed if the area will be divided into two */
|
||||
int dividable = size + sizeof(struct km_area) + max;
|
||||
|
||||
list_for_each_entry (area, km_areas, list) {
|
||||
/* Is this a free region that fits? */
|
||||
if ((area->size) >= dividable && !area->used) {
|
||||
unsigned long addr, aligned;
|
||||
|
||||
/*
|
||||
* Cut the free area from the end, as much as
|
||||
* we want to use
|
||||
*/
|
||||
area->size -= size + sizeof(struct km_area);
|
||||
|
||||
addr = (area->vaddr + area->size);
|
||||
aligned = align(addr, SZ_WORD); /* Align by rewinding */
|
||||
used = addr - aligned; /* We rewinded this much bytes */
|
||||
unused = max - used;
|
||||
|
||||
/*
|
||||
* Reduce the extra bit that's rewinded for alignment
|
||||
* to original subpage
|
||||
*/
|
||||
area->size -= used;
|
||||
|
||||
/*
|
||||
* Allocate the new link structure at the end
|
||||
* of the free area shortened previously.
|
||||
*/
|
||||
new = (struct km_area *)aligned;
|
||||
|
||||
/*
|
||||
* Actual allocated memory starts after subpage
|
||||
* descriptor
|
||||
*/
|
||||
new->vaddr = (unsigned long)new
|
||||
+ sizeof(struct km_area);
|
||||
new->size = size + sizeof(struct km_area)
|
||||
+ used;
|
||||
new->used = 1;
|
||||
|
||||
/* Divides other allocated page(s) */
|
||||
new->pg_alloc_pages = 0;
|
||||
|
||||
/* Add used region to the page area list */
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
list_add(&new->list, &area->list);
|
||||
return new;
|
||||
|
||||
} else if (area->size < dividable &&
|
||||
area->size >= size && !area->used) {
|
||||
/*
|
||||
* Area not at dividable size but can satisfy request,
|
||||
* so it's simply returned.
|
||||
*/
|
||||
area->used = 1;
|
||||
return area;
|
||||
}
|
||||
}
|
||||
/* Traversed all areas and can't satisfy request. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a free list, finds a free region of requested size plus one subpage
|
||||
* area descriptor. Allocates and initialises the new descriptor, adds it to
|
||||
* the list and returns it.
|
||||
*/
|
||||
struct km_area *
|
||||
find_free_km_area_orig(int size, struct list_head *km_areas)
|
||||
{
|
||||
struct km_area *new;
|
||||
struct km_area *area;
|
||||
const unsigned long alignment_extra_max = SZ_WORD - 1;
|
||||
int alignment_used = 0, alignment_unused = 0;
|
||||
|
||||
/* The minimum size needed if the area will be divided into two */
|
||||
int dividable_size = size + sizeof(struct km_area)
|
||||
+ alignment_extra_max;
|
||||
|
||||
list_for_each_entry (area, km_areas, list) {
|
||||
/* Is this a free region that fits? */
|
||||
if ((area->size) >= dividable_size && !area->used) {
|
||||
unsigned long addr, addr_aligned;
|
||||
|
||||
/*
|
||||
* Cut the free area from the end, as much as
|
||||
* we want to use
|
||||
*/
|
||||
area->size -= size + sizeof(struct km_area);
|
||||
|
||||
addr = (area->vaddr + area->size);
|
||||
addr_aligned = align_up(addr, SZ_WORD);
|
||||
alignment_used = addr_aligned - addr;
|
||||
alignment_unused = alignment_extra_max
|
||||
- alignment_used;
|
||||
|
||||
/*
|
||||
* Add the extra bit that's skipped for alignment
|
||||
* to original subpage
|
||||
*/
|
||||
area->size += alignment_used;
|
||||
|
||||
/*
|
||||
* Allocate the new link structure at the end
|
||||
* of the free area shortened previously.
|
||||
*/
|
||||
new = (struct km_area *)addr_aligned;
|
||||
|
||||
/*
|
||||
* Actual allocated memory starts after subpage
|
||||
* descriptor
|
||||
*/
|
||||
new->vaddr = (unsigned long)new
|
||||
+ sizeof(struct km_area);
|
||||
new->size = size + sizeof(struct km_area)
|
||||
+ alignment_unused;
|
||||
new->used = 1;
|
||||
|
||||
/* Divides other allocated page(s) */
|
||||
new->pg_alloc_pages = 0;
|
||||
|
||||
/* Add used region to the page area list */
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
list_add(&new->list, &area->list);
|
||||
return new;
|
||||
|
||||
} else if (area->size < dividable_size &&
|
||||
area->size >= size && !area->used) {
|
||||
/*
|
||||
* Area not at dividable size but can satisfy request,
|
||||
* so it's simply returned.
|
||||
*/
|
||||
area->used = 1;
|
||||
return area;
|
||||
}
|
||||
}
|
||||
/* Traversed all areas and can't satisfy request. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate, initialise a km_area along with its free memory of minimum
|
||||
* size as @size, and add it to km_area list.
|
||||
*/
|
||||
static int
|
||||
kmalloc_get_free_pages(int size, struct list_head *km_areas)
|
||||
{
|
||||
int totalsize = size + sizeof(struct km_area) * 2;
|
||||
int npages = totalsize / PAGE_SIZE;
|
||||
void *ppage;
|
||||
|
||||
if (totalsize & PAGE_MASK)
|
||||
npages++;
|
||||
|
||||
if ((ppage = l4_map_helper(alloc_page(npages), npages)) == 0)
|
||||
/* TODO: Return specific error code, e.g. ENOMEM */
|
||||
return -1;
|
||||
|
||||
BUG_ON((npages * PAGE_SIZE) < (size + sizeof(struct km_area)));
|
||||
|
||||
kmalloc_add_new_pages(ppage, npages, km_areas);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Linked list based kernel memory allocator. This has the simplicity of
|
||||
* allocating list structures together with the requested memory area. This
|
||||
* can't be done with the page allocator, because it works in page-size chunks.
|
||||
* In kmalloc we can allocate more fine-grain sizes, so a link structure can
|
||||
* also be embedded together with requested data.
|
||||
*/
|
||||
|
||||
/* Allocates given @size, requests more free pages if free areas depleted. */
|
||||
void *kmalloc(int size)
|
||||
{
|
||||
struct km_area *new_area;
|
||||
void *allocation;
|
||||
|
||||
/* NOTE: If needed, lock mutex here */
|
||||
new_area = find_free_km_area(size, &km_area_start);
|
||||
if (!new_area) {
|
||||
if (kmalloc_get_free_pages(size, &km_area_start) < 0) {
|
||||
allocation = 0;
|
||||
goto out;
|
||||
}
|
||||
else
|
||||
new_area = find_free_km_area(size, &km_area_start);
|
||||
}
|
||||
BUG_ON(!new_area);
|
||||
allocation = (void *)new_area->vaddr;
|
||||
out:
|
||||
/* NOTE: If locked, unlock mutex here */
|
||||
return allocation;
|
||||
}
|
||||
|
||||
/* kmalloc with zero initialised memory */
|
||||
void *kzalloc(int size)
|
||||
{
|
||||
void *mem = kmalloc(size);
|
||||
if (mem)
|
||||
memset(mem, 0, size);
|
||||
return mem;
|
||||
}
|
||||
|
||||
void km_free_empty_pages(struct km_area *free_area)
|
||||
{
|
||||
unsigned long wholesize;
|
||||
|
||||
/* Not allocated from page allocator */
|
||||
if (!free_area->pg_alloc_pages)
|
||||
return;
|
||||
|
||||
/* The first km_area in memory from the page allocator: */
|
||||
|
||||
/* Must be on a page boundary */
|
||||
BUG_ON((unsigned long)free_area & PAGE_MASK);
|
||||
|
||||
/* Must be unused */
|
||||
BUG_ON(free_area->used);
|
||||
|
||||
/* Must be whole, (i.e. not divided into other km_areas) */
|
||||
wholesize = free_area->pg_alloc_pages * PAGE_SIZE;
|
||||
if ((free_area->size + sizeof(struct km_area)) < wholesize)
|
||||
return;
|
||||
|
||||
/* Must have at least PAGE_SIZE size, when itself included */
|
||||
BUG_ON(free_area->size < (PAGE_SIZE - sizeof(struct km_area)));
|
||||
|
||||
/* Its size must be a multiple of PAGE_SIZE, when itself included */
|
||||
if ((free_area->size + sizeof(struct km_area)) & PAGE_MASK) {
|
||||
printk("Error: free_area->size: 0x%lu, with km_area_struct:"
|
||||
" 0x%lu, PAGE_MASK: 0x%x\n", free_area->size,
|
||||
free_area->size + sizeof(struct km_area), PAGE_MASK);
|
||||
BUG();
|
||||
}
|
||||
list_del(&free_area->list);
|
||||
|
||||
/* And finally must be freed without problems */
|
||||
BUG_ON(free_page(l4_unmap_helper(free_area, __pfn(wholesize))) < 0);
|
||||
return;
|
||||
}
|
||||
|
||||
struct km_area *km_merge_free_areas(struct km_area *before,
|
||||
struct km_area *after)
|
||||
{
|
||||
|
||||
/*
|
||||
* If `after' has pg_alloc_pages set, it means it can't be merged and
|
||||
* has to be returned explicitly to the page allocator.
|
||||
*/
|
||||
if (after->pg_alloc_pages)
|
||||
return 0;
|
||||
|
||||
BUG_ON(before->vaddr + before->size != after->vaddr);
|
||||
BUG_ON(before->used || after->used)
|
||||
BUG_ON(before == after);
|
||||
|
||||
/*
|
||||
* km_area structures are at the beginning of the memory
|
||||
* areas they describe. By simply merging them with another
|
||||
* area they're effectively freed.
|
||||
*/
|
||||
before->size += after->size + sizeof(struct km_area);
|
||||
list_del(&after->list);
|
||||
return before;
|
||||
}
|
||||
|
||||
|
||||
int find_and_free_km_area(void *vaddr, struct list_head *areas)
|
||||
{
|
||||
struct km_area *area, *prev, *next, *merged;
|
||||
|
||||
if (!vaddr) /* A well-known invalid address */
|
||||
return -1;
|
||||
|
||||
list_for_each_entry(area, areas, list)
|
||||
if (area->vaddr == (unsigned long)vaddr && area->used)
|
||||
goto found;
|
||||
|
||||
/* Area not found */
|
||||
return -1;
|
||||
|
||||
found:
|
||||
|
||||
area->used = 0;
|
||||
|
||||
/* Now merge with adjacent areas if possible */
|
||||
if (area->list.prev != areas) {
|
||||
prev = list_entry(area->list.prev, struct km_area, list);
|
||||
if (!prev->used)
|
||||
if ((merged = km_merge_free_areas(prev, area)))
|
||||
area = merged;
|
||||
}
|
||||
if (area->list.next != areas) {
|
||||
next = list_entry(area->list.next, struct km_area, list);
|
||||
if (!next->used)
|
||||
if ((merged = km_merge_free_areas(area, next)))
|
||||
area = merged;
|
||||
}
|
||||
|
||||
/*
|
||||
* After freeing and all possible merging, try returning region back
|
||||
* to page allocator.
|
||||
*/
|
||||
km_free_empty_pages(area);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kfree(void *virtual)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* NOTE: If needed, lock mutex here */
|
||||
ret = find_and_free_km_area(virtual, &km_area_start);
|
||||
/* NOTE: If locked, unlock mutex here */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1,457 +0,0 @@
|
||||
/*
|
||||
* Kernel memory allocator.
|
||||
*
|
||||
* Copyright (C) 2007 Bahadir Balban
|
||||
*
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <l4/config.h>
|
||||
#include <l4/macros.h>
|
||||
#include <l4/types.h>
|
||||
#include INC_GLUE(memory.h)
|
||||
#include INC_GLUE(memlayout.h)
|
||||
#include INC_SUBARCH(mm.h)
|
||||
#include <l4lib/arch/syscalls.h>
|
||||
#include <l4/lib/list.h>
|
||||
#include <kmalloc/kmalloc.h>
|
||||
#include <mm/alloc_page.h>
|
||||
|
||||
/* Initial free area descriptor.
|
||||
*
|
||||
* Basic description of how free areas are tracked:
|
||||
*
|
||||
* A subpage_area marked as head_of_pages means it is located at the beginning
|
||||
* of a new page allocation, and it is the first struct to describe those
|
||||
* allocated page(s).
|
||||
*
|
||||
* If, for all subpage_areas, head_of_pages = {SA, SB, ..., SZ}, and `fragments
|
||||
* of head_of_pages' = {sa(n), sb(n), ..., sz(n)} where n is the sequence number
|
||||
* of that fragment, and for each SX, SX = sx(1), and "->" denotes "next"
|
||||
* pointer relationship, on a random occasion, the areas could look like this:
|
||||
*
|
||||
* SA->sa(2)->sa(3)->SB->sb(2)->SC->SD->SE->se(2)->se(3)->se(4)
|
||||
*
|
||||
* With regard to all alloc/free functions defined below, in this example's
|
||||
* context, sa(1..3) can merge if any adjacent pair of them are free. Whereas if
|
||||
* adjacent(SC,SD) were true, SC and SD cannot be merged even if they are both
|
||||
* free, because they are head_of_pages. Also, for each SX, it can be freed IFF
|
||||
* it is the only element in SX, and it is free. For instance, each of SC or SD
|
||||
* can be individually freed, provided they are marked unused.
|
||||
*
|
||||
* We could have used a bucket for each, e.g:
|
||||
*
|
||||
* SA->sa(2)->sa(3)
|
||||
* |
|
||||
* v
|
||||
* SB->sb(2)->sb(3)
|
||||
* |
|
||||
* v
|
||||
* SC
|
||||
* |
|
||||
* v
|
||||
* SD
|
||||
*
|
||||
* etc. But the original is simple enough for now and does the job.
|
||||
*
|
||||
*/
|
||||
|
||||
struct subpage_area km_areas;
|
||||
|
||||
/* Initialises a subpage area descriptor according to the free area parameters
|
||||
* supplied along with it. @ppage = pointer to start of free memory.
|
||||
* @npages = number of pages the region contains. @areas = head of the list of
|
||||
* subpage_areas on the system that belongs to kmalloc. */
|
||||
void kmalloc_add_new_pages(void *ppage, int npages, struct subpage_area **areas)
|
||||
{
|
||||
struct subpage_area *new = (struct subpage_area *)ppage;
|
||||
|
||||
new->vaddr = (unsigned int)ppage + sizeof(struct subpage_area);
|
||||
new->size = (npages * PAGE_SIZE) - sizeof(struct subpage_area);
|
||||
new->used = 0;
|
||||
new->head_of_pages = npages;
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
|
||||
/* The first entry is a head_of_pages. Adding the new head_of_pages
|
||||
* in tail ensures each head_of_pages are adjacent, and their
|
||||
* children are never intermixed */
|
||||
list_add_tail(&new->list, &(*areas)->list);
|
||||
}
|
||||
|
||||
#define KMALLOC_INITIAL_PAGES 3
|
||||
void kmalloc_init()
|
||||
{
|
||||
/* Initially allocated pages with one big free km_area */
|
||||
void *ppage = alloc_page(KMALLOC_INITIAL_PAGES);
|
||||
|
||||
ppage = l4_map_helper(ppage, KMALLOC_INITIAL_PAGES);
|
||||
struct subpage_area *new = (struct subpage_area *)ppage;
|
||||
BUG_ON(!new);
|
||||
new->vaddr = (unsigned int)ppage + sizeof(struct subpage_area);
|
||||
new->size = (KMALLOC_INITIAL_PAGES * PAGE_SIZE)
|
||||
- sizeof(struct subpage_area);
|
||||
new->used = 0;
|
||||
new->head_of_pages = KMALLOC_INITIAL_PAGES;
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
/* Assign the first area to global list pointer */
|
||||
km_areas = new;
|
||||
|
||||
/* NOTE: If needed, initialise mutex here */
|
||||
}
|
||||
|
||||
/* Given a free list, finds a free region of requested size plus one subpage
|
||||
* area descriptor. Allocates and initialises the new descriptor, adds it to
|
||||
* the list and returns it.
|
||||
*/
|
||||
static struct subpage_area *
|
||||
find_free_subpage_area(int size, struct subpage_area **areas)
|
||||
{
|
||||
struct subpage_area *new;
|
||||
struct subpage_area *cur = *areas;
|
||||
const unsigned int alignment_extra_max = SZ_WORD - 1;
|
||||
unsigned int alignment_used = 0, alignment_unused = 0;
|
||||
|
||||
/* The minimum size needed if the area will be divided into two */
|
||||
int dividable_size = size + sizeof(struct subpage_area)
|
||||
+ alignment_extra_max;
|
||||
|
||||
/* Is this a free region that fits? */
|
||||
if ((cur->size) >= dividable_size && !cur->used) {
|
||||
unsigned int addr, addr_aligned;
|
||||
/* Cut the free area as much as we want to used */
|
||||
cur->size -= size + sizeof(struct subpage_area);
|
||||
|
||||
addr = (cur->vaddr + cur->size);
|
||||
addr_aligned = align_up(addr, SZ_WORD);
|
||||
alignment_used = addr_aligned - addr;
|
||||
alignment_unused = alignment_extra_max - alignment_used;
|
||||
|
||||
/* Add the extra bit that's skipped for alignment to original subpage */
|
||||
cur->size += alignment_used;
|
||||
|
||||
/* Allocate the new link structure at the end
|
||||
* of the free area shortened previously. */
|
||||
new = (struct subpage_area *)addr_aligned;
|
||||
|
||||
/* Actual allocated memory starts after subpage descriptor */
|
||||
new->vaddr = (unsigned int)new + sizeof(struct subpage_area);
|
||||
new->size = size + sizeof(struct subpage_area) + alignment_unused;
|
||||
new->used = 1;
|
||||
new->head_of_pages = 0; /* Divides other allocated page(s) */
|
||||
/* Add used region to the subpage_area list */
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
list_add(&new->list, &cur->list);
|
||||
return new;
|
||||
|
||||
} else if (cur->size < dividable_size &&
|
||||
cur->size >= size && !cur->used) {
|
||||
/* The area can't be divided, but has enough room for the
|
||||
* actual allocation, it just misses the few bytes for a
|
||||
* new subpage_area for splitting. In this case the current
|
||||
* page area is simply marked used and returned. This is a
|
||||
* rare but important case, because on-demand free page
|
||||
* allocations don't ensure new free areas are sufficiently
|
||||
* large to be divisable. */
|
||||
cur->used = 1;
|
||||
return cur;
|
||||
}
|
||||
/* Do the same for all other entries */
|
||||
list_for_each_entry (cur, &(*areas)->list, list) {
|
||||
/* Is this a free region that fits? */
|
||||
if ((cur->size) >= dividable_size && !cur->used) {
|
||||
unsigned int addr, addr_aligned;
|
||||
/* Cut the free area from the end, as much as
|
||||
* we want to use */
|
||||
cur->size -= size + sizeof(struct subpage_area);
|
||||
|
||||
addr = (cur->vaddr + cur->size);
|
||||
addr_aligned = align_up(addr, SZ_WORD);
|
||||
alignment_used = addr_aligned - addr;
|
||||
alignment_unused = alignment_extra_max
|
||||
- alignment_used;
|
||||
|
||||
/* Add the extra bit that's skipped for alignment
|
||||
* to original subpage */
|
||||
cur->size += alignment_used;
|
||||
|
||||
/* Allocate the new link structure at the end
|
||||
* of the free area shortened previously. */
|
||||
new = (struct subpage_area *)addr_aligned;
|
||||
|
||||
/* Actual allocated memory starts after subpage
|
||||
* descriptor */
|
||||
new->vaddr = (unsigned int)new
|
||||
+ sizeof(struct subpage_area);
|
||||
new->size = size + sizeof(struct subpage_area)
|
||||
+ alignment_unused;
|
||||
new->used = 1;
|
||||
/* Divides other allocated page(s) */
|
||||
new->head_of_pages = 0;
|
||||
/* Add used region to the page area list */
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
list_add(&new->list, &cur->list);
|
||||
return new;
|
||||
|
||||
} else if (cur->size < dividable_size &&
|
||||
cur->size >= size && !cur->used) {
|
||||
/* Area not at dividable size but can satisfy request,
|
||||
* so it's simply returned. */
|
||||
cur->used = 1;
|
||||
return cur;
|
||||
}
|
||||
}
|
||||
/* Traversed all areas and can't satisfy request. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Allocate, initialise a subpage area along with its free
|
||||
* memory of minimum size as @size, and add it to subpage list. */
|
||||
static int
|
||||
kmalloc_get_free_pages(int size, struct subpage_area **areas)
|
||||
{
|
||||
int totalsize = size + sizeof(struct subpage_area) * 2;
|
||||
int npages = totalsize / PAGE_SIZE;
|
||||
void *ppage;
|
||||
|
||||
if (totalsize & PAGE_MASK)
|
||||
npages++;
|
||||
|
||||
if ((ppage = l4_map_helper(alloc_page(npages), npages))
|
||||
== 0)
|
||||
/* TODO: Return specific error code, e.g. ENOMEM */
|
||||
return -1;
|
||||
|
||||
BUG_ON((npages * PAGE_SIZE) < (size + sizeof(struct subpage_area)));
|
||||
|
||||
kmalloc_add_new_pages(ppage, npages, areas);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Linked list based subpage allocator. This has the simplicity of allocating
|
||||
* list structures together with the requested memory area. This can't be done
|
||||
* with the page allocator, because it works in page-size chunks. In kmalloc
|
||||
* we can allocate more fine-grain sizes, so a link structure can also be
|
||||
* embedded together with requested data.
|
||||
*/
|
||||
|
||||
/* Allocates given @size, requests more free pages if free areas depleted. */
|
||||
void *kmalloc(int size)
|
||||
{
|
||||
struct subpage_area *new_area;
|
||||
void *allocation;
|
||||
|
||||
/* NOTE: If needed, lock mutex here */
|
||||
new_area = find_free_subpage_area(size, &km_areas);
|
||||
if (!new_area) {
|
||||
if (kmalloc_get_free_pages(size, &km_areas) < 0) {
|
||||
allocation = 0;
|
||||
goto out;
|
||||
}
|
||||
else
|
||||
new_area = find_free_subpage_area(size, &km_areas);
|
||||
}
|
||||
BUG_ON(!new_area);
|
||||
allocation = (void *)new_area->vaddr;
|
||||
out:
|
||||
/* NOTE: If locked, unlock mutex here */
|
||||
return allocation;
|
||||
}
|
||||
|
||||
/* kmalloc with zero initialised memory */
|
||||
void *kzalloc(int size)
|
||||
{
|
||||
void *mem = kmalloc(size);
|
||||
if (mem)
|
||||
memset(mem, 0, size);
|
||||
return mem;
|
||||
}
|
||||
|
||||
void km_free_empty_pages(struct subpage_area *free_area,
|
||||
struct subpage_area **start)
|
||||
{
|
||||
unsigned int wholesize;
|
||||
if (!free_area->head_of_pages)
|
||||
return; /* Not allocated from page allocator */
|
||||
|
||||
if (free_area == *start)
|
||||
return; /* First subpage area is allocated at
|
||||
initialisation and never deallocated */
|
||||
|
||||
/* A head of page: */
|
||||
|
||||
/* Can't be the only element, start is always there. */
|
||||
BUG_ON(list_empty(&free_area->list));
|
||||
/* Must be on a page boundary */
|
||||
BUG_ON((unsigned int)free_area & PAGE_MASK);
|
||||
/* Must be unused */
|
||||
BUG_ON(free_area->used);
|
||||
|
||||
/* Furthermore, a head of page that can be freed must be whole:
|
||||
* Total number of pages when as a whole, is kept in [31:1] */
|
||||
wholesize = free_area->head_of_pages * PAGE_SIZE;
|
||||
if ((free_area->size + sizeof(struct subpage_area)) < wholesize)
|
||||
return;
|
||||
|
||||
/* Must have at least PAGE_SIZE size, when itself included */
|
||||
BUG_ON(free_area->size < (PAGE_SIZE - sizeof(struct subpage_area)));
|
||||
|
||||
/* Its size must be a multiple of PAGE_SIZE, when itself included */
|
||||
// BUG_ON((free_area->size + sizeof(struct subpage_area)) & PAGE_MASK);
|
||||
if ((free_area->size + sizeof(struct subpage_area)) & PAGE_MASK) {
|
||||
printk("Error: free_area->size: 0x%x, with subpage: 0x%x, PAGE_MASK: 0x%x\n",
|
||||
free_area->size, free_area->size + sizeof(struct subpage_area), PAGE_MASK);
|
||||
BUG();
|
||||
}
|
||||
list_del(&free_area->list);
|
||||
|
||||
/* And finally must be freed without problems */
|
||||
if (free_page(l4_unmap_helper(free_area, wholesize)) < 0)
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
|
||||
static int
|
||||
km_merge_with_prev_subpage(struct subpage_area *start,
|
||||
struct subpage_area *this,
|
||||
struct subpage_area *prev)
|
||||
{
|
||||
BUG_ON(this == prev);
|
||||
BUG_ON(this->used);
|
||||
|
||||
/* Can't merge used and unused regions */
|
||||
if (prev->used)
|
||||
return 0;
|
||||
|
||||
/* At the beginning. this is head, prev is tail. Can't merge. */
|
||||
if (start == this)
|
||||
return 0;
|
||||
|
||||
/* Can't merge head descriptors of page allocations. They
|
||||
* are to be returned back to the page allocator on their own. */
|
||||
if (this->head_of_pages)
|
||||
return 0;
|
||||
|
||||
/* Subpage areas can be non-contiguous, if they are not a part of
|
||||
* the same page(s) allocation. This usually holds if prev and this
|
||||
* are fragments from the same page allocation. */
|
||||
if (prev->vaddr + prev->size != (unsigned int)this)
|
||||
return 0;
|
||||
|
||||
/* Remember that subpage_area structures are at the beginning of
|
||||
* the memory areas they describe. By simply merging them with
|
||||
* another area they're effectively freed. */
|
||||
prev->size += this->size + sizeof(struct subpage_area);
|
||||
list_del(&this->list);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int
|
||||
km_merge_with_next_subpage(struct subpage_area *start,
|
||||
struct subpage_area *this,
|
||||
struct subpage_area *next)
|
||||
{
|
||||
BUG_ON(this == next);
|
||||
BUG_ON(this->used);
|
||||
|
||||
/* At the end. this is tail, next is head. Can't merge. */
|
||||
if (start == next)
|
||||
return 0;
|
||||
|
||||
/* Can't merge used and unused regions */
|
||||
if (next->used)
|
||||
return 0;
|
||||
|
||||
/* Can't merge head descriptors of page allocations. They
|
||||
* are to be returned back to the page allocator on their own. */
|
||||
if (next->head_of_pages)
|
||||
return 0;
|
||||
|
||||
/* Subpage areas can be non-contiguous, if they are not a part of
|
||||
* the same head_of_page(s) allocation. This usually holds if next
|
||||
* and this are fragments from the same head_of_page. */
|
||||
if (this->vaddr + this->size != (unsigned int)next)
|
||||
return 0;
|
||||
|
||||
/* Remember that subpage_area structures are at the beginning of
|
||||
* the memory areas they describe. By simply merging them with
|
||||
* another area they're effectively freed. */
|
||||
this->size += next->size + sizeof(struct subpage_area);
|
||||
list_del(&next->list);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int find_and_free_subpage_area(void *vaddr, struct subpage_area **areas)
|
||||
{
|
||||
struct subpage_area *cur = *areas;
|
||||
|
||||
if (!vaddr) /* A well-known invalid address */
|
||||
return -1;
|
||||
|
||||
if (cur->vaddr == (unsigned int)vaddr) {
|
||||
struct subpage_area *prev, *next;
|
||||
BUG_ON(!cur->used);
|
||||
cur->used = 0;
|
||||
if (!list_empty(&cur->list)) {
|
||||
prev = list_entry(cur->list.prev,
|
||||
struct subpage_area,
|
||||
list);
|
||||
if (km_merge_with_prev_subpage(*areas, cur, prev))
|
||||
cur = prev;
|
||||
|
||||
if (!list_empty(&cur->list)) {
|
||||
/* Last merge did not reduce to last
|
||||
* element. */
|
||||
next = list_entry(cur->list.next,
|
||||
struct subpage_area,
|
||||
list);
|
||||
km_merge_with_next_subpage(*areas, cur, next);
|
||||
}
|
||||
}
|
||||
km_free_empty_pages(cur, areas);
|
||||
return 0;
|
||||
}
|
||||
list_for_each_entry(cur, &(*areas)->list, list) {
|
||||
if (cur->vaddr == (unsigned int)vaddr) {
|
||||
struct subpage_area *prev, *next;
|
||||
BUG_ON(!cur->used);
|
||||
cur->used = 0;
|
||||
if (!list_empty(&cur->list)) {
|
||||
prev = list_entry(cur->list.prev,
|
||||
struct subpage_area,
|
||||
list);
|
||||
if (km_merge_with_prev_subpage(*areas,
|
||||
cur, prev))
|
||||
cur = prev;
|
||||
|
||||
if (!list_empty(&cur->list)) {
|
||||
/* Last merge did not reduce to last
|
||||
* element. */
|
||||
next = list_entry(cur->list.next,
|
||||
struct subpage_area,
|
||||
list);
|
||||
km_merge_with_next_subpage(*areas, cur,
|
||||
next);
|
||||
}
|
||||
}
|
||||
/* After freeing and all possible merging, try
|
||||
* returning region back to page allocator. */
|
||||
km_free_empty_pages(cur, areas);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/* TODO, Return a specific error code. Here, this is a
|
||||
* serious error. (Trying to free non-existing memory) */
|
||||
return -1;
|
||||
}
|
||||
|
||||
int kfree(void *vaddr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* NOTE: If needed, lock mutex here */
|
||||
ret = find_and_free_subpage_area(vaddr, &km_areas);
|
||||
/* NOTE: If locked, unlock mutex here */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
#ifndef __KMALLOC_H__
|
||||
#define __KMALLOC_H__
|
||||
|
||||
#include <mm/alloc_page.h>
|
||||
#include <l4/lib/list.h>
|
||||
|
||||
/*
|
||||
* List member to keep track of free and unused regions in subpages.
|
||||
* Smallest unit it represents is one byte, but note that it is also
|
||||
* used for describing regions that span across multiple pages.
|
||||
*/
|
||||
struct km_area {
|
||||
struct list_head list;
|
||||
unsigned long vaddr;
|
||||
unsigned long size;
|
||||
int used;
|
||||
int pg_alloc_pages; /* Means borrowed from alloc_page() */
|
||||
};
|
||||
|
||||
extern struct list_head km_area_start;
|
||||
|
||||
/* Kmalloc initialisation */
|
||||
void kmalloc_init(void);
|
||||
|
||||
/* Kmalloc allocation functions */
|
||||
void *kmalloc(int size) __attribute__((weak));
|
||||
void *kzalloc(int size) __attribute__((weak));
|
||||
int kfree(void *vaddr) __attribute__((weak));
|
||||
|
||||
#endif /* __KMALLOC_H__ */
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
#ifndef __KMALLOC_H__
|
||||
#define __KMALLOC_H__
|
||||
|
||||
#include <mm/alloc_page.h>
|
||||
#include <l4/lib/list.h>
|
||||
/* List member to keep track of free and unused regions in subpages.
|
||||
* Smallest unit it represents is one byte, but note that it is also
|
||||
* used for describing regions that span across multiple pages. */
|
||||
struct subpage_area {
|
||||
struct list_head list;
|
||||
unsigned int vaddr;
|
||||
unsigned int size;
|
||||
unsigned int used;
|
||||
unsigned int head_of_pages; /* Means head of alloc_page() */
|
||||
};
|
||||
|
||||
extern struct subpage_area subpage_area_start;
|
||||
|
||||
/* Kmalloc initialisation */
|
||||
void kmalloc_init(void);
|
||||
|
||||
/* Kmalloc allocation functions */
|
||||
void *kmalloc(int size);
|
||||
void *kzalloc(int size);
|
||||
int kfree(void *vaddr);
|
||||
|
||||
#endif /* __KMALLOC_H__ */
|
||||
|
||||
28
tasks/mm0/include/boot.h
Normal file
@@ -0,0 +1,28 @@
#ifndef __BOOT_H__
#define __BOOT_H__

#include <vm_area.h>
#include <task.h>

/* Structures to use when sending new task information to vfs */
struct task_data {
	unsigned long tid;
	unsigned long utcb_address;
};

struct task_data_head {
	unsigned long total;
	struct task_data tdata[];
};

int boottask_setup_regions(struct vm_file *file, struct tcb *task,
			   unsigned long task_start, unsigned long task_end);

int boottask_mmap_regions(struct tcb *task, struct vm_file *file);

struct tcb *boottask_exec(struct vm_file *f, unsigned long task_region_start,
			  unsigned long task_region_end, struct task_ids *ids);

int vfs_send_task_data(struct tcb *vfs);

#endif /* __BOOT_H__ */
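A hypothetical usage sketch for the task_data_head structure above (not code from this commit): the flexible tdata[] member lets the pager pack a variable number of task records into one buffer before replying to vfs; the helper name and the tcb field names used for illustration are assumptions.

#include <stddef.h>
#include <task.h>
#include <boot.h>

/* Sketch: pack task records into a caller-supplied buffer (hypothetical helper) */
static int pack_task_data(void *buf, size_t bufsize,
			  struct tcb **tasks, unsigned long ntasks)
{
	struct task_data_head *th = buf;
	size_t need = sizeof(*th) + ntasks * sizeof(struct task_data);

	if (need > bufsize)
		return -1;	/* caller's buffer too small */

	th->total = ntasks;
	for (unsigned long i = 0; i < ntasks; i++) {
		/* tid/utcb_address fields on struct tcb are assumed here */
		th->tdata[i].tid = tasks[i]->tid;
		th->tdata[i].utcb_address = tasks[i]->utcb_address;
	}

	return 0;
}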
18
tasks/mm0/include/exec.h
Normal file
@@ -0,0 +1,18 @@
/*
 * Definitions for executables
 *
 * Copyright (C) 2008 Bahadir Balban
 */
#ifndef __EXEC_H__
#define __EXEC_H__

/*
 * This presents extra executable file information that is
 * not present in the tcb, in a generic format.
 */
struct exec_file_desc {
	unsigned long text_offset;	/* File offset of text section */
	unsigned long data_offset;	/* File offset of data section */
};

#endif /* __EXEC_H__ */
15
tasks/mm0/include/exit.h
Normal file
@@ -0,0 +1,15 @@
/*
 * Definitions for do_exit() flags
 *
 * Copyright (C) 2008 Bahadir Balban
 */

#ifndef __EXIT_H__
#define __EXIT_H__

#define EXIT_THREAD_DESTROY	(1 << 0)
#define EXIT_UNMAP_ALL_SPACE	(1 << 1)


void do_exit(struct tcb *task, unsigned int flags, int status);
#endif /* __EXIT_H__ */
35
tasks/mm0/include/lib/elf/elfprg.h
Normal file
@@ -0,0 +1,35 @@
/*
 * Definitions for ELF program headers
 * Based on Portable Formats Specification v1.1
 *
 * Copyright (C) 2008 Bahadir Balban
 */
#ifndef __ELFPRG_H__
#define __ELFPRG_H__

#include <l4/types.h>

struct elf_program_header {
	u32 p_type;	/* Type of segment */
	u32 p_offset;	/* Segment file offset */
	u32 p_vaddr;	/* Virtual start address */
	u32 p_paddr;	/* Physical start address */
	u32 p_filesz;	/* Size in stored file */
	u32 p_memsz;	/* Size in memory image */
	u32 p_flags;	/* Segment attributes */
	u32 p_align;	/* Alignment requirement */
} __attribute__((__packed__));

/* Program segment type definitions */
#define PT_NULL		0
#define PT_LOAD		1
#define PT_DYNAMIC	2
#define PT_INTERP	3
#define PT_NOTE		4
#define PT_SHLIB	5
#define PT_PHDR		6
#define PT_LOPROC	0x70000000
#define PT_HIPROC	0x7FFFFFFF


#endif /* __ELFPRG_H__ */
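Since task_setup_from_executable() is still a stub in this commit (it only zeroes the exec_file_desc), here is a hypothetical sketch of how the PT_LOAD entries above could be walked to fill in the text/data file offsets. The PF_X/PF_W flag values, the helper name, and the already-parsed phdr array are assumptions, not part of this diff.

#include <lib/elf/elfprg.h>
#include <exec.h>

#define PF_X	0x1	/* standard ELF segment-flag values, assumed here */
#define PF_W	0x2

/* Sketch: derive exec_file_desc offsets from already-read program headers */
static void fill_exec_desc(struct exec_file_desc *efd,
			   struct elf_program_header *phdr, int phnum)
{
	for (int i = 0; i < phnum; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (phdr[i].p_flags & PF_X)		/* executable -> text */
			efd->text_offset = phdr[i].p_offset;
		else if (phdr[i].p_flags & PF_W)	/* writable -> data */
			efd->data_offset = phdr[i].p_offset;
	}
}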
@@ -3,6 +3,7 @@
|
||||
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
void *kmalloc(size_t size);
|
||||
void kfree(void *blk);
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
#ifndef __MM0_PROC__
|
||||
#define __MM0_PROC__
|
||||
|
||||
#include <vm_area.h>
|
||||
|
||||
struct proc_vm_objects {
|
||||
struct vm_object *stack; /* ZI, RO: devzero, RW: private */
|
||||
struct vm_object *env; /* NON-ZI, RO: private, RW: private */
|
||||
struct vm_object *data; /* NON-ZI, RO: shared, RW: private */
|
||||
struct vm_object *bss; /* ZI, RO: devzero, RW: private */
|
||||
};
|
||||
|
||||
int task_setup_vm_objects(struct tcb *t);
|
||||
|
||||
#endif
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <l4lib/utcb.h>
|
||||
#include <lib/addr.h>
|
||||
#include <l4/api/kip.h>
|
||||
#include <exec.h>
|
||||
|
||||
#define __TASKNAME__ __PAGERNAME__
|
||||
|
||||
@@ -31,7 +32,8 @@
|
||||
#define TCB_SHARED_VM (1 << 0)
|
||||
#define TCB_SHARED_FILES (1 << 1)
|
||||
#define TCB_SHARED_FS (1 << 2)
|
||||
#define TCB_SAME_GROUP (1 << 3)
|
||||
#define TCB_SHARED_TGROUP (1 << 3)
|
||||
#define TCB_SHARED_PARENT (1 << 4)
|
||||
|
||||
struct vm_file;
|
||||
|
||||
@@ -57,6 +59,14 @@ struct tcb {
|
||||
/* Task list */
|
||||
struct list_head list;
|
||||
|
||||
/* Fields for parent-child relations */
|
||||
struct list_head child_ref; /* Child ref in parent's list */
|
||||
struct list_head children; /* List of children */
|
||||
struct tcb *parent; /* Parent task */
|
||||
|
||||
/* Task creation flags */
|
||||
unsigned int clone_flags;
|
||||
|
||||
/* Name of the task */
|
||||
char name[16];
|
||||
|
||||
@@ -66,13 +76,14 @@ struct tcb {
|
||||
int tgid;
|
||||
|
||||
/* Related task ids */
|
||||
unsigned int pagerid; /* Task's pager */
|
||||
unsigned int pagerid; /* Task's pager */
|
||||
|
||||
/* Task's main address space region, usually USER_AREA_START/END */
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
|
||||
/* Page aligned program segment marks, ends exclusive as usual */
|
||||
unsigned long entry;
|
||||
unsigned long text_start;
|
||||
unsigned long text_end;
|
||||
unsigned long data_start;
|
||||
@@ -102,17 +113,6 @@ struct tcb {
|
||||
struct task_fd_head *files;
|
||||
};
|
||||
|
||||
/* Structures to use when sending new task information to vfs */
|
||||
struct task_data {
|
||||
unsigned long tid;
|
||||
unsigned long utcb_address;
|
||||
};
|
||||
|
||||
struct task_data_head {
|
||||
unsigned long total;
|
||||
struct task_data tdata[];
|
||||
};
|
||||
|
||||
struct tcb_head {
|
||||
struct list_head list;
|
||||
int total; /* Total threads */
|
||||
@@ -121,20 +121,16 @@ struct tcb_head {
|
||||
struct tcb *find_task(int tid);
|
||||
void global_add_task(struct tcb *task);
|
||||
void global_remove_task(struct tcb *task);
|
||||
int send_task_data(struct tcb *requester);
|
||||
void task_map_prefault_utcb(struct tcb *mapper, struct tcb *owner);
|
||||
int task_mmap_regions(struct tcb *task, struct vm_file *file);
|
||||
int task_setup_regions(struct vm_file *file, struct tcb *task,
|
||||
unsigned long task_start, unsigned long task_end);
|
||||
int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd);
|
||||
int task_setup_registers(struct tcb *task, unsigned int pc,
|
||||
unsigned int sp, l4id_t pager);
|
||||
struct tcb *tcb_alloc_init(unsigned int flags);
|
||||
int tcb_destroy(struct tcb *task);
|
||||
struct tcb *task_exec(struct vm_file *f, unsigned long task_region_start,
|
||||
unsigned long task_region_end, struct task_ids *ids);
|
||||
int task_start(struct tcb *task, struct task_ids *ids);
|
||||
int task_start(struct tcb *task);
|
||||
int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags);
|
||||
int task_release_vmas(struct task_vma_head *vma_head);
|
||||
int task_prefault_regions(struct tcb *task, struct vm_file *f);
|
||||
struct tcb *task_create(struct tcb *orig,
|
||||
struct task_ids *ids,
|
||||
unsigned int ctrl_flags,
|
||||
|
||||
@@ -225,6 +225,7 @@ struct vm_object *vm_object_create(void);
|
||||
struct vm_file *vm_file_create(void);
|
||||
int vm_file_delete(struct vm_file *f);
|
||||
int vm_object_delete(struct vm_object *vmo);
|
||||
void vm_file_put(struct vm_file *f);
|
||||
|
||||
/* Printing objects, files */
|
||||
void vm_object_print(struct vm_object *vmo);
|
||||
@@ -235,6 +236,9 @@ void vm_print_files(struct list_head *file_list);
|
||||
int prefault_page(struct tcb *task, unsigned long address,
|
||||
unsigned int vmflags);
|
||||
struct page *page_init(struct page *page);
|
||||
struct page *find_page(struct vm_object *vmo, unsigned long page_offset);
|
||||
void *pager_map_page(struct vm_file *f, unsigned long page_offset);
|
||||
void pager_unmap_page(void *vaddr);
|
||||
|
||||
/* To get currently mapped page of a virtual address on a task */
|
||||
struct page *task_virt_to_page(struct tcb *t, unsigned long virtual);
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <utcb.h>
|
||||
#include <mmap.h>
|
||||
#include <test.h>
|
||||
#include <boot.h>
|
||||
|
||||
void handle_requests(void)
|
||||
{
|
||||
@@ -67,7 +68,7 @@ void handle_requests(void)
|
||||
|
||||
case L4_IPC_TAG_TASKDATA:
|
||||
/* Send runnable task information to fs0 */
|
||||
ret = send_task_data(sender);
|
||||
ret = vfs_send_task_data(sender);
|
||||
break;
|
||||
|
||||
case L4_IPC_TAG_SHMGET: {
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <mmap.h>
|
||||
#include <utcb.h>
|
||||
#include <shm.h>
|
||||
#include <test.h>
|
||||
#include <clone.h>
|
||||
|
||||
/*
|
||||
@@ -110,14 +111,15 @@ int do_clone(struct tcb *parent, unsigned long child_stack, unsigned int flags)
|
||||
ids.tid = TASK_ID_INVALID;
|
||||
ids.spid = parent->spid;
|
||||
|
||||
if (flags & TCB_SAME_GROUP)
|
||||
|
||||
/* Determine whether the cloned thread is in parent's thread group */
|
||||
if (flags & TCB_SHARED_TGROUP)
|
||||
ids.tgid = parent->tgid;
|
||||
else
|
||||
ids.tgid = TASK_ID_INVALID;
|
||||
|
||||
if (IS_ERR(child = task_create(parent, &ids, THREAD_SAME_SPACE, flags)))
|
||||
return (int)child;
|
||||
|
||||
/* Set up child stack marks with given stack argument */
|
||||
child->stack_end = child_stack;
|
||||
child->stack_start = 0;
|
||||
@@ -167,7 +169,9 @@ int sys_clone(struct tcb *parent, void *child_stack, unsigned int clone_flags)
|
||||
if (clone_flags & CLONE_FILES)
|
||||
flags |= TCB_SHARED_FILES;
|
||||
if (clone_flags & CLONE_THREAD)
|
||||
flags |= TCB_SAME_GROUP;
|
||||
flags |= TCB_SHARED_TGROUP;
|
||||
if (clone_flags & CLONE_PARENT)
|
||||
flags |= TCB_SHARED_PARENT;
|
||||
|
||||
return do_clone(parent, (unsigned long)child_stack, flags);
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
#include <l4/lib/list.h>
|
||||
#include <vm_area.h>
|
||||
#include <kmalloc/kmalloc.h>
|
||||
#include <lib/malloc.h>
|
||||
|
||||
/*
|
||||
* This is yet unused, it is more of an anticipation
|
||||
|
||||
@@ -13,9 +13,11 @@
|
||||
#include <vm_area.h>
|
||||
#include <syscalls.h>
|
||||
#include <string.h>
|
||||
#include <exec.h>
|
||||
#include <file.h>
|
||||
#include <user.h>
|
||||
#include <task.h>
|
||||
#include <exit.h>
|
||||
|
||||
/*
|
||||
* Different from vfs_open(), which validates an already opened
|
||||
@@ -67,40 +69,100 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
int do_execve(char *filename)
|
||||
/*
|
||||
* Probes and parses the low-level executable file format and creates a
|
||||
* generic execution description that can be used to run the task.
|
||||
*/
|
||||
int task_setup_from_executable(struct vm_file *vmfile, struct tcb *task,
|
||||
struct exec_file_desc *efd)
|
||||
{
|
||||
memset(efd, 0, sizeof(*efd));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int do_execve(struct tcb *sender, char *filename)
|
||||
{
|
||||
int err;
|
||||
unsigned long vnum, length;
|
||||
struct vm_file *f;
|
||||
struct vm_file *vmfile;
|
||||
struct exec_file_desc efd;
|
||||
struct tcb *new_task, *tgleader;
|
||||
int err;
|
||||
|
||||
/* Get file info from vfs */
|
||||
if ((err = vfs_open_bypath(filename, &vnum, &length)) < 0)
|
||||
return err;
|
||||
|
||||
/* Create and get the file structure */
|
||||
if (IS_ERR(f = do_open2(0, 0, vnum, length)))
|
||||
return (int)f;
|
||||
if (IS_ERR(vmfile = do_open2(0, 0, vnum, length)))
|
||||
return (int)vmfile;
|
||||
|
||||
/* Create a new tcb */
|
||||
if (IS_ERR(new_task = tcb_alloc_init(TCB_NO_SHARING))) {
|
||||
vm_file_put(vmfile);
|
||||
return (int)new_task;
|
||||
}
|
||||
|
||||
/* Determine file segments to be mapped */
|
||||
/* Fill in tcb memory segment markers from executable file */
|
||||
if ((err = task_setup_from_executable(vmfile, new_task, &efd)) < 0) {
|
||||
vm_file_put(vmfile);
|
||||
kfree(new_task);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* See if an interpreter (dynamic linker) is needed */
|
||||
|
||||
/* Destroy all threads in the same thread group except group leader */
|
||||
|
||||
/* Release all task resources, do almost everything done in exit() */
|
||||
/* Map task segment markers as virtual memory regions */
|
||||
if ((err = task_mmap_segments(new_task, vmfile, &efd)) < 0) {
|
||||
vm_file_put(vmfile);
|
||||
kfree(new_task);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create new process address space. Start by mapping all
|
||||
* static file segments. We will need brk() for bss.
|
||||
* If sender is a thread in a group, need to find the
|
||||
* group leader and destroy all threaded children in
|
||||
* the group.
|
||||
*/
|
||||
if (sender->clone_flags & TCB_SHARED_TGROUP) {
|
||||
struct tcb *thread;
|
||||
|
||||
/* Find the thread group leader of sender */
|
||||
BUG_ON(!(tgleader = find_task(sender->tgid)));
|
||||
|
||||
/*
|
||||
* Destroy all children threads.
|
||||
* TODO: Set up parents for children's children
|
||||
*/
|
||||
list_for_each_entry(thread, &tgleader->children, child_ref)
|
||||
do_exit(thread, EXIT_THREAD_DESTROY, 0);
|
||||
} else {
|
||||
/* Otherwise group leader is same as sender */
|
||||
tgleader = sender;
|
||||
}
|
||||
|
||||
/* Copy data to new task that is to be retained from exec'ing task */
|
||||
new_task->tid = tgleader->tid;
|
||||
new_task->spid = tgleader->spid;
|
||||
new_task->tgid = tgleader->tgid;
|
||||
new_task->pagerid = tgleader->pagerid;
|
||||
|
||||
/*
|
||||
* Release all task resources, do everything done in
|
||||
* exit() except destroying the actual thread.
|
||||
*/
|
||||
do_exit(tgleader, EXIT_UNMAP_ALL_SPACE, 0);
|
||||
|
||||
/* Set up task registers via exchange_registers() */
|
||||
task_setup_registers(new_task, 0, 0, new_task->pagerid);
|
||||
|
||||
/* Start the task */
|
||||
task_start(new_task);
|
||||
|
||||
#if 0
|
||||
TODO:
|
||||
Dynamic Linking.
|
||||
|
||||
/* See if an interpreter (dynamic linker) is needed */
|
||||
|
||||
/* Find the interpreter executable file, if needed */
|
||||
|
||||
/*
|
||||
@@ -113,9 +175,9 @@ Dynamic Linking.
|
||||
/* Run the interpreter */
|
||||
|
||||
/*
|
||||
* The interpreter:
|
||||
* - May need some initial info (dyn sym tables) at a certain location
|
||||
* - Will find necessary shared library files in userspace
|
||||
* The interpreter will:
|
||||
* - Need some initial info (dyn sym tables) at a certain location
|
||||
* - Find necessary shared library files in userspace
|
||||
* (will use open/read).
|
||||
* - Map them into process address space via mmap()
|
||||
* - Reinitialise references to symbols in the shared libraries
|
||||
@@ -206,7 +268,7 @@ int sys_execve(struct tcb *sender, char *pathname, char *argv[], char *envp[])
|
||||
return err;
|
||||
printf("%s: Copied pathname: %s\n", __FUNCTION__, path);
|
||||
|
||||
return do_execve(path);
|
||||
return do_execve(sender, path);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
#include <task.h>
|
||||
#include <file.h>
|
||||
#include <utcb.h>
|
||||
#include <exit.h>
|
||||
#include <test.h>
|
||||
#include <vm_area.h>
|
||||
#include <syscalls.h>
|
||||
#include <l4lib/arch/syslib.h>
|
||||
@@ -13,6 +15,7 @@
|
||||
#include <l4lib/exregs.h>
|
||||
#include <l4lib/ipcdefs.h>
|
||||
#include <lib/malloc.h>
|
||||
#include <l4/api/space.h>
|
||||
|
||||
/*
|
||||
* Sends vfs task information about forked child, and its utcb
|
||||
@@ -65,7 +68,7 @@ int task_close_files(struct tcb *task)
|
||||
return err;
|
||||
}
|
||||
|
||||
void sys_exit(struct tcb *task, int status)
|
||||
void do_exit(struct tcb *task, unsigned int flags, int status)
|
||||
{
|
||||
struct task_ids ids = {
|
||||
.tid = task->tid,
|
||||
@@ -86,8 +89,14 @@ void sys_exit(struct tcb *task, int status)
|
||||
/* Free task's local tcb */
|
||||
tcb_destroy(task);
|
||||
|
||||
/* Ask the kernel to delete it from its records */
|
||||
l4_thread_control(THREAD_DESTROY, &ids);
|
||||
/* Ask the kernel to reset this thread's page tables */
|
||||
if (flags & EXIT_UNMAP_ALL_SPACE)
|
||||
l4_unmap((void *)UNMAP_ALL_SPACE,
|
||||
UNMAP_ALL_SPACE, task->tid);
|
||||
|
||||
/* Ask the kernel to delete the thread from its records */
|
||||
if (flags & EXIT_THREAD_DESTROY)
|
||||
l4_thread_control(THREAD_DESTROY, &ids);
|
||||
|
||||
/* TODO: Wake up any waiters about task's destruction */
|
||||
#if 0
|
||||
@@ -100,3 +109,8 @@ void sys_exit(struct tcb *task, int status)
|
||||
#endif
|
||||
}
|
||||
|
||||
void sys_exit(struct tcb *task, int status)
|
||||
{
|
||||
do_exit(task, EXIT_THREAD_DESTROY, status);
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
#include <vm_area.h>
#include <task.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(memory.h)

@@ -5,7 +5,8 @@
 */
#include <init.h>
#include <vm_area.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <mm/alloc_page.h>
#include <l4/macros.h>
#include <l4/api/errno.h>
#include <l4lib/types.h>
@@ -18,6 +19,7 @@
#include <globals.h>
#include <file.h>
#include <user.h>
#include <test.h>

/* Copy from one page's buffer into another page */
int page_copy(struct page *dst, struct page *src,
@@ -306,6 +308,27 @@ int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
	return 0;
}

/* Maps a page from a vm_file to the pager's address space */
void *pager_map_page(struct vm_file *f, unsigned long page_offset)
{
	int err;
	struct page *p;

	if ((err = read_file_pages(f, page_offset, page_offset + 1)) < 0)
		return PTR_ERR(err);

	if ((p = find_page(&f->vm_obj, page_offset)))
		return (void *)l4_map_helper((void *)page_to_phys(p), 1);
	else
		return 0;
}

/* Unmaps a page's virtual address from the pager's address space */
void pager_unmap_page(void *addr)
{
	l4_unmap_helper(addr, 1);
}

int vfs_write(unsigned long vnum, unsigned long file_offset,
	      unsigned long npages, void *pagebuf)
{
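pager_map_page() and pager_unmap_page() give the pager a temporary window onto a single page of a file; elf_parse_executable(), added later in this commit, uses the former to peek at an ELF header. A small usage sketch (the wrapper itself is illustrative, not part of the commit):

/* Usage sketch: read a file's first page from the pager, e.g. to probe
 * an ELF header, then drop the temporary mapping again. */
int probe_image(struct vm_file *f)
{
	struct elf_header *hdr;
	int ret;

	hdr = pager_map_page(f, 0);
	if (IS_ERR(hdr) || !hdr)
		return -EINVAL;

	ret = elf_probe(hdr);		/* added by this commit */

	pager_unmap_page(hdr);		/* release the window */
	return ret;
}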
@@ -456,6 +479,17 @@ int fsync_common(struct tcb *task, int fd)
	return 0;
}

void vm_file_put(struct vm_file *file)
{
	/* Reduce file's opener count */
	if (!(--file->openers))
		/* No openers left, check any mappers */
		if (!file->vm_obj.nlinks)
			/* No links or openers, delete the file */
			vm_file_delete(file);
}

/* Closes the file descriptor and notifies vfs */
int do_close(struct tcb *task, int fd)
{
@@ -466,12 +500,8 @@ int do_close(struct tcb *task, int fd)
	if ((err = vfs_close(task->tid, fd)) < 0)
		return err;

	/* Reduce file's opener count */
	if (!(--task->files->fd[fd].vmfile->openers))
		/* No openers left, check any mappers */
		if (!task->files->fd[fd].vmfile->vm_obj.nlinks)
			/* No links or openers, delete the file */
			vm_file_delete(task->files->fd[fd].vmfile);
	/* Reduce file refcount etc. */
	vm_file_put(task->files->fd[fd].vmfile);

	task->files->fd[fd].vnum = 0;
	task->files->fd[fd].cursor = 0;

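vm_file_put() centralises the "last opener and no remaining mappers" teardown that do_close() used to open-code. The matching increment is not visible in this diff; a sketch of what it presumably looks like at open (or mmap) time:

/* Hedged counterpart sketch; whether the code names it vm_file_get()
 * or simply bumps file->openers inline at open time is not shown here. */
static inline void vm_file_get(struct vm_file *file)
{
	file->openers++;
}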
@@ -7,7 +7,7 @@
#include <string.h>
#include <memory.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <l4lib/utcb.h>
@@ -17,6 +17,7 @@
#include <init.h>
#include <utcb.h>
#include <test.h>
#include <boot.h>

/* A separate list from the generic file list that keeps just the boot files */
LIST_HEAD(boot_file_list);
@@ -44,12 +45,16 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
	task->spid = ids->spid;
	task->tgid = ids->tgid;

	if ((err = task_setup_regions(f, task, task_start, task_end)) < 0)
	if ((err = boottask_setup_regions(f, task, task_start, task_end)) < 0)
		return err;

	if ((err = task_mmap_regions(task, f)) < 0)
	if ((err = boottask_mmap_regions(task, f)) < 0)
		return err;

	/* Set pager as child and parent of itself */
	list_add(&task->child_ref, &task->children);
	task->parent = task;

	/* Add the task to the global task list */
	global_add_task(task);

@@ -118,7 +123,7 @@ int start_boot_tasks(struct initdata *initdata)
	ids.tgid = VFS_TID;

	printf("%s: Initialising fs0\n",__TASKNAME__);
	BUG_ON((IS_ERR(fs0_task = task_exec(fs0_file, USER_AREA_START, USER_AREA_END, &ids))));
	BUG_ON((IS_ERR(fs0_task = boottask_exec(fs0_file, USER_AREA_START, USER_AREA_END, &ids))));
	total++;

	/* Initialise other tasks */
@@ -128,7 +133,7 @@ int start_boot_tasks(struct initdata *initdata)
		ids.spid = TASK_ID_INVALID;
		ids.tgid = TASK_ID_INVALID;
		list_del_init(&file->list);
		BUG_ON(IS_ERR(task_exec(file, USER_AREA_START, USER_AREA_END, &ids)));
		BUG_ON(IS_ERR(boottask_exec(file, USER_AREA_START, USER_AREA_END, &ids)));
		total++;
	}

@@ -151,10 +156,6 @@ void init_mm(struct initdata *initdata)
	init_page_allocator(membank[0].free, membank[0].end);
	// printf("%s: Initialised page allocator.\n", __TASKNAME__);

	/* Initialise the pager's memory allocator */
	kmalloc_init();
	// printf("%s: Initialised kmalloc.\n", __TASKNAME__);

	/* Initialise the zero page */
	init_devzero();
	// printf("%s: Initialised devzero.\n", __TASKNAME__);

@@ -7,7 +7,7 @@
#include <string.h>
#include <init.h>
#include INC_API(kip.h)
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <l4lib/arch/syscalls.h>

/* Kernel data acquired during initialisation */

42
tasks/mm0/src/lib/elf/elf.c
Normal file
42
tasks/mm0/src/lib/elf/elf.c
Normal file
@@ -0,0 +1,42 @@
/*
 * ELF manipulation routines
 *
 * Copyright (C) 2008 Bahadir Balban
 */
#include <vm_area.h>
#include <lib/elf.h>
#include <lib/elfprg.h>
#include <lib/elfsym.h>
#include <lib/elfsect.h>


int elf_probe(struct elf_header *header)
{
	/* Test that it is a 32-bit little-endian ELF file */
	if (header->e_ident[EI_MAG0] == ELFMAG0 &&
	    header->e_ident[EI_MAG1] == ELFMAG1 &&
	    header->e_ident[EI_MAG2] == ELFMAG2 &&
	    header->e_ident[EI_MAG3] == ELFMAG3 &&
	    header->e_ident[EI_CLASS] == ELFCLASS32 &&
	    header->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
	else
		return -1;
}


int elf_parse_executable(struct vm_file *f)
{
	int err;
	struct elf_header *elf_header = pager_map_page(f, 0);
	struct elf_program_header *prg_header;
	struct elf_section_header *sect_header;

	/* Test that it is a valid elf file */
	if ((err = elf_probe(elf_header)) < 0)
		return err;

	/* Get the program header table */
	prg_header = (struct elf_program_header *)((void *)elf_header + elf_header->e_phoff);
}

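elf_parse_executable() currently stops after locating the program header table. Below is a hedged sketch of a plausible next step, walking the PT_LOAD entries to fill the per-segment offsets that task_mmap_segments() consumes later in this commit; the efd/task parameters and every field except text_offset/data_offset are assumptions, not committed code.

	/* Hypothetical continuation: efd and task would have to be passed in. */
	for (int i = 0; i < elf_header->e_phnum; i++) {
		struct elf_program_header *ph = &prg_header[i];

		if (ph->p_type != PT_LOAD)
			continue;

		if (ph->p_flags & PF_X) {
			/* Executable segment -> text */
			efd->text_offset = __pfn(ph->p_offset);
			task->text_start = ph->p_vaddr;
			task->text_end = ph->p_vaddr + ph->p_memsz;
			task->entry = elf_header->e_entry;
		} else {
			/* Writable segment -> data, with bss as the zero-filled tail */
			efd->data_offset = __pfn(ph->p_offset);
			task->data_start = ph->p_vaddr;
			task->data_end = ph->p_vaddr + ph->p_filesz;
			task->bss_start = task->data_end;
			task->bss_end = ph->p_vaddr + ph->p_memsz;
		}
	}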
@@ -4,7 +4,8 @@
 * Copyright (C) 2007 Bahadir Balban
 */
#include <lib/idpool.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <l4/macros.h>
#include INC_GLUE(memory.h)
#include <stdio.h>
#include <l4/api/errno.h>

@@ -205,14 +205,6 @@ create a new, free block */
	return (char *)n + sizeof(malloc_t);
}

static inline void *kzalloc(size_t size)
{
	void *buf = kmalloc(size);

	memset(buf, 0, size);
	return buf;
}

/*****************************************************************************
 *****************************************************************************/
void kfree(void *blk)
@@ -254,6 +246,9 @@ void kfree(void *blk)
	}
	/* free the block */
	m->used = 0;
	/* BB: Addition: put 0xFF to block memory so we know if we use freed memory */
	memset(blk, 0xFF, m->size);

	/* coalesce adjacent free blocks
	   Hard to spell, hard to do */
	for(m = (malloc_t *)g_heap_bot; m != NULL; m = m->next)

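The 0xFF poison makes use-after-free visible: data read back through a stale pointer now comes out as 0xFFFFFFFF on this 32-bit target, which faults quickly instead of silently returning stale-but-plausible values. A contrived illustration (not part of the commit):

struct node { struct node *next; };

void poison_demo(void)
{
	struct node *n = kmalloc(sizeof(*n));

	n->next = 0;
	kfree(n);

	/* Bug: use after free. With the poison in kfree(), n->next now
	 * reads as 0xFFFFFFFF, so the next dereference blows up loudly
	 * rather than wandering off into valid-looking memory. */
}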
@@ -5,7 +5,7 @@
 */
#include <l4/lib/math.h>
#include <vm_area.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include INC_API(errno.h)
#include <posix/sys/types.h>
#include <l4lib/arch/syscalls.h>

@@ -5,7 +5,7 @@
#include <l4/lib/list.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <mm/alloc_page.h>
#include <vm_area.h>
#include <string.h>
@@ -62,6 +62,7 @@ int default_release_pages(struct vm_object *vm_obj)
	return 0;
}


int file_page_out(struct vm_object *vm_obj, unsigned long page_offset)
{
	struct vm_file *f = vm_object_to_file(vm_obj);

@@ -10,7 +10,7 @@
#include <utcb.h>
#include <vm_area.h>
#include <globals.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <lib/idpool.h>

@@ -17,7 +17,7 @@
#include <l4lib/ipcdefs.h>
#include <l4lib/exregs.h>
#include <lib/addr.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <init.h>
#include <string.h>
#include <vm_area.h>
@@ -25,9 +25,12 @@
#include <file.h>
#include <utcb.h>
#include <task.h>
#include <exec.h>
#include <shm.h>
#include <mmap.h>
#include <boot.h>
#include <globals.h>
#include <test.h>

struct global_list global_tasks = {
	.list = { &global_tasks.list, &global_tasks.list },
@@ -103,6 +106,8 @@ struct tcb *tcb_alloc_init(unsigned int flags)

	/* Initialise list structure */
	INIT_LIST_HEAD(&task->list);
	INIT_LIST_HEAD(&task->child_ref);
	INIT_LIST_HEAD(&task->children);

	return task;
}
@@ -138,11 +143,31 @@ int task_free_resources(struct tcb *task)

int tcb_destroy(struct tcb *task)
{
	struct tcb *child, *n;

	global_remove_task(task);

	/* Free all resources of the task */
	task_free_resources(task);

	/*
	 * All children of the current task become children
	 * of the parent of this task.
	 */
	list_for_each_entry_safe(child, n, &task->children,
				 child_ref) {
		list_del_init(&child->child_ref);
		list_add_tail(&child->child_ref,
			      &task->parent->children);
		child->parent = task->parent;
	}
	/* The task is no longer a child of its parent */
	list_del_init(&task->child_ref);

	/* Before deleting the task, make sure it is on no list */
	BUG_ON(!list_empty(&task->list));
	BUG_ON(!list_empty(&task->child_ref));
	BUG_ON(!list_empty(&task->children));
	kfree(task);

	return 0;
|
||||
* Note, that we don't copy vm objects but just the links to
|
||||
* them, because vm objects are not per-process data.
|
||||
*/
|
||||
int copy_vmas(struct tcb *to, struct tcb *from)
|
||||
int task_copy_vmas(struct tcb *to, struct tcb *from)
|
||||
{
|
||||
struct vm_area *vma, *new_vma;
|
||||
|
||||
@@ -223,7 +248,7 @@ int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags)
|
||||
to->vm_area_head->tcb_refs++;
|
||||
} else {
|
||||
/* Copy all vm areas */
|
||||
copy_vmas(to, from);
|
||||
task_copy_vmas(to, from);
|
||||
}
|
||||
|
||||
/* Copy all file descriptors */
|
||||
@@ -243,7 +268,7 @@ int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct tcb *task_create(struct tcb *orig, struct task_ids *ids,
|
||||
struct tcb *task_create(struct tcb *parent, struct task_ids *ids,
|
||||
unsigned int ctrl_flags, unsigned int share_flags)
|
||||
{
|
||||
struct tcb *task;
|
||||
@@ -264,36 +289,64 @@ struct tcb *task_create(struct tcb *orig, struct task_ids *ids,
|
||||
task->spid = ids->spid;
|
||||
task->tgid = ids->tgid;
|
||||
|
||||
/* Set task's creation flags */
|
||||
task->clone_flags = share_flags;
|
||||
|
||||
/*
|
||||
* If an original task has been specified, that means either
|
||||
* If a parent task has been specified, that means either
|
||||
* we are forking, or we are cloning the original tcb fully
|
||||
* or partially. Therefore we copy tcbs depending on share flags.
|
||||
*/
|
||||
if (orig)
|
||||
copy_tcb(task, orig, share_flags);
|
||||
if (parent) {
|
||||
copy_tcb(task, parent, share_flags);
|
||||
|
||||
/* Set up parent-child relationship */
|
||||
list_add_tail(&task->child_ref, &parent->children);
|
||||
task->parent = parent;
|
||||
} else {
|
||||
struct tcb *pager = find_task(PAGER_TID);
|
||||
|
||||
/* All parentless tasks are children of the pager */
|
||||
list_add_tail(&task->child_ref, &pager->children);
|
||||
task->parent = pager;
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
|
||||
int task_mmap_regions(struct tcb *task, struct vm_file *file)
|
||||
int task_mmap_segments(struct tcb *task, struct vm_file *file, struct exec_file_desc *efd)
|
||||
{
|
||||
void *mapped;
|
||||
struct vm_file *shm;
|
||||
|
||||
/*
|
||||
* mmap each task's physical image to task's address space.
|
||||
* TODO: Map data and text separately when available from bootdesc.
|
||||
*/
|
||||
if (IS_ERR(mapped = do_mmap(file, 0, task, task->text_start,
|
||||
/* mmap task's text to task's address space. */
|
||||
if (IS_ERR(mapped = do_mmap(file, efd->text_offset, task, task->text_start,
|
||||
VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,
|
||||
__pfn(page_align_up(task->text_end) -
|
||||
task->text_start)))) {
|
||||
page_align(task->text_start))))) {
|
||||
printf("do_mmap: failed with %d.\n", (int)mapped);
|
||||
return (int)mapped;
|
||||
}
|
||||
|
||||
/* mmap each task's environment as anonymous memory. */
|
||||
/* mmap task's data to task's address space. */
|
||||
if (IS_ERR(mapped = do_mmap(file, efd->data_offset, task, task->data_start,
|
||||
VM_READ | VM_WRITE | VMA_PRIVATE,
|
||||
__pfn(page_align_up(task->data_end) -
|
||||
page_align(task->data_start))))) {
|
||||
printf("do_mmap: failed with %d.\n", (int)mapped);
|
||||
return (int)mapped;
|
||||
}
|
||||
|
||||
/* mmap task's bss as anonymous memory. */
|
||||
if (IS_ERR(mapped = do_mmap(0, 0, task, task->bss_start,
|
||||
VM_READ | VM_WRITE |
|
||||
VMA_PRIVATE | VMA_ANONYMOUS,
|
||||
__pfn(task->bss_end - task->bss_start)))) {
|
||||
printf("do_mmap: Mapping environment failed with %d.\n",
|
||||
(int)mapped);
|
||||
return (int)mapped;
|
||||
}
|
||||
/* mmap task's environment as anonymous memory. */
|
||||
if (IS_ERR(mapped = do_mmap(0, 0, task, task->env_start,
|
||||
VM_READ | VM_WRITE |
|
||||
VMA_PRIVATE | VMA_ANONYMOUS,
|
||||
@@ -303,7 +356,7 @@ int task_mmap_regions(struct tcb *task, struct vm_file *file)
|
||||
return (int)mapped;
|
||||
}
|
||||
|
||||
/* mmap each task's stack as anonymous memory. */
|
||||
/* mmap task's stack as anonymous memory. */
|
||||
if (IS_ERR(mapped = do_mmap(0, 0, task, task->stack_start,
|
||||
VM_READ | VM_WRITE |
|
||||
VMA_PRIVATE | VMA_ANONYMOUS,
|
||||
@@ -324,39 +377,6 @@ int task_mmap_regions(struct tcb *task, struct vm_file *file)
|
||||
return 0;
|
||||
}
|
||||
|
||||
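task_mmap_segments() now takes an exec_file_desc carrying per-segment file offsets instead of assuming the image starts at file offset 0. Only text_offset and data_offset are visible in this diff, so the sketch below of the structure (presumably declared in the newly included exec.h) is a guess at its minimal shape:

/* Hedged sketch; the real declaration is not shown in this diff */
struct exec_file_desc {
	unsigned long text_offset;	/* page offset of text within the file */
	unsigned long data_offset;	/* page offset of data within the file */
};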
int task_setup_regions(struct vm_file *file, struct tcb *task,
		       unsigned long task_start, unsigned long task_end)
{
	/*
	 * Set task's main address space boundaries. Not all tasks
	 * run in the default user boundaries, e.g. mm0 pager.
	 */
	task->start = task_start;
	task->end = task_end;

	/* Prepare environment boundaries. */
	task->env_end = task->end;
	task->env_start = task->env_end - DEFAULT_ENV_SIZE;
	task->args_end = task->env_start;
	task->args_start = task->env_start;

	/* Task stack starts right after the environment. */
	task->stack_end = task->env_start;
	task->stack_start = task->stack_end - DEFAULT_STACK_SIZE;

	/* Currently RO text and RW data are one region. TODO: Fix this */
	task->data_start = task->start;
	task->data_end = task->start + page_align_up(file->length);
	task->text_start = task->data_start;
	task->text_end = task->data_end;

	/* Task's region available for mmap */
	task->map_start = task->data_end;
	task->map_end = task->stack_start;

	return 0;
}

int task_setup_registers(struct tcb *task, unsigned int pc,
			 unsigned int sp, l4id_t pager)
{
@@ -367,7 +387,7 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
	if (!sp)
		sp = align(task->stack_end - 1, 8);
	if (!pc)
		pc = task->text_start;
		pc = task->entry;
	if (!pager)
		pager = self_tid();

@@ -384,13 +404,18 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
	return 0;
}

int task_start(struct tcb *task, struct task_ids *ids)
int task_start(struct tcb *task)
{
	int err;
	struct task_ids ids = {
		.tid = task->tid,
		.spid = task->spid,
		.tgid = task->tgid,
	};

	/* Start the thread */
	printf("Starting task with id %d, spid: %d\n", task->tid, task->spid);
	if ((err = l4_thread_control(THREAD_RUN, ids)) < 0) {
	if ((err = l4_thread_control(THREAD_RUN, &ids)) < 0) {
		printf("l4_thread_control failed with %d\n", err);
		return err;
	}
@@ -398,75 +423,12 @@ int task_start(struct tcb *task, struct task_ids *ids)
	return 0;
}

/*
 * Prefaults all mapped regions of a task. The reason we have this is
 * some servers are in the page fault handling path (e.g. fs0), and we
 * don't want them to fault and cause deadlocks and circular deps.
 *
 * Normally fs0 faults don't cause dependencies because its faults
 * are handled by the boot pager, which is part of mm0. BUT: it may
 * cause deadlocks because fs0 may fault while serving a request
 * from mm0 (which is expected to also handle the fault).
 */
int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
	struct vm_area *vma;

	list_for_each_entry(vma, &task->vm_area_head->list, list) {
		for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
			BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
					     VM_READ | VM_WRITE) < 0);
	}
	return 0;
}

/*
 * Main entry point for the creation, initialisation and
 * execution of a new task.
 */
struct tcb *task_exec(struct vm_file *f, unsigned long task_region_start,
		      unsigned long task_region_end, struct task_ids *ids)
{
	struct tcb *task;
	int err;

	if (IS_ERR(task = task_create(0, ids, THREAD_NEW_SPACE,
				      TCB_NO_SHARING)))
		return task;

	if ((err = task_setup_regions(f, task, task_region_start,
				      task_region_end)) < 0)
		return PTR_ERR(err);

	if ((err = task_mmap_regions(task, f)) < 0)
		return PTR_ERR(err);

	if ((err = task_setup_registers(task, 0, 0, 0)) < 0)
		return PTR_ERR(err);

	/* Add the task to the global task list */
	global_add_task(task);

	/* Add the file to global vm lists */
	global_add_vm_file(f);

	/* Prefault all its regions */
	if (ids->tid == VFS_TID)
		task_prefault_regions(task, f);

	/* Start the task */
	if ((err = task_start(task, ids)) < 0)
		return PTR_ERR(err);

	return task;
}

/*
 * During its initialisation FS0 wants to learn how many boot tasks
 * are running, and their tids, which includes itself. This function
 * provides that information.
 */
int send_task_data(struct tcb *vfs)
int vfs_send_task_data(struct tcb *vfs)
{
	int li = 0;
	struct tcb *t, *self;
@@ -501,3 +463,25 @@ int send_task_data(struct tcb *vfs)
	return 0;
}

/*
 * Prefaults all mapped regions of a task. The reason we have this is
 * some servers are in the page fault handling path (e.g. fs0), and we
 * don't want them to fault and cause deadlocks and circular deps.
 *
 * Normally fs0 faults don't cause dependencies because its faults
 * are handled by the boot pager, which is part of mm0. BUT: it may
 * cause deadlocks because fs0 may fault while serving a request
 * from mm0 (which is expected to also handle the fault).
 */
int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
	struct vm_area *vma;

	list_for_each_entry(vma, &task->vm_area_head->list, list) {
		for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
			BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
					     VM_READ | VM_WRITE) < 0);
	}
	return 0;
}


@@ -7,7 +7,7 @@
#include <vm_area.h>
#include <l4/macros.h>
#include <l4/api/errno.h>
#include <kmalloc/kmalloc.h>
#include <lib/malloc.h>
#include <globals.h>

/* Global list of all in-memory files on the system */