Initial commit

Author: Bahadir Balban
Date:   2008-01-13 13:53:52 +00:00
Commit: e2b791a3d8
789 changed files with 95825 additions and 0 deletions

tasks/mm0/src/.scons14756 (binary file, not shown)

tasks/mm0/src/arch (Symbolic link, 1 line)

@@ -0,0 +1 @@
arch-arm


@@ -0,0 +1,55 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <arch/mm.h>
/* Extracts generic protection flags from architecture-specific pte */
unsigned int vm_prot_flags(pte_t pte)
{
unsigned int vm_prot_flags = 0;
unsigned int rw_flags = __MAP_USR_RW_FLAGS & PTE_PROT_MASK;
unsigned int ro_flags = __MAP_USR_RO_FLAGS & PTE_PROT_MASK;
/* Clear non-protection flags */
pte &= PTE_PROT_MASK;
if (pte == ro_flags)
vm_prot_flags = VM_READ | VM_EXEC;
else if (pte == rw_flags)
vm_prot_flags = VM_READ | VM_WRITE | VM_EXEC;
else
vm_prot_flags = VM_NONE;
return vm_prot_flags;
}
/*
* PTE STATES:
* PTE type field: 00 (Translation fault)
* PTE type field correct, AP bits: None (Read or Write access fault)
* PTE type field correct, AP bits: RO (Write access fault)
*/
/* Extracts arch-specific fault parameters and puts them into generic format */
void set_generic_fault_params(struct fault_data *fault)
{
unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
fault->reason = 0;
if (is_prefetch_abort(fault->kdata->fsr)) {
fault->reason |= VM_READ;
fault->address = fault->kdata->faulty_pc;
} else {
fault->address = fault->kdata->far;
/* Always assume read fault first */
if (prot_flags & VM_NONE)
fault->reason |= VM_READ;
else if (prot_flags & VM_READ)
fault->reason |= VM_WRITE;
else
BUG();
}
}
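
The read-before-write inference above is compact and easy to misread, so here is a minimal, self-contained sketch of the same decision. The flag values and the prefetch test are local stand-ins, not the real arch definitions; PTE_PROT_MASK, __MAP_USR_*_FLAGS and is_prefetch_abort() live in headers that this view does not show.

#include <stdio.h>

#define VM_NONE		0
#define VM_READ		(1 << 0)
#define VM_WRITE	(1 << 1)

/* Assumed inputs: was it an instruction fetch, and what did the pte allow? */
static unsigned int fault_reason(int prefetch_abort, unsigned int prot)
{
	if (prefetch_abort)
		return VM_READ;		/* Instruction fetch from an unmapped page */
	if (prot == VM_NONE)
		return VM_READ;		/* No access yet: assume a read fault first */
	if (prot & VM_READ)
		return VM_WRITE;	/* Page already readable: must be a write fault */
	return VM_NONE;			/* Anything else is unexpected */
}

int main(void)
{
	printf("data abort on unmapped page  -> %s\n",
	       fault_reason(0, VM_NONE) == VM_READ ? "read fault" : "write fault");
	printf("data abort on read-only page -> %s\n",
	       fault_reason(0, VM_READ) == VM_WRITE ? "write fault" : "read fault");
	return 0;
}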

tasks/mm0/src/fault.c (Normal file, 478 lines)

@@ -0,0 +1,478 @@
/*
* Page fault handling.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <vm_area.h>
#include <task.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <arch/mm.h>
#include <l4/generic/space.h>
#include <string.h>
#include <memory.h>
#include <shm.h>
/* FIXME: TODO:
 * For every page that is allocated (read-only file pages, anon pages,
 * etc.), the owner file's page cache must be checked first, before
 * allocating a new page.
 */
/*
* For copy-on-write vmas, grows an existing shadow vma, or creates a new one
* for the copy-on-write'ed page. Then adds this shadow vma to the actual vma's
* shadow list. Shadow vmas never overlap with each other, and always overlap
* with part of their original vma.
*/
struct vm_area *copy_on_write_vma(struct fault_data *fault)
{
struct vm_area *shadow;
unsigned long faulty_pfn = __pfn(fault->address);
BUG_ON(faulty_pfn < fault->vma->pfn_start ||
faulty_pfn >= fault->vma->pfn_end);
list_for_each_entry(shadow, &fault->vma->shadow_list, list) {
if (faulty_pfn == (shadow->pfn_start - 1)) {
/* Growing start of existing shadow vma */
shadow->pfn_start = faulty_pfn;
shadow->f_offset -= 1;
return shadow;
} else if (faulty_pfn == shadow->pfn_end) {
/* Growing end of existing shadow vma (pfn_end is exclusive) */
shadow->pfn_end = faulty_pfn + 1;
return shadow;
}
}
/* Otherwise this is a new shadow vma that must be initialised */
shadow = kzalloc(sizeof(struct vm_area));
BUG(); /* This f_offset calculation is wrong: it uses uninitialised
fields, and swap offsets are calculated differently anyway */
shadow->f_offset = faulty_pfn - shadow->pfn_start
+ shadow->f_offset;
shadow->pfn_start = faulty_pfn;
shadow->pfn_end = faulty_pfn + 1; /* End pfn is exclusive */
shadow->flags = fault->vma->flags;
/* The vma is owned by the swap file, since it's a private vma */
shadow->owner = fault->task->swap_file;
INIT_LIST_HEAD(&shadow->list);
INIT_LIST_HEAD(&shadow->shadow_list);
/*
* The actual vma uses its shadow_list as the list head for shadows.
* The shadows use their list member, and shadow_list is unused.
*/
list_add(&shadow->list, &fault->vma->shadow_list);
return shadow;
}
/*
* Handles any page ownership change or allocation for file-backed pages.
*/
int do_file_page(struct fault_data *fault)
{
unsigned int reason = fault->reason;
unsigned int vma_flags = fault->vma->flags;
unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
/* For RO or non-cow WR pages just read in the page */
if (((reason & VM_READ) || ((reason & VM_WRITE) && !(vma_flags & VMA_COW)))
&& (pte_flags & VM_NONE)) {
/* Allocate a new page */
void *paddr = alloc_page(1);
void *vaddr = phys_to_virt(paddr);
struct page *page = phys_to_page(paddr);
/* Map new page at a self virtual address temporarily */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/*
* Read the page. (Simply read into the faulty area that's
* now mapped using a newly allocated page.)
*/
fault->vma->owner->pager->ops.read_page(fault, vaddr);
/* Remove temporary mapping */
l4_unmap(vaddr, 1, self_tid());
/* Map it to task. */
l4_map(paddr, (void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
spin_lock(&page->lock);
/* Update its page descriptor */
page->count++;
page->owner = fault->vma->owner;
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->virtual = page_align(fault->address);
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
spin_unlock(&page->lock);
/* Upgrade RO page to non-cow write */
} else if ((reason & VM_WRITE) && (pte_flags & VM_READ)
&& !(vma_flags & VMA_COW)) {
/* The page is mapped in, just update its permission */
l4_map((void *)__pte_to_addr(fault->kdata->pte),
(void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
/*
* For cow-write, allocate private pages and create shadow vmas.
*/
} else if ((reason & VM_WRITE) && (pte_flags & VM_READ)
&& (vma_flags & VMA_COW)) {
void *pa = (void *)__pte_to_addr(fault->kdata->pte);
void *new_pa = alloc_page(1);
struct page *page = phys_to_page(pa);
struct page *new_page = phys_to_page(new_pa);
void *va, *new_va;
/* Create or obtain existing shadow vma for the page */
struct vm_area *shadow = copy_on_write_vma(fault);
/* Map new page at a local virtual address temporarily */
new_va = l4_map_helper(new_pa, 1);
/* Map the old page (vmapped for process but not us) to self */
va = l4_map_helper(pa, 1);
/* Copy data from old to new page */
memcpy(new_va, va, PAGE_SIZE);
/* Remove temporary mappings */
l4_unmap(va, 1, self_tid());
l4_unmap(new_va, 1, self_tid());
spin_lock(&page->lock);
/* Clear usage details for original page. */
page->count--;
page->virtual = 0; /* FIXME: Maybe mapped for multiple processes? */
/* New page is owned by shadow's owner (swap) */
new_page->owner = shadow->owner;
new_page->count++;
new_page->f_offset = __pfn(fault->address)
- shadow->pfn_start + shadow->f_offset;
new_page->virtual = page_align(fault->address);
/* Add the new page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&new_page->list));
list_add(&new_page->list, &new_page->owner->page_cache_list);
spin_unlock(&page->lock);
/*
* Overwrite the original file-backed page's mapping on this
* task with the writeable private page. The original physical
* page still exists in memory and can be referenced from its
* associated owner file, but it's not mapped into any virtual
* address anymore in this task.
*/
l4_map(new_pa, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
} else if ((reason & VM_WRITE) && (pte_flags & VM_NONE)
&& (vma_flags & VMA_COW)) {
struct vm_area *shadow;
/* Allocate a new page */
void *paddr = alloc_page(1);
void *vaddr = phys_to_virt(paddr);
struct page *page = phys_to_page(paddr);
/* Map it to self */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/* Update its page descriptor */
page->count++;
page->owner = fault->vma->owner;
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->virtual = page_align(fault->address);
/*
* Read the page. (Simply read into the faulty area that's
* now mapped using a newly allocated page.)
*/
fault->vma->owner->pager->ops.read_page(fault, vaddr);
/* Unmap from self */
l4_unmap(vaddr, 1, self_tid());
/* Map to task. */
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
/* Obtain a shadow vma for the page */
shadow = copy_on_write_vma(fault);
spin_lock(&page->lock);
/* Now anonymise the page by changing its owner file to swap */
page->owner = shadow->owner;
/* Page's offset is different in its new owner. */
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
spin_unlock(&page->lock);
} else
BUG();
return 0;
}
/*
* Handles any page allocation or file ownership change for anonymous pages.
* For read accesses initialises a wired-in zero page and for write accesses
* initialises a private ZI page giving its ownership to the swap file.
*/
int do_anon_page(struct fault_data *fault)
{
unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
void *paddr, *vaddr;
struct page *page;
/* If swapped, read in with vma's pager (swap in anon case) */
if (pte_flags & VM_SWAPPED) {
BUG();
// Properly implement:
// fault->vma->owner->pager->ops.read_page(fault);
/* Map the page with right permission */
if (fault->reason & VM_READ)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RO_FLAGS, fault->task->tid);
else if (fault->reason & VM_WRITE)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
else
BUG();
return 0;
}
/* For non-existent pages just map the zero page. */
if (fault->reason & VM_READ) {
/*
* Zero page is a special wired-in page that is mapped
* many times in many tasks. Just update its count field.
*/
paddr = get_zero_page();
#if defined(SHM_DISJOINT_VADDR_POOL)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RO_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED
#endif
}
/* Write faults require a real zero initialised page */
if (fault->reason & VM_WRITE) {
paddr = alloc_page(1);
vaddr = phys_to_virt(paddr);
page = phys_to_page(paddr);
/* NOTE:
* This mapping overwrites the original RO mapping which
* is anticipated to be the zero page.
*/
BUG_ON(__pte_to_addr(fault->kdata->pte) !=
(unsigned long)get_zero_page());
/* Map new page at a self virtual address temporarily */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/* Clear the page */
memset((void *)vaddr, 0, PAGE_SIZE);
/* Remove temporary mapping */
l4_unmap((void *)vaddr, 1, self_tid());
#if defined(SHM_DISJOINT_VADDR_POOL)
/* Map the page to task */
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED.
#endif
spin_lock(&page->lock);
/* vma's swap file owns this page */
page->owner = fault->vma->owner;
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
/* The offset of this page in its owner file */
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->count++;
page->virtual = page_align(fault->address);
spin_unlock(&page->lock);
}
return 0;
}
/*
* Page fault model:
*
* A page is anonymous (e.g. stack)
* - page needs read access:
* action: map the zero page.
* - page needs write access:
* action: allocate ZI page and map that. Swap file owns the page.
* - page is swapped to swap:
* action: read back from swap file into new page.
*
* A page is file-backed but private (e.g. .data section)
* - page needs read access:
* action: read the page from its file.
* - page is swapped out before being private. (i.e. invalidated)
* action: read the page from its file. (original file)
* - page is swapped out after being private.
* action: read the page from its file. (swap file)
* - page needs write access:
* action: allocate new page, declare page as private, change its
* owner to swap file.
*
* A page is file backed but not-private, and read-only. (e.g. .text section)
* - page needs read access:
* action: read in the page from its file.
* - page is swapped out. (i.e. invalidated)
* action: read in the page from its file.
* - page needs write access:
* action: forbidden, kill task?
*
* A page is file backed but not-private, and read/write. (e.g. any data file.)
* - page needs read access:
* action: read in the page from its file.
* - page is flushed back to its original file. (i.e. instead of swap)
* action: read in the page from its file.
* - page needs write access:
* action: read the page in, give write access.
*/
void do_page_fault(struct fault_data *fault)
{
unsigned int vma_flags = (fault->vma) ? fault->vma->flags : VM_NONE;
unsigned int reason = fault->reason;
/* vma flags show no access */
if (vma_flags & VM_NONE) {
printf("Illegal access, tid: %d, address: %x\n",
fault->task->tid, fault->address);
BUG();
}
/* The access reason is not included in the vma's listed flags */
if (!(reason & vma_flags)) {
printf("Illegal access, tid: %d, address: %x\n",
fault->task->tid, fault->address);
BUG();
}
if ((reason & VM_EXEC) && (vma_flags & VM_EXEC)) {
printf("Exec faults unsupported yet.\n");
BUG(); /* Can't handle this yet. */
}
/* Handle legitimate read faults on the vma */
if (vma_flags & VMA_ANON)
do_anon_page(fault);
else
do_file_page(fault);
}
void vm_file_pager_read_page(struct fault_data *fault, void *dest_page)
{
/* Fault's offset in its vma */
unsigned long vma_off_pfn = __pfn(fault->address) - fault->vma->pfn_start;
/* Fault's offset in the file */
unsigned long f_off_pfn = fault->vma->f_offset + vma_off_pfn;
/* The address of page in the file */
void *file_page = (void *)(fault->vma->owner->inode.i_addr +
__pfn_to_addr(f_off_pfn));
/*
* Map the memfile's page into virtual memory.
*
* FIXME: Need to find a way of properly generating virtual addresses
* rather than one-to-one conversion.
*/
file_page = l4_map_helper(file_page, 1);
/* Copy it into destination page */
memcpy(dest_page, file_page, PAGE_SIZE);
}
void vm_file_pager_write_page(struct fault_data *f, void *p)
{
}
void vm_swapper_read_page(struct fault_data *fault, void *p)
{
}
void vm_swapper_write_page(struct fault_data *f, void *p) { }
/* Pager for file pages */
struct vm_pager default_file_pager = {
.ops = {
.read_page = vm_file_pager_read_page,
.write_page = vm_file_pager_write_page,
},
};
/* Swap pager for anonymous and private pages */
struct vm_pager swap_pager = {
.ops = {
.read_page = vm_swapper_read_page,
.write_page = vm_swapper_write_page,
},
};
void page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
{
struct fault_data fault = {
/* Fault data from kernel */
.kdata = fkdata,
};
printf("%s: Handling fault from %d.\n", __TASKNAME__, sender);
BUG_ON(sender == 0);
/* Get pager specific task info */
BUG_ON(!(fault.task = find_task(sender)));
/* Extract fault reason, fault address etc. in generic format */
set_generic_fault_params(&fault);
/* Get vma info */
if (!(fault.vma = find_vma(fault.address,
&fault.task->vm_area_list)))
printf("Hmm. No vma for faulty region. "
"Bad things will happen.\n");
/* Handle the actual fault */
do_page_fault(&fault);
}
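
Since copy_on_write_vma() above is the least obvious part of this file, here is a self-contained sketch of its grow-or-create decision, with the shadow reduced to a bare pfn pair; the list handling, f_offset bookkeeping and struct vm_area itself are left out, and the numbers are made up purely for illustration.

#include <stdio.h>

/* Stand-in for a shadow vma: just a [start, end) pfn range, end exclusive */
struct range { unsigned long start, end; };

/*
 * Grow an existing shadow range by one page if the faulting pfn is adjacent
 * to it, the way copy_on_write_vma() grows its shadows; otherwise report
 * that a new one-page shadow must be created.
 */
static int grow_or_create(struct range *shadow, unsigned long faulty_pfn)
{
	if (faulty_pfn == shadow->start - 1) {
		shadow->start = faulty_pfn;		/* grow downwards */
		return 0;
	}
	if (faulty_pfn == shadow->end) {		/* end is exclusive */
		shadow->end = faulty_pfn + 1;		/* grow upwards */
		return 0;
	}
	return 1;					/* not adjacent: new shadow needed */
}

int main(void)
{
	struct range shadow = { 0x105, 0x106 };		/* first COW fault at pfn 0x105 */

	grow_or_create(&shadow, 0x106);			/* next fault extends the end */
	grow_or_create(&shadow, 0x104);			/* another one extends the start */
	printf("shadow now covers pfns 0x%lx-0x%lx\n", shadow.start, shadow.end);
	return 0;
}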

tasks/mm0/src/init.c (Normal file, 91 lines)

@@ -0,0 +1,91 @@
/*
* Initialise the system.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <kdata.h>
#include <memory.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <task.h>
#include <shm.h>
void init_utcb(void)
{
struct task_ids ids;
void *utcb_page = alloc_page(1); /* Allocate a utcb page */
l4_getid(&ids);
l4_map(utcb_page, __L4_ARM_Utcb(), 1, MAP_USR_RW_FLAGS, ids.tid);
}
void init_mm(struct initdata *initdata)
{
/* Initialise the page and bank descriptors */
init_physmem(initdata, membank);
printf("%s: Initialised physmem.\n", __TASKNAME__);
/* Initialise the page allocator on first bank. */
init_page_allocator(membank[0].free, membank[0].end);
printf("%s: Initialised page allocator.\n", __TASKNAME__);
/* Initialise the zero page */
init_zero_page();
printf("%s: Initialised zero page.\n", __TASKNAME__);
init_utcb();
printf("%s: Initialised own utcb.\n", __TASKNAME__);
/* Initialise the pager's memory allocator */
kmalloc_init();
printf("%s: Initialised kmalloc.\n", __TASKNAME__);
shm_init();
printf("%s: Initialised shm structures.\n", __TASKNAME__);
/* Give the kernel some memory to use for its allocators */
l4_kmem_grant(__pfn(alloc_page(__pfn(SZ_1MB))), __pfn(SZ_1MB));
}
/* Create temporary run-time files in memory to test with mmap */
void init_boot_files(struct initdata *initdata)
{
struct bootdesc *bd = initdata->bootdesc;
int total_files = bd->total_images;
struct vm_file *memfile;
struct svc_image *img;
memfile = kzalloc(sizeof(struct vm_file) * total_files);
initdata->memfile = memfile;
BUG();
for (int i = BOOTDESC_IMAGE_START; i < total_files; i++) {
img = &bd->images[i];
/*
* I have left the i_addr as physical on purpose. The inode is
* not a readily usable memory address, it's simply a unique key
* that represents that file. Here, we use the physical address
* of the memory file as that key. The pager must take action in
* order to make use of it.
*/
memfile[i].inode.i_addr = img->phys_start;
memfile[i].length = img->phys_end - img->phys_start;
memfile[i].pager = &default_file_pager;
INIT_LIST_HEAD(&memfile[i].page_cache_list);
}
}
void initialise(void)
{
request_initdata(&initdata);
init_mm(&initdata);
init_boot_files(&initdata);
// printf("INITTASK: Initialised mock-up bootfiles.\n");
init_pm(&initdata);
// printf("INITTASK: Initialised the memory/process manager.\n");
}

tasks/mm0/src/kdata.c (Normal file, 105 lines)

@@ -0,0 +1,105 @@
/*
* Requesting system information from kernel during init.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <l4lib/arch/syscalls.h>
#include <kdata.h>
#include <string.h>
#include INC_API(kip.h)
#include <kmalloc/kmalloc.h>
/* Kernel data acquired during initialisation */
struct initdata initdata;
#define BOOTDESC_PREALLOC_SIZE 128
static char bootdesc_memory[BOOTDESC_PREALLOC_SIZE]; /* 128 bytes */
void print_bootdesc(struct bootdesc *bd)
{
for (int i = 0; i < bd->total_images; i++) {
printf("Task Image: %d\n", i);
printf("Name: %s\n", bd->images[i].name);
printf("Start: 0x%x\n", bd->images[i].phys_start);
printf("End: 0x%x\n", bd->images[i].phys_end);
}
}
void print_pfn_range(int pfn, int size)
{
unsigned int addr = pfn << PAGE_BITS;
unsigned int end = (pfn + size) << PAGE_BITS;
printf("Used: 0x%x - 0x%x\n", addr, end);
}
void print_page_map(struct page_bitmap *map)
{
unsigned int start_pfn = 0;
unsigned int total_used = 0;
int numpages = 0;
printf("Pages start at address 0x%x\n", map->pfn_start << PAGE_BITS);
printf("Pages end at address 0x%x\n", map->pfn_end << PAGE_BITS);
printf("The used page areas are:\n");
for (int i = 0; i < (PHYSMEM_TOTAL_PAGES >> 5); i++) {
for (int x = 0; x < WORD_BITS; x++) {
if (map->map[i] & (1 << x)) { /* A used page found? */
if (!start_pfn) /* First such page found? */
start_pfn = (WORD_BITS * i) + x;
total_used++;
numpages++; /* Increase number of pages */
} else { /* Either used pages ended or were never found */
if (start_pfn) { /* We had a used page */
/* Finished end of used range.
* Print and reset. */
print_pfn_range(start_pfn, numpages);
start_pfn = 0;
numpages = 0;
}
}
}
}
printf("Total of %d pages. %d Kbytes.\n", total_used, total_used << 2);
}
int request_initdata(struct initdata *initdata)
{
int err;
int bootdesc_size;
/* Read all used physical page information in a bitmap. */
if ((err = l4_kread(KDATA_PAGE_MAP, &initdata->page_map)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_PAGE_MAP request.\n");
goto error;
}
print_page_map(&initdata->page_map);
/* Read the boot descriptor size */
if ((err = l4_kread(KDATA_BOOTDESC_SIZE, &bootdesc_size)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_BOOTDESC_SIZE request.\n");
goto error;
}
if (bootdesc_size > BOOTDESC_PREALLOC_SIZE) {
printf("Insufficient preallocated memory for bootdesc. "
"Size too big.\n");
goto error;
}
/* Get preallocated bootdesc memory */
initdata->bootdesc = (struct bootdesc *)&bootdesc_memory;
/* Read the boot descriptor */
if ((err = l4_kread(KDATA_BOOTDESC, initdata->bootdesc)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_BOOTDESC request.\n");
goto error;
}
return 0;
error:
printf("FATAL: Inittask failed during initialisation. exiting.\n");
return err;
}

tasks/mm0/src/lib/bit.c (Normal file, 98 lines)

@@ -0,0 +1,98 @@
/*
* Bit manipulation functions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/bit.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <stdio.h>
#include INC_GLUE(memory.h)
/* Emulation of ARM's CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
int success = 0;
int i;
for(i = 0; i < limit; i++) {
/* Find first unset bit */
if (!(word[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i))) {
/* Set it */
word[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
success = 1;
break;
}
}
/* Return bit just set */
if (success)
return i;
else
return -1;
}
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
int nbits)
{
int i = 0, first = 0, last = 0, found = 0;
/* Can't allocate more than the limit */
if (nbits > limit)
return -1;
/* This is a state machine that checks n contiguous free bits. */
while (i + nbits < limit) {
first = i;
last = i;
while (!(word[BITWISE_GETWORD(last)] & BITWISE_GETBIT(last))) {
last++;
i++;
if (last == first + nbits) {
found = 1;
break;
}
}
if (found)
break;
i++;
}
/* If found, set the bits */
if (found) {
for (int x = first; x < first + nbits; x++)
word[BITWISE_GETWORD(x)] |= BITWISE_GETBIT(x);
return first;
} else
return -1;
}
int check_and_clear_bit(u32 *word, int bit)
{
/* Check that bit was set */
if (word[BITWISE_GETWORD(bit)] & BITWISE_GETBIT(bit)) {
word[BITWISE_GETWORD(bit)] &= ~BITWISE_GETBIT(bit);
return 0;
} else {
printf("Trying to clear already clear bit\n");
return -1;
}
}
int check_and_clear_contig_bits(u32 *word, int first, int nbits)
{
for (int i = first; i < first + nbits; i++)
if (check_and_clear_bit(word, i) < 0)
return -1;
return 0;
}
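
As a quick usage illustration of the contiguous first-fit search above, here is a self-contained sketch. The BITWISE_GETWORD/BITWISE_GETBIT forms are assumed (the real macros are in lib/bit.h, which this view does not include), and the search is re-stated in a trimmed form rather than calling the functions above.

#include <stdio.h>

typedef unsigned int u32;

/* Assumed forms of the helper macros from lib/bit.h (not shown in this commit) */
#define BITWISE_GETWORD(bit)	((bit) >> 5)		/* which 32-bit word */
#define BITWISE_GETBIT(bit)	(1 << ((bit) & 0x1f))	/* which bit within it */

/* Same first-fit contiguous allocation idea as above, trimmed to the essentials */
static int alloc_contig(u32 *map, int limit, int nbits)
{
	for (int first = 0; first + nbits <= limit; first++) {
		int all_clear = 1;
		for (int i = first; i < first + nbits; i++)
			if (map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)) {
				all_clear = 0;
				break;
			}
		if (!all_clear)
			continue;
		for (int i = first; i < first + nbits; i++)
			map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
		return first;
	}
	return -1;
}

int main(void)
{
	u32 map[2] = { 0x0000000f, 0 };	/* bits 0-3 already taken */

	printf("3 bits -> first bit %d\n", alloc_contig(map, 64, 3));	/* prints 4 */
	printf("3 more -> first bit %d\n", alloc_contig(map, 64, 3));	/* prints 7 */
	return 0;
}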


@@ -0,0 +1,63 @@
/*
* Used for thread and space ids.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/idpool.h>
#include <kmalloc/kmalloc.h>
#include INC_GLUE(memory.h)
#include <stdio.h>
struct id_pool *id_pool_new_init(int totalbits)
{
int nwords = BITWISE_GETWORD(totalbits);
struct id_pool *new = kzalloc((nwords * SZ_WORD)
+ sizeof(struct id_pool));
new->nwords = nwords;
return new;
}
int id_new(struct id_pool *pool)
{
int id = find_and_set_first_free_bit(pool->bitmap,
pool->nwords * WORD_BITS);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This finds n contiguous free ids, allocates and returns the first one */
int ids_new_contiguous(struct id_pool *pool, int numids)
{
int id = find_and_set_first_free_contig_bits(pool->bitmap,
pool->nwords * WORD_BITS,
numids);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This deletes a list of contiguous ids given the first one and number of ids */
int ids_del_contiguous(struct id_pool *pool, int first, int numids)
{
int ret;
if (pool->nwords * WORD_BITS < first + numids)
return -1;
if ((ret = check_and_clear_contig_bits(pool->bitmap, first, numids)))
printf("%s: Error: Invalid argument range.\n", __FUNCTION__);
return ret;
}
int id_del(struct id_pool *pool, int id)
{
int ret;
if (pool->nwords * WORD_BITS < id)
return -1;
if ((ret = check_and_clear_bit(pool->bitmap, id)) < 0)
printf("%s: Error: Could not delete id.\n", __FUNCTION__);
return ret;
}

tasks/mm0/src/lib/vaddr.c (Normal file, 39 lines)

@@ -0,0 +1,39 @@
/*
* This module allocates an unused virtual address range for shm segments.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/bit.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include <lib/vaddr.h>
#include <stdio.h>
void vaddr_pool_init(struct id_pool *pool, unsigned long start, unsigned long end)
{
pool = id_pool_new_init(__pfn(end - start));
}
void *vaddr_new(struct id_pool *pool, int npages)
{
unsigned int shm_vpfn;
if ((int)(shm_vpfn = ids_new_contiguous(pool, npages)) < 0)
return 0;
return (void *)(__pfn_to_addr(shm_vpfn) + SHM_AREA_START);
}
int vaddr_del(struct id_pool *pool, void *vaddr, int npages)
{
unsigned long idpfn = __pfn(page_align(vaddr) - SHM_AREA_START);
if (ids_del_contiguous(pool, idpfn, npages) < 0) {
printf("%s: Invalid address range returned to "
"virtual address pool.\n", __FUNCTION__);
return -1;
}
return 0;
}

tasks/mm0/src/memory.c (Normal file, 92 lines)

@@ -0,0 +1,92 @@
/*
* Initialise the memory structures.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <kdata.h>
#include <memory.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/generic/space.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <memory.h>
struct membank membank[1];
struct page *page_array;
void *phys_to_virt(void *addr)
{
return addr + INITTASK_OFFSET;
}
void *virt_to_phys(void *addr)
{
return addr - INITTASK_OFFSET;
}
/* Allocates page descriptors and initialises them using page_map information */
void init_physmem(struct initdata *initdata, struct membank *membank)
{
struct page_bitmap *pmap = &initdata->page_map;
int npages = pmap->pfn_end - pmap->pfn_start;
/* Allocation marks for the struct page array */
int pg_npages, pg_spfn, pg_epfn;
unsigned long ffree_addr;
/*
* Means the page array won't map one to one to pfns. That's ok,
* but we don't allow it for now.
*/
BUG_ON(pmap->pfn_start);
membank[0].start = __pfn_to_addr(pmap->pfn_start);
membank[0].end = __pfn_to_addr(pmap->pfn_end);
/* First find the first free page after last used page */
for (int i = 0; i < npages; i++)
if ((pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
membank[0].free = (i + 1) * PAGE_SIZE;
BUG_ON(membank[0].free >= membank[0].end);
/*
* One struct page for every physical page. Calculate how many pages
* needed for page structs, start and end pfn marks.
*/
pg_npages = __pfn((sizeof(struct page) * npages));
/* These are relative pfn offsets to the start of the memory bank */
pg_spfn = __pfn(membank[0].free) - __pfn(membank[0].start);
pg_epfn = pg_spfn + pg_npages;
/* Use free pages from the bank as the space for struct page array */
membank[0].page_array = l4_map_helper((void *)membank[0].free,
pg_npages);
/* Update free memory left */
membank[0].free += pg_npages * PAGE_SIZE;
/* Update page bitmap for the pages used for the page array */
for (int i = pg_spfn; i < pg_epfn; i++)
pmap->map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
/* Initialise the page array */
for (int i = 0; i < npages; i++) {
INIT_LIST_HEAD(&membank[0].page_array[i].list);
/* Set use counts for pages the kernel has already used up */
if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
membank[0].page_array[i].count = -1;
else /* Last page used +1 is free */
ffree_addr = (i + 1) * PAGE_SIZE;
}
/* First free address must come up the same for both */
BUG_ON(ffree_addr != membank[0].free);
/* Set global page array to this bank's array */
page_array = membank[0].page_array;
}
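
The sizing of the struct page array above folds several conversions into single expressions; here is a short worked example, with a made-up bank size and descriptor size purely for illustration (neither figure is taken from the build).

#include <stdio.h>

#define PAGE_BITS	12				/* assumed 4 KB pages */
#define __pfn(x)	((unsigned long)(x) >> PAGE_BITS)

int main(void)
{
	/* Made-up figures: a 64 MB bank and a 32-byte struct page */
	unsigned long bank_size = 64 * 1024 * 1024;
	unsigned long sizeof_page = 32;

	unsigned long npages = __pfn(bank_size);		/* 16384 page frames in the bank */
	unsigned long array_bytes = sizeof_page * npages;	/* 512 KB of page descriptors */
	unsigned long pg_npages = __pfn(array_bytes);		/* 128 pages reserved for the array */

	printf("%lu frames -> %lu bytes of struct page -> %lu pages reserved\n",
	       npages, array_bytes, pg_npages);
	return 0;
}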

tasks/mm0/src/mmap.c (Normal file, 489 lines)

@@ -0,0 +1,489 @@
/*
* mmap/munmap and friends.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <vm_area.h>
#include <kmalloc/kmalloc.h>
#include INC_API(errno.h)
#include <posix/sys/types.h>
#include <task.h>
#include <mmap.h>
#include <l4lib/arch/syscalls.h>
static struct vm_file devzero;
/* Swap related bookkeeping.
static struct vm_file shm_swap_file;
static struct id_pool *swap_file_offset_pool;
*/
/* mmap system call implementation */
int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
int flags, int fd, off_t offset)
{
return 0;
}
/* TODO: This is to be implemented when fs0 is ready. */
int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *task)
{
// unsigned long npages = __pfn(size);
struct vm_area *vma = find_vma((unsigned long)addr,
&task->vm_area_list);
if (!vma)
return -EINVAL;
/* Must check if this is a shadow copy or not */
if (vma->flags & VMA_COW) {
; /* ... Fill this in. ... */
}
/* TODO:
* Flush the vma's pages back to their file. Perhaps add a dirty bit
* to the vma so that this can be completely avoided for clean vmas?
* For anon pages this is the swap file. For real file-backed pages
* it's the real file. However, this can't be fully implemented yet since
* we don't have FS0 yet.
*/
return 0;
}
/*
* This releases a physical page struct from its owner and
* frees the page back to the page allocator.
*/
int page_release(struct page *page)
{
spin_lock(&page->lock);
page->count--;
BUG_ON(page->count < -1);
if (page->count == -1) {
/* Unlink the page from its owner's list */
list_del_init(&page->list);
/* Zero out the fields */
page->owner = 0;
page->flags = 0;
page->f_offset = 0;
page->virtual = 0;
/*
* No refs to page left, and since every physical memory page
* comes from the page allocator, we return it back.
*/
free_page((void *)page_to_phys(page));
}
spin_unlock(&page->lock);
return 0;
}
/*
* Freeing and unmapping of vma pages:
*
* For a vma that is about to be split, shrunk or destroyed, this function
* finds out about the physical pages in memory that represent the vma,
* reduces their refcount, and if they're unused, frees them back to the
* physical page allocator, and finally unmaps those corresponding virtual
* addresses from the unmapper task's address space. This sequence is
* somewhat a rewinding of the actions that the page fault handler takes
* when the vma was faulted by the process.
*/
int vma_release_pages(struct vm_area *vma, struct tcb *task,
unsigned long pfn_start, unsigned long pfn_end)
{
unsigned long f_start, f_end;
struct page *page, *n;
/* Assume vma->pfn_start is lower than or equal to pfn_start */
BUG_ON(vma->pfn_start > pfn_start);
/* Assume vma->pfn_end is higher or equal to pfn_end */
BUG_ON(vma->pfn_end < pfn_end);
/* Find the file offsets of the range to be freed. */
f_start = vma->f_offset + pfn_start - vma->pfn_start;
f_end = vma->f_offset + pfn_end - vma->pfn_start;
list_for_each_entry_safe(page, n, &vma->owner->page_cache_list, list) {
if (page->f_offset >= f_start && page->f_offset < f_end) {
l4_unmap((void *)virtual(page), 1, task->tid);
page_release(page);
}
}
return 0;
}
int vma_unmap(struct vm_area **orig, struct vm_area **new,
unsigned long, unsigned long, struct tcb *);
/*
* This is called by every vma modifier function in vma_unmap(). This in turn
* calls vma_unmap recursively to modify the shadow vmas, the same way the
* actual vmas get modified. Only COW vmas would need to do this recursion
* and the max level of recursion is one, since only one level of shadows exist.
*/
int vma_unmap_shadows(struct vm_area *vma, struct tcb *task, unsigned long pfn_start,
unsigned long pfn_end)
{
struct vm_area *shadow, *n;
/* Now do all shadows */
list_for_each_entry_safe(shadow, n, &vma->shadow_list,
list) {
BUG_ON(!(vma->flags & VMA_COW));
if (shadow->pfn_start >= pfn_start &&
shadow->pfn_end <= pfn_end) {
struct vm_area *split_shadow;
/* This may result in shrink/destroy/split of the shadow */
vma_unmap(&shadow, &split_shadow, pfn_start, pfn_end, task);
if (shadow && split_shadow)
list_add_tail(&split_shadow->list,
&shadow->list);
/* FIXME: Is this all to be done here??? Find what to do here. */
BUG();
}
}
return 0;
}
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long f_offset,
struct vm_file *owner)
{
struct vm_area *vma;
/* Initialise new area */
if (!(vma = kzalloc(sizeof(struct vm_area))))
return 0;
vma->pfn_start = pfn_start;
vma->pfn_end = pfn_start + npages;
vma->flags = flags;
vma->f_offset = f_offset;
vma->owner = owner;
INIT_LIST_HEAD(&vma->list);
INIT_LIST_HEAD(&vma->shadow_list);
return vma;
}
/* TODO: vma_destroy/shrink/split should also handle swap file modification */
/* Frees and unlinks a vma from its list. TODO: Add list locking */
int vma_destroy(struct vm_area *vma, struct tcb *task)
{
struct vm_area *shadow, *n;
/* Release the vma pages */
vma_release_pages(vma, task, vma->pfn_start, vma->pfn_end);
/* Free all shadows, if any. */
list_for_each_entry_safe(shadow, n, &vma->shadow_list, list) {
/* Release all shadow pages */
vma_release_pages(shadow, task, shadow->pfn_start, shadow->pfn_end);
list_del(&shadow->list);
kfree(shadow);
}
/* Unlink and free the vma itself */
list_del(&vma->list);
if (kfree(vma) < 0)
BUG();
return 0;
}
/* This splits a vma, splitter region must be in the *middle* of original vma */
struct vm_area *vma_split(struct vm_area *vma, struct tcb *task,
unsigned long pfn_start, unsigned long pfn_end)
{
struct vm_area *new, *shadow, *n;
/* Allocate an uninitialised vma first */
if (!(new = vma_new(0, 0, 0, 0, 0)))
return 0;
/*
* Some sanity checks to show that splitter range does end up
* producing two smaller vmas.
*/
BUG_ON(vma->pfn_start >= pfn_start || vma->pfn_end <= pfn_end);
/* Release the pages before modifying the original vma */
vma_release_pages(vma, task, pfn_start, pfn_end);
new->pfn_end = vma->pfn_end;
new->pfn_start = pfn_end;
new->f_offset = vma->f_offset + new->pfn_start - vma->pfn_start;
vma->pfn_end = pfn_start;
new->flags = vma->flags;
new->owner = vma->owner;
/* Modify the shadows accordingly first. They may
* split/shrink or get completely destroyed or stay still. */
vma_unmap_shadows(vma, task, pfn_start, pfn_end);
/*
* Now split the modified shadows list into two vmas:
* If the file was COW and its vma had split, vma_new would have
* a valid value and as such the shadows must be separated into
* the two new vmas according to which one they belong to.
*/
list_for_each_entry_safe(shadow, n, &vma->shadow_list,
list) {
BUG_ON(!(vma->flags & VMA_COW));
BUG_ON(!(new->flags & VMA_COW));
if (shadow->pfn_start >= new->pfn_start &&
shadow->pfn_end <= new->pfn_end) {
list_del_init(&shadow->list);
list_add(&shadow->list, &new->shadow_list);
} else
BUG_ON(!(shadow->pfn_start >= vma->pfn_start &&
shadow->pfn_end <= vma->pfn_end));
}
return new;
}
/*
* For written anonymous regions swapfile segments are allocated dynamically.
* When vma regions are modified, these allocations must be re-adjusted.
* This call handles this adjustment as well as the vma.
*/
int vma_swapfile_realloc(struct vm_area *vma, unsigned long pfn_start,
unsigned long pfn_end)
{
/* TODO: Reslot in swapfile */
BUG();
return 0;
}
/* This shrinks the vma from *one* end only, either start or end */
int vma_shrink(struct vm_area *vma, struct tcb *task, unsigned long pfn_start,
unsigned long pfn_end)
{
unsigned long diff;
BUG_ON(pfn_start >= pfn_end);
/* FIXME: Shadows are currently buggy - TBD */
if (!list_empty(&vma->shadow_list)) {
BUG();
vma_swapfile_realloc(vma, pfn_start, pfn_end);
return 0;
}
/* Release the pages before modifying the original vma */
vma_release_pages(vma, task, pfn_start, pfn_end);
/* Shrink from the beginning */
if (pfn_start > vma->pfn_start) {
diff = pfn_start - vma->pfn_start;
vma->f_offset += diff;
vma->pfn_start = pfn_start;
/* Shrink from the end */
} else if (pfn_end < vma->pfn_end) {
diff = vma->pfn_end - pfn_end;
vma->pfn_end = pfn_end;
} else
BUG();
return vma_unmap_shadows(vma, task, pfn_start, pfn_end);
}
/*
* Unmaps the given region from a vma. Depending on the region and vma range,
* this may result in either shrinking, splitting or destruction of the vma.
*/
int vma_unmap(struct vm_area **actual, struct vm_area **split,
unsigned long pfn_start, unsigned long pfn_end, struct tcb *task)
{
struct vm_area *vma = *actual;
struct vm_area *vma_new = 0;
/* Split needed? */
if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end) {
if (!(vma_new = vma_split(vma, task, pfn_start, pfn_end)))
return -ENOMEM;
list_add_tail(&vma_new->list, &vma->list);
/* Shrink needed? */
} else if (((vma->pfn_start == pfn_start) && (vma->pfn_end > pfn_end))
|| ((vma->pfn_start < pfn_start) && (vma->pfn_end == pfn_end)))
vma_shrink(vma, task, pfn_start, pfn_end);
/* Destroy needed? */
else if ((vma->pfn_start >= pfn_start) && (vma->pfn_end <= pfn_end)) {
/* NOTE: VMA can't be referred after this point. */
vma_destroy(vma, task);
vma = 0;
} else
BUG();
/* Update actual pointers */
*actual = vma;
*split = vma_new;
return 0;
}
/* Unmaps given address range from its vma. Releases those pages in that vma. */
int do_munmap(void *vaddr, unsigned long size, struct tcb *task)
{
unsigned long npages = __pfn(size);
unsigned long pfn_start = __pfn(vaddr);
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma, *vma_new = 0;
int err;
/* Check if any such vma exists */
if (!(vma = find_vma((unsigned long)vaddr, &task->vm_area_list)))
return -EINVAL;
/*
* If end of the range is outside of the vma that has the start
* address, we ignore the rest and assume end is the end of that vma.
* TODO: Find out how posix handles this.
*/
if (pfn_end > vma->pfn_end) {
printf("%s: %s: Warning, unmap end 0x%x beyond vma range. "
"Ignoring.\n", __TASKNAME__, __FUNCTION__,
__pfn_to_addr(pfn_end));
pfn_end = vma->pfn_end;
}
if ((err = vma_unmap(&vma, &vma_new, pfn_start, pfn_end, task)) < 0)
return err;
#if 0
mod_phys_pages:
/* The stage where the actual pages are unmapped from the page tables */
pgtable_unmap:
/* TODO:
* - Find out if the vma is cow, and contains shadow vmas.
* - Remove and free shadow vmas or the real vma, or shrink them if applicable.
* - Free the swap file segment for the vma if vma is private (cow).
* - Reduce refcount for the in-memory pages.
* - If refcount is zero (they could be shared!), either add pages to some page
* cache, or simpler the better, free the actual pages back to the page allocator.
* - l4_unmap() the corresponding virtual region from the page tables.
*/
#endif
return 0;
}
static struct vm_area *
is_vma_mergeable(unsigned long pfn_start, unsigned long pfn_end,
unsigned int flags, struct vm_area *vma)
{
/* TODO:
* The swap implementation is too simple for now. The vmas on swap
* are stored non-sequentially, and adjacent vmas don't imply adjacent
* file position on swap. So at the moment merging swappable vmas
* doesn't make sense. But this is going to change in the future.
*/
if (vma->flags & VMA_COW) {
BUG();
/* FIXME: XXX: Think about this! */
}
/* Check for vma adjacency */
if ((vma->pfn_start == pfn_end) && (vma->flags == flags))
return vma;
if ((vma->pfn_end == pfn_start) && (vma->flags == flags))
return vma;
return 0;
}
/*
* Finds an unmapped virtual memory area for the given parameters. If it
* overlaps with an existing vma, it returns -1, if it is adjacent to an
* existing vma and the flags match, it returns the adjacent vma. Otherwise it
* returns 0.
*/
int find_unmapped_area(struct vm_area **existing, struct vm_file *file,
unsigned long pfn_start, unsigned long npages,
unsigned int flags, struct list_head *vm_area_head)
{
struct vm_area *vma;
unsigned long pfn_end = pfn_start + npages;
*existing = 0;
list_for_each_entry(vma, vm_area_head, list) {
/* Check overlap */
if ((vma->pfn_start <= pfn_start) &&
(pfn_start < vma->pfn_end)) {
printf("%s: VMAs overlap.\n", __FUNCTION__);
return -1; /* Overlap */
}
if ((vma->pfn_start < pfn_end) &&
(pfn_end < vma->pfn_end)) {
printf("%s: VMAs overlap.\n", __FUNCTION__);
return -1; /* Overlap */
}
if (is_vma_mergeable(pfn_start, pfn_end, flags, vma)) {
*existing = vma;
return 0;
}
}
return 0;
}
/*
* Maps the given file with given flags at the given page offset to the given
* task's address space at the specified virtual memory address and length.
*
* The actual paging in/out of the file from/into memory pages is handled by
* the file's pager upon page faults.
*/
int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages)
{
struct vm_area *vma;
unsigned long pfn_start = __pfn(map_address);
if (!mapfile) {
if (flags & VMA_ANON) {
mapfile = &devzero;
f_offset = 0;
} else
BUG();
} else if (pages > (__pfn(page_align_up(mapfile->length)) - f_offset)) {
printf("%s: Trying to map %d pages from page %d, "
"but file length is %d\n", __FUNCTION__, pages,
f_offset, __pfn(page_align_up(mapfile->length)));
return -EINVAL;
}
printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__, map_address,
map_address + pages * PAGE_SIZE);
/* See if it overlaps or is mergeable to an existing vma. */
if (find_unmapped_area(&vma, mapfile, pfn_start, pages, flags,
&t->vm_area_list) < 0)
return -EINVAL; /* Indicates overlap. */
/* Mergeable vma returned? */
if (vma) {
if (vma->pfn_end == pfn_start)
vma->pfn_end = pfn_start + pages;
else {
vma->f_offset -= vma->pfn_start - pfn_start;
/* Check if adjusted yields the original */
BUG_ON(vma->f_offset != f_offset);
vma->pfn_start = pfn_start;
}
} else { /* Initialise new area */
if (!(vma = vma_new(pfn_start, pages, flags, f_offset,
mapfile)))
return -ENOMEM;
list_add(&vma->list, &t->vm_area_list);
}
return 0;
}
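
The f_offset bookkeeping in vma_split() above is the easiest place to lose track of the arithmetic, so here is a self-contained walk-through with made-up pfn numbers; the structure is trimmed to the three fields the split actually touches.

#include <stdio.h>

/* Trimmed stand-in for struct vm_area: only the fields the split touches */
struct varea { unsigned long pfn_start, pfn_end, f_offset; };

int main(void)
{
	/* Assumed example: a file mapping over pfns [0x10, 0x30), starting at file page 5 */
	struct varea head = { 0x10, 0x30, 5 };
	struct varea tail;
	unsigned long unmap_start = 0x18, unmap_end = 0x20;	/* strictly inside the vma */

	/* Same assignments vma_split() makes: the part after the hole becomes the new vma... */
	tail.pfn_end = head.pfn_end;
	tail.pfn_start = unmap_end;
	tail.f_offset = head.f_offset + tail.pfn_start - head.pfn_start;
	/* ...and the original vma keeps the part before the hole */
	head.pfn_end = unmap_start;

	printf("head: pfns 0x%lx-0x%lx, file page %lu\n",
	       head.pfn_start, head.pfn_end, head.f_offset);
	printf("tail: pfns 0x%lx-0x%lx, file page %lu\n",
	       tail.pfn_start, tail.pfn_end, tail.f_offset);
	return 0;
}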

tasks/mm0/src/shm.c (Normal file, 229 lines)

@@ -0,0 +1,229 @@
/*
* Copyright (C) 2007 Bahadir Balban
*
* Posix shared memory implementation
*/
#include <shm.h>
#include <stdio.h>
#include <task.h>
#include <mmap.h>
#include <l4/lib/string.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <lib/idpool.h>
#include <lib/vaddr.h>
#include <lib/spinlock.h>
#include <l4/api/errno.h>
#include <l4/lib/list.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <posix/sys/ipc.h>
#include <posix/sys/shm.h>
#include <posix/sys/types.h>
/* The list of shared memory areas that are already set up and working */
static struct list_head shm_desc_list;
/* The single global in-memory swap file for shared memory segments */
/* Unique shared memory ids */
static struct id_pool *shm_ids;
/* Globally disjoint shm virtual address pool */
static struct id_pool *shm_vaddr_pool;
void shm_init()
{
INIT_LIST_HEAD(&shm_desc_list);
/* Initialise shm id pool */
shm_ids = id_pool_new_init(SHM_AREA_MAX);
/* Initialise the global shm virtual address pool */
vaddr_pool_init(shm_vaddr_pool, SHM_AREA_START, SHM_AREA_END);
}
/*
* TODO:
* Implement means to return back ipc results, i.e. sender always does ipc_sendrecv()
* and it blocks on its own receive queue. Server then responds back without blocking.
*
* Later on: mmap can be done using vm_areas and physical pages can be accessed by vm_areas.
*/
static int do_shmat(struct shm_descriptor *shm, void *shm_addr, int shmflg,
l4id_t tid)
{
struct tcb *task = find_task(tid);
int err;
if (!task) {
printf("%s:%s: Cannot find caller task with tid %d\n",
__TASKNAME__, __FUNCTION__, tid);
BUG();
}
/*
* Currently shared memory base addresses are the same among all
* processes for every unique shm segment. They line up easier on
* the shm swap file this way. Also currently shm_addr argument is
* ignored, and mm0 allocates shm segment addresses.
*/
if (shm->shm_addr)
shm_addr = shm->shm_addr;
else
shm_addr = vaddr_new(shm_vaddr_pool, __pfn(shm->size));
BUG_ON(!is_page_aligned(shm_addr));
/*
* mmap the area to the process as shared. Page fault handler would
* handle allocating and paging-in the shared pages.
*
* For anon && shared pages do_mmap() handles allocation of the
* shm swap file and the file offset for the segment. The segment can
* be identified because segment virtual address is globally unique
* per segment and it's the same for all the system tasks.
*/
if ((err = do_mmap(0, 0, task, (unsigned long)shm_addr,
VM_READ | VM_WRITE | VMA_ANON | VMA_SHARED,
__pfn(page_align_up(shm->size)))) < 0) {
printf("do_mmap: Mapping shm area failed with %d.\n", err);
BUG();
} else
printf("%s: %s: Success.\n", __TASKNAME__, __FUNCTION__);
/* Now update the shared memory descriptor */
shm->shm_addr = shm_addr;
shm->refcnt++;
return 0;
}
void *sys_shmat(l4id_t requester, l4id_t shmid, void *shmaddr, int shmflg)
{
struct shm_descriptor *shm_desc, *n;
int err;
list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
if (shm_desc->shmid == shmid) {
if ((err = do_shmat(shm_desc, shmaddr,
shmflg, requester)) < 0) {
l4_ipc_return(err);
return 0;
} else
break;
}
}
l4_ipc_return(0);
return 0;
}
int do_shmdt(struct shm_descriptor *shm, l4id_t tid)
{
struct tcb *task = find_task(tid);
int err;
if (!task) {
printf("%s:%s: Internal error. Cannot find task with tid %d\n",
__TASKNAME__, __FUNCTION__, tid);
BUG();
}
if ((err = do_munmap(shm->shm_addr, shm->size, task)) < 0) {
printf("do_munmap: Unmapping shm segment failed with %d.\n",
err);
BUG();
}
return err;
}
int sys_shmdt(l4id_t requester, const void *shmaddr)
{
struct shm_descriptor *shm_desc, *n;
int err;
list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
if (shm_desc->shm_addr == shmaddr) {
if ((err = do_shmdt(shm_desc, requester)) < 0) {
l4_ipc_return(err);
return 0;
} else
break;
}
}
l4_ipc_return(0);
return 0;
}
static struct shm_descriptor *shm_new(key_t key)
{
/* It doesn't exist, so create a new one */
struct shm_descriptor *shm_desc;
if (!(shm_desc = kzalloc(sizeof(struct shm_descriptor))))
return 0;
if ((shm_desc->shmid = id_new(shm_ids)) < 0)
return 0;
shm_desc->key = (int)key;
INIT_LIST_HEAD(&shm_desc->list);
list_add(&shm_desc->list, &shm_desc_list);
return shm_desc;
}
int sys_shmget(key_t key, int size, int shmflg)
{
struct shm_descriptor *shm_desc;
/* First check argument validity */
if (size > SHM_SHMMAX || size < SHM_SHMMIN) {
l4_ipc_return(-EINVAL);
return 0;
}
/*
* IPC_PRIVATE means create a no-key shm area, i.e. private to this
* process so that it would only share it with its descendants.
*/
if (key == IPC_PRIVATE) {
key = -1; /* Our meaning of no key */
if (!shm_new(key))
l4_ipc_return(-ENOSPC);
else
l4_ipc_return(0);
return 0;
}
list_for_each_entry(shm_desc, &shm_desc_list, list) {
if (shm_desc->key == key) {
/*
* Exclusive means create request
* on existing key should fail.
*/
if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
l4_ipc_return(-EEXIST);
else
/* Found it but do we have a size problem? */
if (shm_desc->size < size)
l4_ipc_return(-EINVAL);
else /* Return shmid of the existing key */
l4_ipc_return(shm_desc->shmid);
return 0;
}
}
/* Key doesn't exist and create set, so we create */
if (shmflg & IPC_CREAT)
if (!(shm_desc = shm_new(key)))
l4_ipc_return(-ENOSPC);
else
l4_ipc_return(shm_desc->shmid);
else /* Key doesn't exist, yet create isn't set, it's an -ENOENT */
l4_ipc_return(-ENOENT);
return 0;
}

tasks/mm0/src/stack.c (Normal file, 28 lines)

@@ -0,0 +1,28 @@
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <string.h>
/* The initial temporary stack used until memory is set up */
__attribute__ ((section("init.stack"))) char stack[4096];
extern unsigned long __stack[]; /* Linker defined */
/* Moves from the temporary stack to where the stack actually belongs. */
void move_stack()
{
register unsigned int sp asm("sp");
register unsigned int fp asm("r11");
unsigned int stack_offset = (unsigned long)__stack - sp;
unsigned int frame_offset = (unsigned long)__stack - fp;
/* Copy current stack into new stack. NOTE: This might demand-page
* the new stack, but maybe that won't work. */
memcpy((void *)(USER_AREA_END - stack_offset), (void *)sp, stack_offset);
sp = USER_AREA_END - stack_offset;
fp = USER_AREA_END - frame_offset;
}

tasks/mm0/src/task.c (Normal file, 149 lines)

@@ -0,0 +1,149 @@
/*
* Task management.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include <l4/api/thread.h>
#include INC_GLUE(memory.h)
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <lib/vaddr.h>
#include <task.h>
#include <kdata.h>
#include <kmalloc/kmalloc.h>
#include <string.h>
#include <vm_area.h>
#include <memory.h>
struct tcb_head {
struct list_head list;
int total; /* Total threads */
} tcb_head;
struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &tcb_head.list, list)
if (t->tid == tid)
return t;
return 0;
}
#if 0
void dump_tasks(void)
{
struct tcb *t;
list_for_each_entry(t, &tcb_head.list, list) {
printf("Task %s: id/spid: %d/%d\n", &t->name[0], t->tid, t->spid);
printf("Task vm areas:\n");
dump_vm_areas(t);
printf("Task swapfile:\n");
dump_task_swapfile(t);
}
}
#endif
void create_init_tcbs(struct initdata *initdata)
{
struct bootdesc *bd = initdata->bootdesc;
INIT_LIST_HEAD(&tcb_head.list);
for (int i = BOOTDESC_IMAGE_START; i < bd->total_images; i++) {
struct tcb *task = kzalloc(sizeof(struct tcb));
/* Ids will be acquired from the kernel */
task->tid = TASK_ID_INVALID;
task->spid = TASK_ID_INVALID;
task->swap_file = kzalloc(sizeof(struct vm_file));
task->swap_file->pager = &swap_pager;
vaddr_pool_init(task->swap_file_offset_pool, 0,
__pfn(TASK_SWAPFILE_MAXSIZE));
INIT_LIST_HEAD(&task->swap_file->page_cache_list);
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->vm_area_list);
list_add_tail(&task->list, &tcb_head.list);
tcb_head.total++;
}
}
int start_init_tasks(struct initdata *initdata)
{
struct tcb *task;
int err;
int i = BOOTDESC_IMAGE_START;
list_for_each_entry(task, &tcb_head.list, list) {
struct vm_file *file = &initdata->memfile[i++];
unsigned int sp = align(USER_AREA_END - 1, 8);
unsigned int pc = USER_AREA_START;
struct task_ids ids = { .tid = task->tid, .spid = task->spid };
/* mmap each task's physical image to task's address space. */
if ((err = do_mmap(file, 0, task, USER_AREA_START,
VM_READ | VM_WRITE | VM_EXEC,
__pfn(page_align_up(file->length)))) < 0) {
printf("do_mmap: failed with %d.\n", err);
goto error;
}
/* mmap each task's stack as single page anonymous memory. */
if ((err = do_mmap(0, 0, task, USER_AREA_END - PAGE_SIZE,
VM_READ | VM_WRITE | VMA_ANON, 1)) < 0) {
printf("do_mmap: Mapping stack failed with %d.\n", err);
goto error;
}
/* mmap each task's utcb as single page anonymous memory. */
if ((err = do_mmap(0, 0, task, (unsigned long)__L4_ARM_Utcb(),
VM_READ | VM_WRITE | VMA_ANON, 1)) < 0) {
printf("do_mmap: Mapping utcb failed with %d.\n", err);
goto error;
}
printf("Creating new thread.\n");
/* Create the thread structures and address space */
if ((err = l4_thread_control(THREAD_CREATE, &ids)) < 0) {
printf("l4_thread_control failed with %d.\n", err);
goto error;
}
printf("New task with id: %d, space id: %d\n", ids.tid, ids.spid);
/* Use returned space and thread ids. */
task->tid = ids.tid;
task->spid = ids.spid;
/* Set up the task's thread details, (pc, sp, pager etc.) */
if ((err = l4_exchange_registers(pc, sp, self_tid(), task->tid)) < 0) {
printf("l4_exchange_registers failed with %d.\n", err);
goto error;
}
printf("Starting task with id %d\n", task->tid);
/* Start the thread */
if ((err = l4_thread_control(THREAD_RUN, &ids)) < 0) {
printf("l4_thread_control failed with %d\n", err);
goto error;
}
}
return 0;
error:
BUG();
}
void init_pm(struct initdata *initdata)
{
create_init_tcbs(initdata);
start_init_tasks(initdata);
}

tasks/mm0/src/zpage.c (Normal file, 49 lines)

@@ -0,0 +1,49 @@
/*
* Handling of the special zero page.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <memory.h>
#include <mm/alloc_page.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <string.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <l4/generic/space.h>
#include <arch/mm.h>
static void *zpage_p;
static struct page *zpage;
void init_zero_page(void)
{
void *zpage_v;
zpage_p = alloc_page(1);
zpage = phys_to_page(zpage_p);
/* Map it to self */
zpage_v = l4_map_helper(zpage_p, 1);
/* Zero it */
memset(zpage_v, 0, PAGE_SIZE);
/* Unmap it */
l4_unmap_helper(zpage_v, 1);
/* Update page struct. All other fields are zero */
zpage->count++;
}
void *get_zero_page(void)
{
zpage->count++;
return zpage_p;
}
void put_zero_page(void)
{
zpage->count--;
BUG_ON(zpage->count < 0);
}