Initial commit

Bahadir Balban
2008-01-13 13:53:52 +00:00
commit e2b791a3d8
789 changed files with 95825 additions and 0 deletions

135
tasks/mm0/SConstruct Normal file

@@ -0,0 +1,135 @@
#
# User space application build script
#
# Copyright (C) 2007 Bahadir Balban
#
import os
import sys
import shutil
from string import split
from os.path import join
from glob import glob
task_name = "mm0"
# The repository root directory, given relative to this file's location:
project_root = "../.."
tools_root = join(project_root, "tools")
prev_image = join(project_root, "build/start.axf")
libs_path = join(project_root, "libs")
ld_script = "include/linker.lds"
physical_base_ld_script = "include/physical_base.lds"
# libc paths:
libc_variant = "userspace"
libc_libpath = join(libs_path, "c/build/%s" % libc_variant)
libc_incpath = join(libc_libpath, "include")
libc_crt0 = join(libs_path, "c/build/crt/sys-userspace/arch-arm/crt0.o")
libc_name = "c-%s" % libc_variant
# libl4 paths:
libl4_path = "../libl4"
libl4_incpath = join(libl4_path, "include")
# libposix paths:
libposix_path = "../libposix"
libposix_incpath = join(libposix_path, "include")
#libmem paths:
libmem_path = "../libmem"
libmem_incpath = "../libmem"
# kernel paths:
kernel_incpath = join(project_root, "include")
# Kernel config header.
config_h = join(project_root, "include/l4/config.h")
# If crt0 is in its library path, it becomes hard to link with it.
# For instance the linker script must use an absolute path for it.
def copy_crt0(source, target, env):
os.system("cp " + str(source[0]) + " " + str(target[0]))
def get_physical_base(source, target, env):
os.system(join(tools_root, "pyelf/readelf.py --first-free-page " + \
prev_image +" >> " + physical_base_ld_script))
# The kernel build environment:
env = Environment(CC = 'arm-none-linux-gnueabi-gcc',
# We don't use -nostdinc because sometimes we need standard headers,
# such as stdarg.h e.g. for variable args, as in printk().
CCFLAGS = ['-g', '-nostdlib', '-ffreestanding', '-std=gnu99', '-Wall', '-Werror'],
LINKFLAGS = ['-nostdlib', '-T' + ld_script, "-L" + libc_libpath, "-L" + libl4_path, "-L" + libmem_path],
ASFLAGS = ['-D__ASSEMBLY__'],
PROGSUFFIX = '.axf', # The suffix to use for final executable
ENV = {'PATH' : os.environ['PATH']}, # Inherit shell path
LIBS = [libc_name, 'libl4', 'libmm', 'libmc', 'libkm', \
'gcc', libc_name], # libgcc.a - This is required for division routines.
CPPFLAGS = "-D__USERSPACE__",
CPPPATH = ['#include', libl4_incpath, libc_incpath, kernel_incpath, \
libmem_incpath, libposix_incpath])
def extract_arch_subarch_plat(config_header):
'''
From the autogenerated kernel config.h, extracts platform, architecture,
subarchitecture information. This is used to include the relevant headers
from the kernel directories.
'''
arch = None
subarch = None
plat = None
if not os.path.exists(config_header):
print "\n\nconfig.h does not exist. "\
"Please run: `scons configure' first\n\n"
sys.exit()
f = open(config_h, "r")
while True:
line = f.readline()
if line == "":
break
parts = split(line)
if len(parts) > 0:
if parts[0] == "#define":
if parts[1] == "__ARCH__":
arch = parts[2]
elif parts[1] == "__PLATFORM__":
plat = parts[2]
elif parts[1] == "__SUBARCH__":
subarch = parts[2]
f.close()
if arch == None:
print "Error: No config symbol found for architecture"
sys.exit()
if subarch == None:
print "Error: No config symbol found for subarchitecture"
sys.exit()
if plat == None:
print "Error: No config symbol found for platform"
sys.exit()
return arch, subarch, plat
def create_symlinks(arch):
arch_path = "include/arch"
arch_path2 ="src/arch"
if os.path.exists(arch_path):
os.system("rm %s" % (arch_path))
os.system("ln -s %s %s" % ("arch-" + arch, arch_path))
if os.path.exists(arch_path2):
os.system("rm %s" % (arch_path2))
os.system("ln -s %s %s" % ("arch-" + arch, arch_path2))
arch, subarch, plat = extract_arch_subarch_plat(config_h)
create_symlinks(arch) # Creates symlinks to architecture specific directories.
src = [glob("src/*.c"), glob("src/lib/*.c"), glob("*.c"), glob("src/arch/*.c")]
objs = env.Object(src)
physical_base = env.Command(physical_base_ld_script, prev_image, get_physical_base)
crt0_copied = env.Command("crt0.o", libc_crt0, copy_crt0)
task = env.Program(task_name, objs + [crt0_copied])
env.Alias(task_name, task)
env.Depends(task, physical_base)
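
For reference, extract_arch_subarch_plat() in the SConstruct above only looks at three #define lines in the generated include/l4/config.h. A fragment of roughly the following shape is what it expects; __ARCH__ being arm matches the arch-arm symlinks in this commit, while the __SUBARCH__ and __PLATFORM__ values here are placeholders:

/* Illustrative config.h fragment (the real file is generated by `scons configure';
 * the __SUBARCH__ and __PLATFORM__ values below are placeholders) */
#define __ARCH__	arm
#define __SUBARCH__	v5	/* placeholder */
#define __PLATFORM__	pb926	/* placeholder */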

Binary file not shown.

1
tasks/mm0/include/arch Symbolic link

@@ -0,0 +1 @@
arch-arm


@@ -0,0 +1,16 @@
#ifndef __INITTASK_ARCH_MM_H__
#define __INITTASK_ARCH_MM_H__
#include <arch/offsets.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include <vm_area.h>
#define INITTASK_ADDR(x) ((x >= INITTASK_AREA_START) && (x < INITTASK_AREA_END))
struct fault_data;
unsigned int vm_prot_flags(pte_t pte);
void set_generic_fault_params(struct fault_data *fault);
#endif /* __INITTASK_ARCH_MM_H__ */


@@ -0,0 +1,9 @@
#ifndef __INITTASK_ARCH_OFFSETS_H__
#define __INITTASK_ARCH_OFFSETS_H__
#define INITTASK_AREA_START 0xE0000000
#define INITTASK_AREA_END 0xF0000000
#define INITTASK_OFFSET INITTASK_AREA_START
#endif
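
A quick sanity check of the two headers above: with INITTASK_AREA_START = 0xE0000000 and INITTASK_AREA_END = 0xF0000000, INITTASK_ADDR(0xE0001000) evaluates true, while INITTASK_ADDR(0xF0000000) evaluates false, because the end of the range is exclusive.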

9
tasks/mm0/include/init.h Normal file

@@ -0,0 +1,9 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __INIT_H__
#define __INIT_H__
void initialise(void);
#endif /* __INIT_H__ */

31
tasks/mm0/include/kdata.h Normal file

@@ -0,0 +1,31 @@
/*
* Data that comes from the kernel.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __MM_KDATA_H__
#define __MM_KDATA_H__
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/generic/physmem.h>
#include INC_PLAT(offsets.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)
#include <vm_area.h>
#define BOOTDESC_IMAGE_START 1
struct initdata {
struct bootdesc *bootdesc;
struct vm_file *memfile;
struct page_bitmap page_map;
};
extern struct initdata initdata;
int request_initdata(struct initdata *i);
#endif /* __MM_KDATA_H__ */


@@ -0,0 +1,43 @@
#ifndef __LIB_BIT_H__
#define __LIB_BIT_H__
#include <l4lib/types.h>
unsigned int __clz(unsigned int bitvector);
int find_and_set_first_free_bit(u32 *word, unsigned int lastbit);
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
int nbits);
int check_and_clear_bit(u32 *word, int bit);
int check_and_clear_contig_bits(u32 *word, int first, int nbits);
/* Set */
static inline void setbit(unsigned int *w, unsigned int flags)
{
*w |= flags;
}
/* Clear */
static inline void clrbit(unsigned int *w, unsigned int flags)
{
*w &= ~flags;
}
/* Test */
static inline int tstbit(unsigned int *w, unsigned int flags)
{
return *w & flags;
}
/* Test and clear */
static inline int tstclr(unsigned int *w, unsigned int flags)
{
int res = tstbit(w, flags);
clrbit(w, flags);
return res;
}
#endif /* __LIB_BIT_H__ */
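
A minimal usage sketch of the inline helpers declared above (the flag values here are hypothetical, made up purely for illustration):

#include <lib/bit.h>

#define EXAMPLE_DIRTY	(1 << 0)	/* hypothetical flag */
#define EXAMPLE_LOCKED	(1 << 1)	/* hypothetical flag */

static void bit_helpers_example(void)
{
	unsigned int word = 0;

	setbit(&word, EXAMPLE_DIRTY | EXAMPLE_LOCKED);	/* word == 0x3 */
	if (tstbit(&word, EXAMPLE_LOCKED))
		clrbit(&word, EXAMPLE_LOCKED);		/* word == 0x1 */
	/* tstclr() returns the old state and clears it in one step */
	int was_dirty = tstclr(&word, EXAMPLE_DIRTY);	/* was_dirty != 0, word == 0 */
	(void)was_dirty;
}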


@@ -0,0 +1,17 @@
#ifndef __MM0_IDPOOL_H__
#define __MM0_IDPOOL_H__
#include <lib/bit.h>
struct id_pool {
int nwords;
u32 bitmap[];
};
struct id_pool *id_pool_new_init(int mapsize);
int id_new(struct id_pool *pool);
int id_del(struct id_pool *pool, int id);
int ids_new_contiguous(struct id_pool *pool, int numids);
int ids_del_contiguous(struct id_pool *pool, int first, int numids);
#endif /* __MM0_IDPOOL_H__ */
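
A short sketch of how the id pool is meant to be used (the pool size of 256 is illustrative; allocation of the pool itself goes through the pager's kmalloc, as the idpool implementation later in this commit shows):

#include <lib/idpool.h>

static void id_pool_example(void)
{
	struct id_pool *pool = id_pool_new_init(256);	/* room for 256 ids */
	int id = id_new(pool);				/* allocate a single id */
	int first = ids_new_contiguous(pool, 4);	/* 4 contiguous ids */

	ids_del_contiguous(pool, first, 4);		/* release the range */
	id_del(pool, id);				/* release the single id */
}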


@@ -0,0 +1,17 @@
/*
* Fake spinlock for future multi-threaded mm0
*/
#ifndef __MM0_SPINLOCK_H__
#define __MM0_SPINLOCK_H__
struct spinlock {
int lock;
};
static inline void spin_lock_init(struct spinlock *s) { }
static inline void spin_lock(struct spinlock *s) { }
static inline void spin_unlock(struct spinlock *s) { }
#endif /* __MM0_SPINLOCK_H__ */


@@ -0,0 +1,17 @@
/*
* Virtual address allocation pool (for shm)
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __VADDR_H__
#define __VADDR_H__
#include <lib/idpool.h>
void vaddr_pool_init(struct id_pool *pool, unsigned long start,
unsigned long end);
void *vaddr_new(struct id_pool *pool, int npages);
int vaddr_del(struct id_pool *, void *vaddr, int npages);
#endif /* __VADDR_H__ */


@@ -0,0 +1,40 @@
/*
* Simple linker script for userspace or svc tasks.
*
* Copyright (C) 2007 Bahadir Balban
*/
/*
* The only catch with this linker script is that everything
* is linked starting at virtual_base, and loaded starting
* at physical_base. virtual_base is the predefined region
* of virtual memory for userland applications. physical_base
* is determined at build time; it is one of the subsequent pages
* that come after the kernel image's load area.
*/
/* INITTASK_AREA_START, see memlayout.h */
virtual_base = 0xE0000000;
INCLUDE "include/physical_base.lds"
/* physical_base = 0x228000; */
offset = virtual_base - physical_base;
ENTRY(_start)
SECTIONS
{
. = virtual_base;
_start_text = .;
.text : AT (ADDR(.text) - offset) { crt0.o(.text) *(.text) }
/* rodata is needed, else your strings will link at the physical address! */
.rodata : AT (ADDR(.rodata) - offset) { *(.rodata) }
.rodata1 : AT (ADDR(.rodata1) - offset) { *(.rodata1) }
.data : AT (ADDR(.data) - offset) { *(.data) }
_start_init = .;
.init : AT (ADDR(.init) - offset) { *(.init.stack) }
. = ALIGN(8);
__stack = .; /* This is the preallocated boot stack */
_end_init = .;
.bss : AT (ADDR(.bss) - offset) { *(.bss) }
_end = .;
}
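
To make the AT() arithmetic above concrete: with virtual_base = 0xE0000000 and, say, physical_base = 0x228000 (the value in the commented-out line), offset works out to 0xDFDD8000, so .text is linked at 0xE0000000 but given a load address of AT(0xE0000000 - 0xDFDD8000) = 0x228000; every other output section is shifted from its link address down to its load address by the same offset.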


@@ -0,0 +1,32 @@
/*
* Physical page descriptor
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __MEMORY_H__
#define __MEMORY_H__
#include <vm_area.h>
#include <kdata.h>
struct membank {
unsigned long start;
unsigned long end;
unsigned long free;
struct page *page_array;
};
extern struct membank membank[];
extern struct vm_file *swap_file;
void init_mm_descriptors(struct page_bitmap *page_map,
struct bootdesc *bootdesc, struct membank *membank);
void init_physmem(struct initdata *initdata, struct membank *membank);
void init_zero_page(void);
void *get_zero_page(void);
void put_zero_page(void);
int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages);
#endif /* __MEMORY_H__ */

17
tasks/mm0/include/mmap.h Normal file

@@ -0,0 +1,17 @@
/*
* Prototypes for mmap/munmap functions that do the actual work.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __MM0_MMAP_H__
#define __MM0_MMAP_H__
#include <task.h>
#include <vm_area.h>
int do_munmap(void *vaddr, unsigned long size, struct tcb *task);
int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages);
#endif /* __MM0_MMAP_H__ */


@@ -0,0 +1,17 @@
/*
* The next free p_align'ed LMA base address
*
* p_align = 0x8000
*
* Recap from ELF spec: p_align: Loadable process segments must have
* congruent values for p_vaddr and p_offset, modulo the page size.
* This member gives the value to which the segments are aligned in
* memory and in the file. Values 0 and 1 mean that no alignment is
* required. Otherwise, p_align should be a positive, integral power
* of 2, and p_vaddr should equal p_offset, modulo p_align.
* This essentially means the next available address must be aligned at
* p_align, rather than the page_size, which one (well, I) would
* normally expect.
*/
physical_base = 0x20800;
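
A minimal sketch of the "align up to p_align" step the comment above describes (p_align is taken from that comment; the example end address is hypothetical):

#define ALIGN_UP(addr, align)	(((addr) + (align) - 1) & ~((align) - 1))

/*
 * e.g. if the previous image ends at the hypothetical address 0x21234 and
 * p_align is 0x8000, ALIGN_UP(0x21234, 0x8000) == 0x28000 would be the
 * next free LMA base.
 */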

35
tasks/mm0/include/shm.h Normal file

@@ -0,0 +1,35 @@
#ifndef __SHM_H__
#define __SHM_H__
#include <l4/api/space.h>
#include <l4/lib/list.h>
#include <l4/macros.h>
#include <l4lib/types.h>
struct shm_descriptor {
int key; /* IPC key supplied by user task */
l4id_t shmid; /* SHM area id, allocated by mm0 */
struct list_head list; /* SHM list, used by mm0 */
struct vm_file *owner;
void *shm_addr; /* The virtual address for segment. */
unsigned long size; /* Size of the area */
unsigned int flags;
int refcnt;
};
#define SHM_AREA_MAX 64 /* Up to 64 shm areas */
/* Up to 10 pages per area, and at least 1 byte (implies 1 page) */
#define SHM_SHMMIN 1
#define SHM_SHMMAX (PAGE_SIZE * 10)
/*
* NOTE: This flag enables the globally unique (disjoint) shm vaddr pool. If shm
* virtual addresses are not globally unique and shm areas are cached, cache
* aliasing occurs on ARMv5.
*/
#define SHM_DISJOINT_VADDR_POOL
/* Initialises shared memory bookkeeping structures */
void shm_init();
#endif /* __SHM_H__ */


@@ -0,0 +1,45 @@
/*
* Copyright (C) 2007 Bahadir Balban
*
* MM0 Posix system call prototypes and structure
* definitions for converting data in message registers
* into system call argument format.
*/
#ifndef __MM0_SYSARGS_H__
#define __MM0_SYSARGS_H__
#include <sys/types.h>
/* For reading argument data from a system call */
struct sys_mmap_args {
void *start;
size_t length;
int prot;
int flags;
int fd;
off_t offset;
};
int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
int flags, int fd, off_t offset);
struct sys_shmat_args {
l4id_t shmid;
const void *shmaddr;
int shmflg;
};
void *sys_shmat(l4id_t requester, l4id_t shmid, const void *shmaddr, int shmflg);
int sys_shmdt(l4id_t requester, const void *shmaddr);
struct sys_shmget_args {
key_t key;
int size;
int shmflg;
};
int sys_shmget(key_t key, int size, int shmflg);
#endif /* __MM0_SYSARGS_H__ */

70
tasks/mm0/include/task.h Normal file

@@ -0,0 +1,70 @@
/*
* Thread control block.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __TASK_H__
#define __TASK_H__
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <l4/lib/list.h>
#include <l4lib/types.h>
#include <l4lib/utcb.h>
#define __TASKNAME__ "mm0"
/* Allow per-task anonymous memory to grow as much as 1 MB for now. */
#define TASK_SWAPFILE_MAXSIZE SZ_1MB
struct vm_file;
/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */
struct list_head list;
/* Name of the task */
char name[16];
/* Task ids */
int tid;
int spid;
/* Related task ids */
unsigned int pagerid; /* Task's pager */
/* Program segment marks */
unsigned long text_start;
unsigned long text_end;
unsigned long data_start;
unsigned long data_end;
unsigned long bss_start;
unsigned long bss_end;
unsigned long stack_start;
unsigned long stack_end; /* Exclusive of last currently mapped page */
unsigned long heap_start;
unsigned long heap_end; /* Exclusive of last currently mapped page */
/* Virtual memory areas */
struct list_head vm_area_list;
/* Per-task swap file for now */
struct vm_file *swap_file;
/* Pool to generate swap file offsets for fileless anonymous regions */
struct id_pool *swap_file_offset_pool;
};
struct tcb *find_task(int tid);
struct initdata;
void init_pm(struct initdata *initdata);
int start_init_tasks(struct initdata *initdata);
void dump_tasks(void);
/* Used by servers that have a reference to tcbs (e.g. a pager) */
#define current ((struct ktcb *)__L4_ARM_Utcb()->usr_handle)
#endif /* __TASK_H__ */

128
tasks/mm0/include/vm_area.h Normal file

@@ -0,0 +1,128 @@
/*
* Virtual memory area descriptors. No page cache yet.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __VM_AREA_H__
#define __VM_AREA_H__
#include <stdio.h>
#include <task.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <arch/mm.h>
#include <lib/spinlock.h>
/* Protection flags */
#define VM_NONE (1 << 0)
#define VM_READ (1 << 1)
#define VM_WRITE (1 << 2)
#define VM_EXEC (1 << 3)
#define VM_PROT_MASK (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SWAPPED (1 << 4)
/* VMA flags */
#define VMA_SHARED (1 << 3)
/* VMA that's not file-backed, always ZI */
#define VMA_ANON (1 << 4)
/* Private copy of a file VMA, can be ZI */
#define VMA_COW (1 << 5)
/* This marks shadow vmas */
#define VMA_SHADOW (1 << 6)
struct page {
int count; /* Refcount */
struct spinlock lock; /* Page lock. */
struct list_head list; /* For list of a file's in-memory pages */
unsigned long virtual; /* If refs >1, first mapper's virtual address */
struct vm_file *owner; /* The file it belongs to */
unsigned int flags; /* Flags associated with the page. */
unsigned long f_offset; /* The offset page resides in its owner */
};
extern struct page *page_array;
#define page_refcnt(x) ((x)->count + 1)
#define virtual(x) ((x)->virtual)
#define phys_to_page(x) (page_array + __pfn(x))
#define page_to_phys(x) __pfn_to_addr((((void *)x) - \
(void *)page_array) \
/ sizeof(struct page))
/* Fault data specific to this task + ptr to kernel's data */
struct fault_data {
fault_kdata_t *kdata; /* Generic fault data provided by the kernel */
unsigned int reason; /* Generic fault reason flags */
unsigned int address; /* Aborted address */
struct vm_area *vma; /* Inittask-related fault data */
struct tcb *task; /* Inittask-related fault data */
};
struct vm_pager_ops {
void (*read_page)(struct fault_data *f, void *);
void (*write_page)(struct fault_data *f, void *);
};
/* Describes the pager task that handles a vm_area. */
struct vm_pager {
struct vm_pager_ops ops; /* The ops the pager does on area */
};
/*
* TODO: Since there's no vfs yet, an inode's i_addr field is the
* virtual memory address of a file which uniquely identifies that file.
*/
struct inode {
unsigned long i_addr; /* The unique, global resource id. */
};
/*
* Describes the in-memory representation of a file. This could
* point at a file or another resource, e.g. a device area or swapper space.
*/
struct vm_file {
struct inode inode;
unsigned long length;
/* This is the cache of physical pages that this file has in memory. */
struct list_head page_cache_list;
struct vm_pager *pager;
};
/*
* Describes a virtually contiguous region of memory in a task. It covers
* a unique virtual address area within its task, meaning that it does not
* overlap with other regions in the same task. The region could be backed by a
* file or various other resources. This is managed by the region's pager.
*/
struct vm_area {
struct list_head list; /* Vma list */
struct list_head shadow_list; /* Head for shadow list. See fault.c */
unsigned long pfn_start; /* Region start virtual pfn */
unsigned long pfn_end; /* Region end virtual pfn, exclusive */
unsigned long flags; /* Protection flags. */
unsigned long f_offset; /* File offset in pfns */
struct vm_file *owner; /* File that backs the area. */
};
static inline struct vm_area *find_vma(unsigned long addr,
struct list_head *vm_area_list)
{
struct vm_area *vma;
unsigned long pfn = __pfn(addr);
list_for_each_entry(vma, vm_area_list, list)
if ((pfn >= vma->pfn_start) && (pfn < vma->pfn_end))
return vma;
return 0;
}
/* Pagers */
extern struct vm_pager default_file_pager;
extern struct vm_pager swap_pager;
/* Main page fault entry point */
void page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);
#endif /* __VM_AREA_H__ */
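
A small worked example of the page/physical conversion macros above, assuming 4 KB pages and the usual shift-by-PAGE_BITS definitions of __pfn()/__pfn_to_addr() in the kernel headers (an assumption, since those macros are defined outside this file):

/* Assuming __pfn(x) == (x) >> 12 and __pfn_to_addr(pfn) == (pfn) << 12: */
struct page *pg = phys_to_page(0x228000);	/* == &page_array[0x228] */
unsigned long phys = page_to_phys(pg);		/* back to 0x228000 */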

113
tasks/mm0/main.c Normal file

@@ -0,0 +1,113 @@
/*
* mm0. Pager for all tasks.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <init.h>
#include <l4lib/arch/message.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <l4lib/kip.h>
#include <l4lib/utcb.h>
#include <l4lib/ipcdefs.h>
#include <l4/api/thread.h>
#include <l4/api/space.h>
#include <l4/api/ipc.h>
#include <shm.h>
#include <task.h>
#include <vm_area.h>
#include <syscalls.h>
/* FIXME:LOCKING:FIXME:LOCKING:FIXME:LOCKING
* NOTE: For multithreaded MM0, not surprisingly, we need locking on
* vmas, vm_files, and all global variables. Also at times, the per-task
* lists of items (e.g. vmas) must be entirely locked. Pages already have
* locking.
*/
void handle_requests(void)
{
/* Generic ipc data */
u32 mr[MR_UNUSED_TOTAL];
l4id_t sender;
u32 tag;
int err;
printf("%s: Initiating ipc.\n", __TASKNAME__);
if ((err = l4_receive(L4_ANYTHREAD)) < 0) {
printf("%s: %s: IPC Error: %d. Quitting...\n", __TASKNAME__,
__FUNCTION__, err);
BUG();
}
/* Syslib conventional ipc data which uses first few mrs. */
tag = l4_get_tag();
sender = l4_get_sender();
/* Read mrs not used by syslib */
for (int i = 0; i < MR_UNUSED_TOTAL; i++)
mr[i] = read_mr(i);
switch(tag) {
case L4_IPC_TAG_WAIT:
/*
* A thread that wants to sync with us would have
* started here.
*/
printf("%s: Synced with waiting thread.\n", __TASKNAME__);
break;
case L4_IPC_TAG_PFAULT:
/* Handle page fault. */
page_fault_handler(sender, (fault_kdata_t *)&mr[0]);
break;
case L4_IPC_TAG_SHMGET: {
struct sys_shmget_args *args = (struct sys_shmget_args *)&mr[0];
sys_shmget(args->key, args->size, args->shmflg);
break;
}
case L4_IPC_TAG_SHMAT: {
struct sys_shmat_args *args = (struct sys_shmat_args *)&mr[0];
sys_shmat(sender, args->shmid, args->shmaddr, args->shmflg);
break;
}
case L4_IPC_TAG_SHMDT:
sys_shmdt(sender, (void *)mr[0]);
break;
case L4_IPC_TAG_MMAP: {
struct sys_mmap_args *args = (struct sys_mmap_args *)&mr[0];
BUG(); /* FIXME: There are 8 arguments to ipc whereas there are 7 mrs available. Fix this by increasing MRs to 8 ??? */
sys_mmap(sender, args->start, args->length, args->prot, args->flags, args->fd, args->offset);
break;
}
case L4_IPC_TAG_MUNMAP: {
/* TODO: Use arg struct instead */
sys_munmap(sender, (void *)mr[0], (int)mr[1]);
break;
}
case L4_IPC_TAG_MSYNC: {
/* TODO: Use arg struct instead */
sys_msync(sender, (void *)mr[0], (int)mr[1], (int)mr[2]);
break;
}
default:
printf("%s: Unrecognised ipc tag (%d)"
"received. Ignoring.\n", __TASKNAME__, mr[MR_TAG]);
}
}
void main(void)
{
/* Initialise the memory, server tasks, mmap and start them. */
initialise();
while (1) {
handle_requests();
}
}

258
tasks/mm0/mmap.extra.c.code Normal file

@@ -0,0 +1,258 @@
#if 0
void shm_request_handler(struct shm_request *request, struct id_pool **shm_ids)
{
struct shm_descriptor *pending, *new;
struct shm_kdata *kdata;
if (request->type == SHM_SENDER) {
list_for_each_entry(pending, &shm_pending_list, list) {
kdata = &pending->kdata;
/*
* The receiver request should have set this, and also
* few other parameters should match with this request.
*/
if (kdata->receiver == request->pair &&
kdata->npages == request->npages &&
kdata->sender == request->self) {
/* Fill in rest of the incomplete information */
kdata->send_pfn = request->pfn;
/* Allocate a new id for the shm setup */
pending->shmid = id_new(*shm_ids);
/* Add it to completed shm area list */
list_del(&pending->list);
list_add(&pending->list, &shm_desc_list);
/* Arrange the actual shm setup with the kernel */
do_shm_setup(pending);
return;
}
}
/*
* If no matching pending shm descriptor is found, a new one
* should be allocated.
*/
new = kzalloc(sizeof(struct shm_descriptor));
kdata = &new->kdata;
/* Fill in all information available from the request */
kdata->sender = request->self;
kdata->receiver = request->pair;
kdata->send_pfn = request->pfn;
kdata->npages = request->npages;
/* Add the descriptor to pending list. */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &shm_pending_list);
} else if (request->type == SHM_RECEIVER) {
list_for_each_entry(pending, &shm_pending_list, list) {
kdata = &pending->kdata;
/*
* The sender request should have already setup, and
* few other parameters should match with this request.
*/
if (kdata->receiver == request->self &&
kdata->npages == request->npages &&
kdata->sender == request->pair) {
/* Fill in rest of the incomplete information */
kdata->recv_pfn = request->pfn;
/* Allocate a new id for the shm setup */
pending->shmid = id_new(*shm_ids);
list_del(&pending->list);
/* Add it to completed shm area list */
list_add(&pending->list, &shm_desc_list);
/* Arrange the actual shm setup with the kernel */
do_shm_setup(pending);
return;
}
}
/*
* If no matching pending shm descriptor is found, a new one
* should be allocated.
*/
new = kzalloc(sizeof(struct shm_descriptor));
kdata = &new->kdata;
/* Fill in all information available from the request */
kdata->sender = request->pair;
kdata->receiver = request->self;
kdata->recv_pfn = request->pfn;
kdata->npages = request->npages;
/* Add the descriptor to pending list. */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &shm_pending_list);
}
}
#endif
/* Original incomplete untested code for do_munmap */
/* Unmaps given address from its vma. Releases those pages in that vma. */
int do_munmap(unsigned long addr, unsigned long size, struct tcb *task)
{
unsigned long npages = __pfn(size);
unsigned long pfn_start = __pfn(addr);
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma = 0, *shadow = 0, *vma_new = 0, *shadow_new = 0;
struct list_head *n;
if (!(vma = find_vma(addr, &task->vm_area_list)))
return -EINVAL;
/* Do the real file's vma. Split needed? */
if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end) {
if (!(vma_new = kzalloc(sizeof(struct vm_area))))
return -ENOMEM;
vma = vma_split(vma, vma_new, pfn_start, pfn_end);
INIT_LIST_HEAD(&vma_new->list);
INIT_LIST_HEAD(&vma_new->shadow_list);
list_add(&vma_new->list, &vma->list);
/* Shrink needed? */
} else if (((vma->pfn_start == pfn_start) && (vma->pfn_end > pfn_end))
|| ((vma->pfn_start < pfn_start) && (vma->pfn_end == pfn_end)))
vma = vma_shrink(vma, pfn_start, pfn_end);
/* Destroy needed? */
else if ((vma->pfn_start == pfn_start) && (vma->pfn_end == pfn_end)) {
/* NOTE: VMA can't be referred after this point. */
vma_destroy(vma);
goto pgtable_unmap;
} else
BUG();
/* Sort out the shadows, if any. non-cow mappings would skip this. */
list_for_each_entry(shadow, &vma->shadow_list, shadow_list) {
/* Split needed? */
if (shadow->pfn_start < pfn_start && shadow->pfn_end > pfn_end) {
shadow_new = kzalloc(sizeof(struct vm_area));
vma = vma_split(shadow, shadow_new, pfn_start, pfn_end);
INIT_LIST_HEAD(&shadow_new->list);
list_add_tail(&shadow_new->list, &shadow->list);
/* Destroy needed? */
} else if ((shadow->pfn_start == pfn_start) && (shadow->pfn_end == pfn_end)) {
/* NOTE: vma can't be referred after this point. */
vma_destroy(shadow);
/* Shrink needed? */
} else if (((shadow->pfn_start == pfn_start) && (shadow->pfn_end > pfn_end))
|| ((shadow->pfn_start < pfn_start) && (shadow->pfn_end == pfn_end)))
shadow = vma_shrink(shadow, pfn_start, pfn_end);
else
BUG();
}
/*
* If the real file was COW and its vma had split, the shadows must be
* separated into the two new vmas according to which one they
* belong to.
*/
if (vma_new)
list_for_each_entry_safe(shadow, n, &vma->shadow_list, shadow_list) {
if (shadow->pfn_start >= vma_new->pfn_start &&
shadow->pfn_end <= vma_new->pfn_end) {
list_del_init(&shadow->list);
list_add(&shadow->list, &vma_new->shadow_list);
} else
BUG_ON(!(shadow->pfn_start >= vma->pfn_start &&
shadow->pfn_end <= vma->pfn_end));
}
/* The stage where the actual pages are unmapped from the page tables */
pgtable_unmap:
/* TODO:
* - Find out if the vma is cow, and contains shadow vmas.
* - Remove and free shadow vmas or the real vma, or shrink them if applicable.
* - Free the swap file segment for the vma if vma is private (cow).
* - Reduce refcount for the in-memory pages.
* - If refcount is zero (they could be shared!), either add pages to some page
* cache, or simpler the better, free the actual pages back to the page allocator.
* - l4_unmap() the corresponding virtual region from the page tables.
*/
}
/*
* TODO: UNUSED: This used to be used during mmap to map swap for anon
* areas.
* Now same code might be used *during swapping* to set up the swapfile
* for the very same areas. Also the fault handler might find the swap
* slot on the file using info from this code.
*
* For an anonymous and/or private region this provides a per-task swap
* file for backing. For shared memory it attaches shm regions to a
* global shm swap file.
*/
struct vm_file *setup_swap_file(unsigned long map_address,
unsigned int flags, unsigned long *f_offset,
struct tcb *t, int pages)
{
struct vm_file *swap_file;
unsigned long shm_f_offset;
BUG_ON(!(flags & VMA_ANON));
BUG_ON(!is_page_aligned(map_address));
/*
* All anon shared memory is kept on a single global swap file. Shm
* addresses are globally the same among processes, so the file mapping
* is a function of shm segment address.
*/
if (flags & VMA_SHARED) {
swap_file = shm_swap_file;
*file = swap_file;
/*
* The file offset is the shm segment's page offset in the shm
* virtual region, which is unique among all processes.
*/
*f_offset = __pfn(map_address) - SHM_AREA_START;
/*
* Extend the file if this shm vaddr lies beyond file end.
*/
if (swap_file->length < __pfn_to_addr(*f_offset + pages))
swap_file->length = __pfn_to_addr(*f_offset + pages);
/* Regular non-shareable anonymous regions */
} else {
swap_file = t->swap_file;
/*
* Anonymous vmas are mapped into process' swap during mmap.
* Copy-on-write vmas are mapped into swap as shadow vmas when
* they get copy-on-write'ed.
*/
*file = swap_file;
BUG_ON(!is_page_aligned(swap_file->length));
/*
* vmas map into the next available swap slot. The whole vma is
* mapped so that it has a contiguous existence on swap file.
* Swap slots are allocated by the per-task swap offset pool.
*/
*f_offset = vaddr_pool_new(t->swap_file_offset_pool, pages);
/* If new offset is greater than current file size, update. */
if (__pfn_to_addr(*f_offset + pages) > swap_file->length)
swap_file->length = __pfn_to_addr(*f_offset + pages);
BUG_ON(swap_file->length > TASK_SWAPFILE_MAXSIZE);
}
printf("Set up swapfile for anon%svma @ pfn offset %d. ",
(flags & VMA_SHARED) ? "/shared " : " ",
"Swap file size: %d bytes.\n", *f_offset, swap_file->length);
return 0;
}

BIN
tasks/mm0/src/.scons14756 Normal file

Binary file not shown.

1
tasks/mm0/src/arch Symbolic link

@@ -0,0 +1 @@
arch-arm


@@ -0,0 +1,55 @@
/*
* Copyright (C) 2007 Bahadir Balban
*/
#include <arch/mm.h>
/* Extracts generic protection flags from architecture-specific pte */
unsigned int vm_prot_flags(pte_t pte)
{
unsigned int vm_prot_flags = 0;
unsigned int rw_flags = __MAP_USR_RW_FLAGS & PTE_PROT_MASK;
unsigned int ro_flags = __MAP_USR_RO_FLAGS & PTE_PROT_MASK;
/* Clear non-protection flags */
pte &= PTE_PROT_MASK;
if (pte == ro_flags)
vm_prot_flags = VM_READ | VM_EXEC;
else if (pte == rw_flags)
vm_prot_flags = VM_READ | VM_WRITE | VM_EXEC;
else
vm_prot_flags = VM_NONE;
return vm_prot_flags;
}
/*
* PTE STATES:
* PTE type field: 00 (Translation fault)
* PTE type field correct, AP bits: None (Read or Write access fault)
* PTE type field correct, AP bits: RO (Write access fault)
*/
/* Extracts arch-specific fault parameters and puts them into generic format */
void set_generic_fault_params(struct fault_data *fault)
{
unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
fault->reason = 0;
if (is_prefetch_abort(fault->kdata->fsr)) {
fault->reason |= VM_READ;
fault->address = fault->kdata->faulty_pc;
} else {
fault->address = fault->kdata->far;
/* Always assume read fault first */
if (prot_flags & VM_NONE)
fault->reason |= VM_READ;
else if (prot_flags & VM_READ)
fault->reason |= VM_WRITE;
else
BUG();
}
}

478
tasks/mm0/src/fault.c Normal file

@@ -0,0 +1,478 @@
/*
* Page fault handling.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <vm_area.h>
#include <task.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <arch/mm.h>
#include <l4/generic/space.h>
#include <string.h>
#include <memory.h>
#include <shm.h>
/* FIXME: TODO:
* Before allocating any page (read-only file pages, anon pages, etc.),
* the page cache of that page's owner file must be checked first.
*/
/*
* For copy-on-write vmas, grows an existing shadow vma, or creates a new one
* for the copy-on-write'ed page. Then adds this shadow vma to the actual vma's
* shadow list. Shadow vmas never overlap with each other, and always overlap
* with part of their original vma.
*/
struct vm_area *copy_on_write_vma(struct fault_data *fault)
{
struct vm_area *shadow;
unsigned long faulty_pfn = __pfn(fault->address);
BUG_ON(faulty_pfn < fault->vma->pfn_start ||
faulty_pfn >= fault->vma->pfn_end);
list_for_each_entry(shadow, &fault->vma->shadow_list, shadow_list) {
if (faulty_pfn == (shadow->pfn_start - 1)) {
/* Growing start of existing shadow vma */
shadow->pfn_start = faulty_pfn;
shadow->f_offset -= 1;
return shadow;
} else if (faulty_pfn == (shadow->pfn_end + 1)) {
/* Growing end of existing shadow vma */
shadow->pfn_end = faulty_pfn;
return shadow;
}
}
/* Otherwise this is a new shadow vma that must be initialised */
shadow = kzalloc(sizeof(struct vm_area));
BUG(); /* This f_offset is wrong. It uses uninitialised fields; besides,
swap offsets are calculated differently. */
shadow->f_offset = faulty_pfn - shadow->pfn_start
+ shadow->f_offset;
shadow->pfn_start = faulty_pfn;
shadow->pfn_end = faulty_pfn + 1; /* End pfn is exclusive */
shadow->flags = fault->vma->flags;
/* The vma is owned by the swap file, since it's a private vma */
shadow->owner = fault->task->swap_file;
INIT_LIST_HEAD(&shadow->list);
INIT_LIST_HEAD(&shadow->shadow_list);
/*
* The actual vma uses its shadow_list as the list head for shadows.
* The shadows use their list member, and shadow_list is unused.
*/
list_add(&shadow->list, &fault->vma->shadow_list);
return shadow;
}
/*
* Handles any page ownership change or allocation for file-backed pages.
*/
int do_file_page(struct fault_data *fault)
{
unsigned int reason = fault->reason;
unsigned int vma_flags = fault->vma->flags;
unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
/* For RO or non-cow WR pages just read in the page */
if (((reason & VM_READ) || ((reason & VM_WRITE) && !(vma_flags & VMA_COW)))
&& (pte_flags & VM_NONE)) {
/* Allocate a new page */
void *paddr = alloc_page(1);
void *vaddr = phys_to_virt(paddr);
struct page *page = phys_to_page(paddr);
/* Map new page at a self virtual address temporarily */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/*
* Read the page. (Simply read into the faulty area that's
* now mapped using a newly allocated page.)
*/
fault->vma->owner->pager->ops.read_page(fault, vaddr);
/* Remove temporary mapping */
l4_unmap(vaddr, 1, self_tid());
/* Map it to task. */
l4_map(paddr, (void *)page_align(fault->address), 1,
(reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
fault->task->tid);
spin_lock(&page->lock);
/* Update its page descriptor */
page->count++;
page->owner = fault->vma->owner;
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->virtual = page_align(fault->address);
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
spin_unlock(&page->lock);
/* Upgrade RO page to non-cow write */
} else if ((reason & VM_WRITE) && (pte_flags & VM_READ)
&& !(vma_flags & VMA_COW)) {
/* The page is mapped in, just update its permission */
l4_map((void *)__pte_to_addr(fault->kdata->pte),
(void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
/*
* For cow-write, allocate private pages and create shadow vmas.
*/
} else if ((reason & VM_WRITE) && (pte_flags & VM_READ)
&& (vma_flags & VMA_COW)) {
void *pa = (void *)__pte_to_addr(fault->kdata->pte);
void *new_pa = alloc_page(1);
struct page *page = phys_to_page(pa);
struct page *new_page = phys_to_page(new_pa);
void *va, *new_va;
/* Create or obtain existing shadow vma for the page */
struct vm_area *shadow = copy_on_write_vma(fault);
/* Map new page at a local virtual address temporarily */
new_va = l4_map_helper(new_pa, 1);
/* Map the old page (vmapped for process but not us) to self */
va = l4_map_helper(pa, 1);
/* Copy data from old to new page */
memcpy(new_va, va, PAGE_SIZE);
/* Remove temporary mappings */
l4_unmap(va, 1, self_tid());
l4_unmap(new_va, 1, self_tid());
spin_lock(&page->lock);
/* Clear usage details for original page. */
page->count--;
page->virtual = 0; /* FIXME: Maybe mapped for multiple processes ? */
/* New page is owned by shadow's owner (swap) */
new_page->owner = shadow->owner;
new_page->count++;
new_page->f_offset = __pfn(fault->address)
- shadow->pfn_start + shadow->f_offset;
new_page->virtual = page_align(fault->address);
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
spin_unlock(&page->lock);
/*
* Overwrite the original file-backed page's mapping on this
* task with the writeable private page. The original physical
* page still exists in memory and can be referenced from its
* associated owner file, but it's not mapped into any virtual
* address anymore in this task.
*/
l4_map(new_pa, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
} else if ((reason & VM_WRITE) && (pte_flags & VM_NONE)
&& (vma_flags & VMA_COW)) {
struct vm_area *shadow;
/* Allocate a new page */
void *paddr = alloc_page(1);
void *vaddr = phys_to_virt(paddr);
struct page *page = phys_to_page(paddr);
/* Map it to self */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/* Update its page descriptor */
page->count++;
page->owner = fault->vma->owner;
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->virtual = page_align(fault->address);
/*
* Read the page. (Simply read into the faulty area that's
* now mapped using a newly allocated page.)
*/
fault->vma->owner->pager->ops.read_page(fault, vaddr);
/* Unmap from self */
l4_unmap(vaddr, 1, self_tid());
/* Map to task. */
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
/* Obtain a shadow vma for the page */
shadow = copy_on_write_vma(fault);
spin_lock(&page->lock);
/* Now anonymise the page by changing its owner file to swap */
page->owner = shadow->owner;
/* Page's offset is different in its new owner. */
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
spin_unlock(&page->lock);
} else
BUG();
return 0;
}
/*
* Handles any page allocation or file ownership change for anonymous pages.
* For read accesses initialises a wired-in zero page and for write accesses
* initialises a private ZI page giving its ownership to the swap file.
*/
int do_anon_page(struct fault_data *fault)
{
unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
void *paddr, *vaddr;
struct page *page;
/* If swapped, read in with vma's pager (swap in anon case) */
if (pte_flags & VM_SWAPPED) {
BUG();
// Properly implement:
// fault->vma->owner->pager->ops.read_page(fault);
/* Map the page with right permission */
if (fault->reason & VM_READ)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RO_FLAGS, fault->task->tid);
else if (fault->reason & VM_WRITE)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
else
BUG();
return 0;
}
/* For non-existent pages just map the zero page. */
if (fault->reason & VM_READ) {
/*
* Zero page is a special wired-in page that is mapped
* many times in many tasks. Just update its count field.
*/
paddr = get_zero_page();
#if defined(SHM_DISJOINT_VADDR_POOL)
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RO_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED
#endif
}
/* Write faults require a real zero initialised page */
if (fault->reason & VM_WRITE) {
paddr = alloc_page(1);
vaddr = phys_to_virt(paddr);
page = phys_to_page(paddr);
/* NOTE:
* This mapping overwrites the original RO mapping which
* is anticipated to be the zero page.
*/
BUG_ON(__pte_to_addr(fault->kdata->pte) !=
(unsigned long)get_zero_page());
/* Map new page at a self virtual address temporarily */
l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
/* Clear the page */
memset((void *)vaddr, 0, PAGE_SIZE);
/* Remove temporary mapping */
l4_unmap((void *)vaddr, 1, self_tid());
#if defined(SHM_DISJOINT_VADDR_POOL)
/* Map the page to task */
l4_map(paddr, (void *)page_align(fault->address), 1,
MAP_USR_RW_FLAGS, fault->task->tid);
#else
#error ARM v5 Cache aliasing possibility. Map this uncached on VMA_SHARED.
#endif
spin_lock(&page->lock);
/* vma's swap file owns this page */
page->owner = fault->vma->owner;
/* Add the page to its owner's list of in-memory pages */
BUG_ON(!list_empty(&page->list));
list_add(&page->list, &page->owner->page_cache_list);
/* The offset of this page in its owner file */
page->f_offset = __pfn(fault->address)
- fault->vma->pfn_start + fault->vma->f_offset;
page->count++;
page->virtual = page_align(fault->address);
spin_unlock(&page->lock);
}
return 0;
}
/*
* Page fault model:
*
* A page is anonymous (e.g. stack)
* - page needs read access:
* action: map the zero page.
* - page needs write access:
* action: allocate ZI page and map that. Swap file owns the page.
* - page is swapped to swap:
* action: read back from swap file into new page.
*
* A page is file-backed but private (e.g. .data section)
* - page needs read access:
* action: read the page from its file.
* - page is swapped out before being private. (i.e. invalidated)
* action: read the page from its file. (original file)
* - page is swapped out after being private.
* action: read the page from its file. (swap file)
* - page needs write access:
* action: allocate new page, declare page as private, change its
* owner to swap file.
*
* A page is file backed but not-private, and read-only. (e.g. .text section)
* - page needs read access:
* action: read in the page from its file.
* - page is swapped out. (i.e. invalidated)
* action: read in the page from its file.
* - page needs write access:
* action: forbidden, kill task?
*
* A page is file backed but not-private, and read/write. (e.g. any data file.)
* - page needs read access:
* action: read in the page from its file.
* - page is flushed back to its original file. (i.e. instead of swap)
* action: read in the page from its file.
* - page needs write access:
* action: read the page in, give write access.
*/
void do_page_fault(struct fault_data *fault)
{
unsigned int vma_flags = (fault->vma) ? fault->vma->flags : VM_NONE;
unsigned int reason = fault->reason;
/* vma flags show no access */
if (vma_flags & VM_NONE) {
printf("Illegal access, tid: %d, address: %x\n",
fault->task->tid, fault->address);
BUG();
}
/* The access reason is not included in the vma's listed flags */
if (!(reason & vma_flags)) {
printf("Illegal access, tid: %d, address: %x\n",
fault->task->tid, fault->address);
BUG();
}
if ((reason & VM_EXEC) && (vma_flags & VM_EXEC)) {
printf("Exec faults unsupported yet.\n");
BUG(); /* Can't handle this yet. */
}
/* Handle legitimate read faults on the vma */
if (vma_flags & VMA_ANON)
do_anon_page(fault);
else
do_file_page(fault);
}
void vm_file_pager_read_page(struct fault_data *fault, void *dest_page)
{
/* Fault's offset in its vma */
unsigned long vma_off_pfn = __pfn(fault->address) - fault->vma->pfn_start;
/* Fault's offset in the file */
unsigned long f_off_pfn = fault->vma->f_offset + vma_off_pfn;
/* The address of page in the file */
void *file_page = (void *)(fault->vma->owner->inode.i_addr +
__pfn_to_addr(f_off_pfn));
/*
* Map the memfile's page into virtual memory.
*
* FIXME: Need to find a way of properly generating virtual addresses
* rather than one-to-one conversion.
*/
file_page = l4_map_helper(file_page, 1);
/* Copy it into destination page */
memcpy(dest_page, file_page, PAGE_SIZE);
}
void vm_file_pager_write_page(struct fault_data *f, void *p)
{
}
void vm_swapper_read_page(struct fault_data *fault, void *p)
{
}
void vm_swapper_write_page(struct fault_data *f, void *p) { }
/* Pager for file pages */
struct vm_pager default_file_pager = {
.ops = {
.read_page = vm_file_pager_read_page,
.write_page= vm_file_pager_write_page,
},
};
/* Swap pager for anonymous and private pages */
struct vm_pager swap_pager = {
.ops = {
.read_page = vm_swapper_read_page,
.write_page= vm_swapper_write_page,
},
};
void page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
{
struct fault_data fault = {
/* Fault data from kernel */
.kdata = fkdata,
};
printf("%s: Handling fault from %d.\n", __TASKNAME__, sender);
BUG_ON(sender == 0);
/* Get pager specific task info */
BUG_ON(!(fault.task = find_task(sender)));
/* Extract fault reason, fault address etc. in generic format */
set_generic_fault_params(&fault);
/* Get vma info */
if (!(fault.vma = find_vma(fault.address,
&fault.task->vm_area_list)))
printf("Hmm. No vma for faulty region. "
"Bad things will happen.\n");
/* Handle the actual fault */
do_page_fault(&fault);
}

91
tasks/mm0/src/init.c Normal file

@@ -0,0 +1,91 @@
/*
* Initialise the system.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <kdata.h>
#include <memory.h>
#include <mm/alloc_page.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <task.h>
#include <shm.h>
void init_utcb(void)
{
struct task_ids ids;
void *utcb_page = alloc_page(1); /* Allocate a utcb page */
l4_getid(&ids);
l4_map(utcb_page, __L4_ARM_Utcb(), 1, MAP_USR_RW_FLAGS, ids.tid);
}
void init_mm(struct initdata *initdata)
{
/* Initialise the page and bank descriptors */
init_physmem(initdata, membank);
printf("%s: Initialised physmem.\n", __TASKNAME__);
/* Initialise the page allocator on first bank. */
init_page_allocator(membank[0].free, membank[0].end);
printf("%s: Initialised page allocator.\n", __TASKNAME__);
/* Initialise the zero page */
init_zero_page();
printf("%s: Initialised zero page.\n", __TASKNAME__);
init_utcb();
printf("%s: Initialised own utcb.\n", __TASKNAME__);
/* Initialise the pager's memory allocator */
kmalloc_init();
printf("%s: Initialised kmalloc.\n", __TASKNAME__);
shm_init();
printf("%s: Initialised shm structures.\n", __TASKNAME__);
/* Give the kernel some memory to use for its allocators */
l4_kmem_grant(__pfn(alloc_page(__pfn(SZ_1MB))), __pfn(SZ_1MB));
}
/* Create temporary run-time files in memory to test with mmap */
void init_boot_files(struct initdata *initdata)
{
struct bootdesc *bd = initdata->bootdesc;
int total_files = bd->total_images;
struct vm_file *memfile;
struct svc_image *img;
memfile = kzalloc(sizeof(struct vm_file) * total_files);
initdata->memfile = memfile;
BUG();
for (int i = BOOTDESC_IMAGE_START; i < total_files; i++) {
img = &bd->images[i];
/*
* I have left the i_addr as physical on purpose. The inode is
* not a readily usable memory address, its simply a unique key
* that represents that file. Here, we use the physical address
* of the memory file as that key. The pager must take action in
* order to make use of it.
*/
memfile[i].inode.i_addr = img->phys_start;
memfile[i].length = img->phys_end - img->phys_start;
memfile[i].pager = &default_file_pager;
INIT_LIST_HEAD(&memfile[i].page_cache_list);
}
}
void initialise(void)
{
request_initdata(&initdata);
init_mm(&initdata);
init_boot_files(&initdata);
// printf("INITTASK: Initialised mock-up bootfiles.\n");
init_pm(&initdata);
// printf("INITTASK: Initialised the memory/process manager.\n");
}

105
tasks/mm0/src/kdata.c Normal file

@@ -0,0 +1,105 @@
/*
* Requesting system information from kernel during init.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <l4lib/arch/syscalls.h>
#include <kdata.h>
#include <string.h>
#include INC_API(kip.h)
#include <kmalloc/kmalloc.h>
/* Kernel data acquired during initialisation */
struct initdata initdata;
#define BOOTDESC_PREALLOC_SIZE 128
static char bootdesc_memory[BOOTDESC_PREALLOC_SIZE]; /* 128 bytes */
void print_bootdesc(struct bootdesc *bd)
{
for (int i = 0; i < bd->total_images; i++) {
printf("Task Image: %d\n", i);
printf("Name: %s\n", bd->images[i].name);
printf("Start: 0x%x\n", bd->images[i].phys_start);
printf("End: 0x%x\n", bd->images[i].phys_end);
}
}
void print_pfn_range(int pfn, int size)
{
unsigned int addr = pfn << PAGE_BITS;
unsigned int end = (pfn + size) << PAGE_BITS;
printf("Used: 0x%x - 0x%x\n", addr, end);
}
void print_page_map(struct page_bitmap *map)
{
unsigned int start_pfn = 0;
unsigned int total_used = 0;
int numpages = 0;
printf("Pages start at address 0x%x\n", map->pfn_start << PAGE_BITS);
printf("Pages end at address 0x%x\n", map->pfn_end << PAGE_BITS);
printf("The used page areas are:\n");
for (int i = 0; i < (PHYSMEM_TOTAL_PAGES >> 5); i++) {
for (int x = 0; x < WORD_BITS; x++) {
if (map->map[i] & (1 << x)) { /* A used page found? */
if (!start_pfn) /* First such page found? */
start_pfn = (WORD_BITS * i) + x;
total_used++;
numpages++; /* Increase number of pages */
} else { /* Either used pages ended or were never found */
if (start_pfn) { /* We had a used page */
/* Finished end of used range.
* Print and reset. */
print_pfn_range(start_pfn, numpages);
start_pfn = 0;
numpages = 0;
}
}
}
}
printf("Total of %d pages. %d Kbytes.\n", total_used, total_used << 2);
}
int request_initdata(struct initdata *initdata)
{
int err;
int bootdesc_size;
/* Read all used physical page information in a bitmap. */
if ((err = l4_kread(KDATA_PAGE_MAP, &initdata->page_map)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_PAGE_MAP request.\n");
goto error;
}
print_page_map(&initdata->page_map);
/* Read the boot descriptor size */
if ((err = l4_kread(KDATA_BOOTDESC_SIZE, &bootdesc_size)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_BOOTDESC_SIZE request.\n");
goto error;
}
if (bootdesc_size > BOOTDESC_PREALLOC_SIZE) {
printf("Insufficient preallocated memory for bootdesc. "
"Size too big.\n");
goto error;
}
/* Get preallocated bootdesc memory */
initdata->bootdesc = (struct bootdesc *)&bootdesc_memory;
/* Read the boot descriptor */
if ((err = l4_kread(KDATA_BOOTDESC, initdata->bootdesc)) < 0) {
printf("L4_kdata_read() call failed. Could not complete"
"KDATA_BOOTDESC request.\n");
goto error;
}
return 0;
error:
printf("FATAL: Inittask failed during initialisation. exiting.\n");
return err;
}

98
tasks/mm0/src/lib/bit.c Normal file

@@ -0,0 +1,98 @@
/*
* Bit manipulation functions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/bit.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <stdio.h>
#include INC_GLUE(memory.h)
/* Emulation of ARM's CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
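/*
 * Example: __clz(0x00010000) == 15, since bits 31..17 are the fifteen
 * leading zeroes; __clz(0) == 32, because the loop also stops at x == 32.
 */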
int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
int success = 0;
int i;
for(i = 0; i < limit; i++) {
/* Find first unset bit */
if (!(word[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i))) {
/* Set it */
word[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
success = 1;
break;
}
}
/* Return bit just set */
if (success)
return i;
else
return -1;
}
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
int nbits)
{
int i = 0, first = 0, last = 0, found = 0;
/* Can't allocate more than the limit */
if (nbits > limit)
return -1;
/* This is a state machine that checks n contiguous free bits. */
while (i + nbits < limit) {
first = i;
last = i;
while (!(word[BITWISE_GETWORD(last)] & BITWISE_GETBIT(last))) {
last++;
i++;
if (last == first + nbits) {
found = 1;
break;
}
}
if (found)
break;
i++;
}
/* If found, set the bits */
if (found) {
for (int x = first; x < first + nbits; x++)
word[BITWISE_GETWORD(x)] |= BITWISE_GETBIT(x);
return first;
} else
return -1;
}
int check_and_clear_bit(u32 *word, int bit)
{
/* Check that bit was set */
if (word[BITWISE_GETWORD(bit)] & BITWISE_GETBIT(bit)) {
word[BITWISE_GETWORD(bit)] &= ~BITWISE_GETBIT(bit);
return 0;
} else {
printf("Trying to clear already clear bit\n");
return -1;
}
}
int check_and_clear_contig_bits(u32 *word, int first, int nbits)
{
for (int i = first; i < first + nbits; i++)
if (check_and_clear_bit(word, i) < 0)
return -1;
return 0;
}


@@ -0,0 +1,63 @@
/*
* Used for thread and space ids.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/idpool.h>
#include <kmalloc/kmalloc.h>
#include INC_GLUE(memory.h)
#include <stdio.h>
struct id_pool *id_pool_new_init(int totalbits)
{
int nwords = BITWISE_GETWORD(totalbits);
struct id_pool *new = kzalloc((nwords * SZ_WORD)
+ sizeof(struct id_pool));
new->nwords = nwords;
return new;
}
int id_new(struct id_pool *pool)
{
int id = find_and_set_first_free_bit(pool->bitmap,
pool->nwords * WORD_BITS);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This finds n contiguous free ids, allocates and returns the first one */
int ids_new_contiguous(struct id_pool *pool, int numids)
{
int id = find_and_set_first_free_contig_bits(pool->bitmap,
pool->nwords * WORD_BITS,
numids);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This deletes a list of contiguous ids given the first one and number of ids */
int ids_del_contiguous(struct id_pool *pool, int first, int numids)
{
int ret;
if (pool->nwords * WORD_BITS < first + numids)
return -1;
if ((ret = check_and_clear_contig_bits(pool->bitmap, first, numids)))
printf("%s: Error: Invalid argument range.\n", __FUNCTION__);
return ret;
}
int id_del(struct id_pool *pool, int id)
{
int ret;
if (pool->nwords * WORD_BITS < id)
return -1;
if ((ret = check_and_clear_bit(pool->bitmap, id) < 0))
printf("%s: Error: Could not delete id.\n", __FUNCTION__);
return ret;
}

39
tasks/mm0/src/lib/vaddr.c Normal file

@@ -0,0 +1,39 @@
/*
* This module allocates an unused virtual address range for shm segments.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <lib/bit.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include <lib/vaddr.h>
#include <stdio.h>
void vaddr_pool_init(struct id_pool *pool, unsigned long start, unsigned long end)
{
pool = id_pool_new_init(__pfn(end - start));
}
void *vaddr_new(struct id_pool *pool, int npages)
{
unsigned int shm_vpfn;
if ((int)(shm_vpfn = ids_new_contiguous(pool, npages)) < 0)
return 0;
return (void *)__pfn_to_addr(shm_vpfn + SHM_AREA_START);
}
int vaddr_del(struct id_pool *pool, void *vaddr, int npages)
{
unsigned long idpfn = __pfn(page_align(vaddr) - SHM_AREA_START);
if (ids_del_contiguous(pool, idpfn, npages) < 0) {
printf("%s: Invalid address range returned to "
"virtual address pool.\n", __FUNCTION__);
return -1;
}
return 0;
}

92
tasks/mm0/src/memory.c Normal file

@@ -0,0 +1,92 @@
/*
* Initialise the memory structures.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <kdata.h>
#include <memory.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/generic/space.h>
#include <l4lib/arch/syslib.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <memory.h>
struct membank membank[1];
struct page *page_array;
void *phys_to_virt(void *addr)
{
return addr + INITTASK_OFFSET;
}
void *virt_to_phys(void *addr)
{
return addr - INITTASK_OFFSET;
}
/* Allocates page descriptors and initialises them using page_map information */
void init_physmem(struct initdata *initdata, struct membank *membank)
{
struct page_bitmap *pmap = &initdata->page_map;
int npages = pmap->pfn_end - pmap->pfn_start;
/* Allocation marks for the struct page array */
int pg_npages, pg_spfn, pg_epfn;
unsigned long ffree_addr;
/*
* A non-zero pfn_start means the page array won't map one-to-one to pfns.
* That's OK in principle, but we don't allow it for now.
*/
BUG_ON(pmap->pfn_start);
membank[0].start = __pfn_to_addr(pmap->pfn_start);
membank[0].end = __pfn_to_addr(pmap->pfn_end);
/* First find the first free page after last used page */
for (int i = 0; i < npages; i++)
if ((pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
membank[0].free = (i + 1) * PAGE_SIZE;
BUG_ON(membank[0].free >= membank[0].end);
/*
* One struct page for every physical page. Calculate how many pages
* needed for page structs, start and end pfn marks.
*/
pg_npages = __pfn((sizeof(struct page) * npages));
/* These are relative pfn offsets to the start of the memory bank */
pg_spfn = __pfn(membank[0].free) - __pfn(membank[0].start);
pg_epfn = pg_spfn + pg_npages;
/* Use free pages from the bank as the space for struct page array */
membank[0].page_array = l4_map_helper((void *)membank[0].free,
pg_npages);
/* Update free memory left */
membank[0].free += pg_npages * PAGE_SIZE;
/* Update page bitmap for the pages used for the page array */
for (int i = pg_spfn; i < pg_epfn; i++)
pmap->map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
/* Initialise the page array */
for (int i = 0; i < npages; i++) {
INIT_LIST_HEAD(&membank[0].page_array[i].list);
/* Set use counts for pages the kernel has already used up */
if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
membank[0].page_array[i].count = -1;
else /* Last page used +1 is free */
ffree_addr = (i + 1) * PAGE_SIZE;
}
/* First free address must come up the same for both */
BUG_ON(ffree_addr != membank[0].free);
/* Set global page array to this bank's array */
page_array = membank[0].page_array;
}

489
tasks/mm0/src/mmap.c Normal file

@@ -0,0 +1,489 @@
/*
* mmap/munmap and friends.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <vm_area.h>
#include <kmalloc/kmalloc.h>
#include INC_API(errno.h)
#include <posix/sys/types.h>
#include <task.h>
#include <mmap.h>
#include <l4lib/arch/syscalls.h>
static struct vm_file devzero;
/* Swap related bookkeeping.
static struct vm_file shm_swap_file;
static struct id_pool *swap_file_offset_pool;
*/
/* mmap system call implementation */
int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
int flags, int fd, off_t offset)
{
return 0;
}
/* TODO: This is to be implemented when fs0 is ready. */
int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *task)
{
// unsigned long npages = __pfn(size);
struct vm_area *vma = find_vma((unsigned long)addr,
&task->vm_area_list);
if (!vma)
return -EINVAL;
/* Must check if this is a shadow copy or not */
if (vma->flags & VMA_COW) {
; /* ... Fill this in. ... */
}
/* TODO:
* Flush the vma's pages back to their file. Perhaps add a dirty bit
* to the vma so that this can be completely avoided for clean vmas?
* For anon pages this is the swap file. For real file-backed pages
* its the real file. However, this can't be fully implemented yet since
* we don't have FS0 yet.
*/
return 0;
}
/*
* This releases a physical page struct from its owner and
* frees the page back to the page allocator.
*/
int page_release(struct page *page)
{
spin_lock(&page->lock);
page->count--;
BUG_ON(page->count < -1);
if (page->count == -1) {
/* Unlink the page from its owner's list */
list_del_init(&page->list);
/* Zero out the fields */
page->owner = 0;
page->flags = 0;
page->f_offset = 0;
page->virtual = 0;
/*
* No refs to page left, and since every physical memory page
* comes from the page allocator, we return it back.
*/
free_page((void *)page_to_phys(page));
}
spin_unlock(&page->lock);
return 0;
}
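/*
* Refcount convention sketch (inferred from page_release() and
* init_physmem(), not normative): count == -1 means the page is free,
* 0 means a single user, and each extra mapping increments it:
*
*	page->count = 0;	first user maps the page in (fault path, not shown)
*	page->count++;		a second vma/task references it
*	page_release(page);	drops to 0, still in use
*	page_release(page);	drops to -1, page returned to the allocator
*/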
/*
* Freeing and unmapping of vma pages:
*
* For a vma that is about to be split, shrunk or destroyed, this function
* finds out about the physical pages in memory that represent the vma,
* reduces their refcount, and if they're unused, frees them back to the
* physical page allocator, and finally unmaps those corresponding virtual
* addresses from the unmapper task's address space. This sequence is
* somewhat a rewinding of the actions that the page fault handler takes
* when the vma was faulted by the process.
*/
int vma_release_pages(struct vm_area *vma, struct tcb *task,
unsigned long pfn_start, unsigned long pfn_end)
{
unsigned long f_start, f_end;
struct page *page, *n;
/* Assume vma->pfn_start is lower than or equal to pfn_start */
BUG_ON(vma->pfn_start > pfn_start);
/* Assume vma->pfn_end is higher or equal to pfn_end */
BUG_ON(vma->pfn_end < pfn_end);
/* Find the file offsets of the range to be freed. */
f_start = vma->f_offset + pfn_start - vma->pfn_start;
f_end = vma->f_offset + pfn_end - vma->pfn_start;
list_for_each_entry_safe(page, n, &vma->owner->page_cache_list, list) {
if (page->f_offset >= f_start && page->f_offset < f_end) {
l4_unmap((void *)virtual(page), 1, task->tid);
page_release(page);
}
}
return 0;
}
int vma_unmap(struct vm_area **orig, struct vm_area **new,
unsigned long, unsigned long, struct tcb *);
/*
* This is called by every vma modifier function in vma_unmap(). This in turn
* calls vma_unmap recursively to modify the shadow vmas, the same way the
* actual vmas get modified. Only COW vmas would need to do this recursion
* and the max level of recursion is one, since only one level of shadows exists.
*/
int vma_unmap_shadows(struct vm_area *vma, struct tcb *task, unsigned long pfn_start,
unsigned long pfn_end)
{
struct vm_area *shadow, *n;
/* Now do all shadows */
list_for_each_entry_safe(shadow, n, &vma->shadow_list,
shadow_list) {
BUG_ON(!(vma->flags & VMA_COW));
if (shadow->pfn_start >= pfn_start &&
shadow->pfn_end <= pfn_end) {
struct vm_area *split_shadow;
/* This may result in shrink/destroy/split of the shadow */
vma_unmap(&shadow, &split_shadow, pfn_start, pfn_end, task);
if (shadow && split_shadow)
list_add_tail(&split_shadow->list,
&shadow->list);
/* FIXME: Is this all to be done here??? Find what to do here. */
BUG();
}
}
return 0;
}
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
unsigned int flags, unsigned long f_offset,
struct vm_file *owner)
{
struct vm_area *vma;
/* Initialise new area */
if (!(vma = kzalloc(sizeof(struct vm_area))))
return 0;
vma->pfn_start = pfn_start;
vma->pfn_end = pfn_start + npages;
vma->flags = flags;
vma->f_offset = f_offset;
vma->owner = owner;
INIT_LIST_HEAD(&vma->list);
INIT_LIST_HEAD(&vma->shadow_list);
return vma;
}
/* TODO: vma_destroy/shrink/split should also handle swap file modification */
/* Frees and unlinks a vma from its list. TODO: Add list locking */
int vma_destroy(struct vm_area *vma, struct tcb *task)
{
struct vm_area *shadow, *n;
/* Release the vma pages */
vma_release_pages(vma, task, vma->pfn_start, vma->pfn_end);
/* Free all shadows, if any. */
list_for_each_entry_safe(shadow, n, &vma->shadow_list, list) {
/* Release all shadow pages */
vma_release_pages(shadow, task, shadow->pfn_start, shadow->pfn_end);
list_del(&shadow->list);
kfree(shadow);
}
/* Unlink and free the vma itself */
list_del(&vma->list);
if (kfree(vma) < 0)
BUG();
return 0;
}
/* This splits a vma, splitter region must be in the *middle* of original vma */
struct vm_area *vma_split(struct vm_area *vma, struct tcb *task,
unsigned long pfn_start, unsigned long pfn_end)
{
struct vm_area *new, *shadow, *n;
/* Allocate an uninitialised vma first */
if (!(new = vma_new(0, 0, 0, 0, 0)))
return 0;
/*
* Some sanity checks to show that splitter range does end up
* producing two smaller vmas.
*/
BUG_ON(vma->pfn_start >= pfn_start || vma->pfn_end <= pfn_end);
/* Release the pages before modifying the original vma */
vma_release_pages(vma, task, pfn_start, pfn_end);
new->pfn_end = vma->pfn_end;
new->pfn_start = pfn_end;
new->f_offset = vma->f_offset + new->pfn_start - vma->pfn_start;
vma->pfn_end = pfn_start;
new->flags = vma->flags;
new->owner = vma->owner;
/* Modify the shadows accordingly first. They may
* split/shrink or get completely destroyed or stay still. */
vma_unmap_shadows(vma, task, pfn_start, pfn_end);
/*
* Now split the modified shadows list between the two vmas:
* since the original COW vma has just been split, each shadow must be
* moved to whichever of the two resulting vmas its range now falls in.
*/
list_for_each_entry_safe(shadow, n, &vma->shadow_list,
shadow_list) {
BUG_ON(!(vma->flags & VMA_COW));
BUG_ON(!(new->flags & VMA_COW));
if (shadow->pfn_start >= new->pfn_start &&
shadow->pfn_end <= new->pfn_end) {
list_del_init(&shadow->list);
list_add(&shadow->list, &new->shadow_list);
} else
BUG_ON(!(shadow->pfn_start >= vma->pfn_start &&
shadow->pfn_end <= vma->pfn_end));
}
return new;
}
/*
* For written anonymous regions, swapfile segments are allocated
* dynamically. When vma regions are modified, these allocations must be
* readjusted along with the vma itself; this call handles that adjustment.
*/
int vma_swapfile_realloc(struct vm_area *vma, unsigned long pfn_start,
unsigned long pfn_end)
{
/* TODO: Reslot in swapfile */
BUG();
return 0;
}
/* This shrinks the vma from *one* end only, either start or end */
int vma_shrink(struct vm_area *vma, struct tcb *task, unsigned long pfn_start,
unsigned long pfn_end)
{
unsigned long diff;
BUG_ON(pfn_start >= pfn_end);
/* FIXME: Shadows are currently buggy - TBD */
if (!list_empty(&vma->shadow_list)) {
BUG();
vma_swapfile_realloc(vma, pfn_start, pfn_end);
return 0;
}
/* Release the pages before modifying the original vma */
vma_release_pages(vma, task, pfn_start, pfn_end);
/* Unmapped region is at the start of the vma */
if (pfn_start == vma->pfn_start && pfn_end < vma->pfn_end) {
diff = pfn_end - vma->pfn_start;
vma->f_offset += diff;
vma->pfn_start = pfn_end;
/* Unmapped region is at the end of the vma */
} else if (pfn_end == vma->pfn_end && pfn_start > vma->pfn_start) {
vma->pfn_end = pfn_start;
} else
BUG();
return vma_unmap_shadows(vma, task, pfn_start, pfn_end);
}
/*
* Unmaps the given region from a vma. Depending on the region and vma range,
* this may result in either shrinking, splitting or destruction of the vma.
*/
int vma_unmap(struct vm_area **actual, struct vm_area **split,
unsigned long pfn_start, unsigned long pfn_end, struct tcb *task)
{
struct vm_area *vma = *actual;
struct vm_area *vma_new = 0;
/* Split needed? */
if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end) {
if (!(vma_new = vma_split(vma, task, pfn_start, pfn_end)))
return -ENOMEM;
list_add_tail(&vma_new->list, &vma->list);
/* Shrink needed? */
} else if (((vma->pfn_start == pfn_start) && (vma->pfn_end > pfn_end))
|| ((vma->pfn_start < pfn_start) && (vma->pfn_end == pfn_end)))
vma_shrink(vma, task, pfn_start, pfn_end);
/* Destroy needed? */
else if ((vma->pfn_start >= pfn_start) && (vma->pfn_end <= pfn_end)) {
/* NOTE: VMA can't be referred after this point. */
vma_destroy(vma, task);
vma = 0;
} else
BUG();
/* Update actual pointers */
*actual = vma;
*split = vma_new;
return 0;
}
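/*
* Illustrative cases for vma_unmap() (pfn numbers are made up):
*
*	vma [10, 20), unmap [12, 15) -> split:   [10, 12) and [15, 20), *split set
*	vma [10, 20), unmap [10, 15) -> shrink:  [15, 20) remains
*	vma [10, 20), unmap [15, 20) -> shrink:  [10, 15) remains
*	vma [10, 20), unmap [10, 20) -> destroy: vma freed, *actual = 0
*/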
/* Unmaps given address range from its vma. Releases those pages in that vma. */
int do_munmap(void *vaddr, unsigned long size, struct tcb *task)
{
unsigned long npages = __pfn(size);
unsigned long pfn_start = __pfn(vaddr);
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma, *vma_new = 0;
int err;
/* Check if any such vma exists */
if (!(vma = find_vma((unsigned long)vaddr, &task->vm_area_list)))
return -EINVAL;
/*
* If end of the range is outside of the vma that has the start
* address, we ignore the rest and assume end is the end of that vma.
* TODO: Find out how posix handles this.
*/
if (pfn_end > vma->pfn_end) {
printf("%s: %s: Warning, unmap end 0x%x beyond vma range. "
"Ignoring.\n", __TASKNAME__, __FUNCTION__,
__pfn_to_addr(pfn_end));
pfn_end = vma->pfn_end;
}
if ((err = vma_unmap(&vma, &vma_new, pfn_start, pfn_end, task)) < 0)
return err;
#if 0
mod_phys_pages:
/* The stage where the actual pages are unmapped from the page tables */
pgtable_unmap:
/* TODO:
* - Find out if the vma is cow, and contains shadow vmas.
* - Remove and free shadow vmas or the real vma, or shrink them if applicable.
* - Free the swap file segment for the vma if vma is private (cow).
* - Reduce refcount for the in-memory pages.
* - If refcount is zero (they could be shared!), either add pages to some page
* cache, or simpler the better, free the actual pages back to the page allocator.
* - l4_unmap() the corresponding virtual region from the page tables.
*/
#endif
return 0;
}
static struct vm_area *
is_vma_mergeable(unsigned long pfn_start, unsigned long pfn_end,
unsigned int flags, struct vm_area *vma)
{
/* TODO:
* The swap implementation is too simple for now. The vmas on swap
* are stored non-sequentially, and adjacent vmas don't imply adjacent
* file position on swap. So at the moment merging swappable vmas
* doesn't make sense. But this is going to change in the future.
*/
if (vma->flags & VMA_COW) {
BUG();
/* FIXME: XXX: Think about this! */
}
/* Check for vma adjacency */
if ((vma->pfn_start == pfn_end) && (vma->flags == flags))
return vma;
if ((vma->pfn_end == pfn_start) && (vma->flags == flags))
return vma;
return 0;
}
/*
* Finds an unmapped virtual memory area for the given parameters. If it
* overlaps with an existing vma, it returns -1, if it is adjacent to an
* existing vma and the flags match, it returns the adjacent vma. Otherwise it
* returns 0.
*/
int find_unmapped_area(struct vm_area **existing, struct vm_file *file,
unsigned long pfn_start, unsigned long npages,
unsigned int flags, struct list_head *vm_area_head)
{
struct vm_area *vma;
unsigned long pfn_end = pfn_start + npages;
*existing = 0;
list_for_each_entry(vma, vm_area_head, list) {
/* Check overlap: the ranges intersect if each starts before the other ends */
if ((pfn_start < vma->pfn_end) && (vma->pfn_start < pfn_end)) {
printf("%s: VMAs overlap.\n", __FUNCTION__);
return -1; /* Overlap */
}
if (is_vma_mergeable(pfn_start, pfn_end, flags, vma)) {
*existing = vma;
return 0;
}
}
return 0;
}
/*
* Maps the given file with given flags at the given page offset to the given
* task's address space at the specified virtual memory address and length.
*
* The actual paging in/out of the file from/into memory pages is handled by
* the file's pager upon page faults.
*/
int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages)
{
struct vm_area *vma;
unsigned long pfn_start = __pfn(map_address);
if (!mapfile) {
if (flags & VMA_ANON) {
mapfile = &devzero;
f_offset = 0;
} else
BUG();
} else if (pages > (__pfn(page_align_up(mapfile->length)) - f_offset)) {
printf("%s: Trying to map %d pages from page %d, "
"but file length is %d\n", __FUNCTION__, pages,
f_offset, __pfn(page_align_up(mapfile->length)));
return -EINVAL;
}
printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__, map_address,
map_address + pages * PAGE_SIZE);
/* See if it overlaps or is mergeable to an existing vma. */
if (find_unmapped_area(&vma, mapfile, pfn_start, pages, flags,
&t->vm_area_list) < 0)
return -EINVAL; /* Indicates overlap. */
/* Mergeable vma returned? */
if (vma) {
if (vma->pfn_end == pfn_start)
vma->pfn_end = pfn_start + pages;
else {
vma->f_offset -= vma->pfn_start - pfn_start;
/* Check if adjusted yields the original */
BUG_ON(vma->f_offset != f_offset);
vma->pfn_start = pfn_start;
}
} else { /* Initialise new area */
if (!(vma = vma_new(pfn_start, pages, flags, f_offset,
mapfile)))
return -ENOMEM;
list_add(&vma->list, &t->vm_area_list);
}
return 0;
}
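/*
* Usage sketch (illustrative, with a made-up address): mapping four
* anonymous, shared pages into a task at a fixed virtual address.
*
*	err = do_mmap(0, 0, task, 0x40000000,
*		      VM_READ | VM_WRITE | VMA_ANON | VMA_SHARED, 4);
*
* A negative return is either -EINVAL (range overlaps an existing vma
* or exceeds the file) or -ENOMEM (no memory for the new vma).
*/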

229
tasks/mm0/src/shm.c Normal file
View File

@@ -0,0 +1,229 @@
/*
* Copyright (C) 2007 Bahadir Balban
*
* Posix shared memory implementation
*/
#include <shm.h>
#include <stdio.h>
#include <task.h>
#include <mmap.h>
#include <l4/lib/string.h>
#include <kmalloc/kmalloc.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <lib/idpool.h>
#include <lib/vaddr.h>
#include <lib/spinlock.h>
#include <l4/api/errno.h>
#include <l4/lib/list.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <posix/sys/ipc.h>
#include <posix/sys/shm.h>
#include <posix/sys/types.h>
/* The list of shared memory areas that are already set up and working */
static struct list_head shm_desc_list;
/* The single global in-memory swap file for shared memory segments */
/* Unique shared memory ids */
static struct id_pool *shm_ids;
/* Globally disjoint shm virtual address pool */
static struct id_pool *shm_vaddr_pool;
void shm_init()
{
INIT_LIST_HEAD(&shm_desc_list);
/* Initialise shm id pool */
shm_ids = id_pool_new_init(SHM_AREA_MAX);
/* Initialise the global shm virtual address pool */
vaddr_pool_init(shm_vaddr_pool, SHM_AREA_START, SHM_AREA_END);
}
/*
* TODO:
* Implement a way to return IPC results, i.e. the sender always does
* ipc_sendrecv() and blocks on its own receive queue. The server then
* responds without blocking.
*
* Later on: mmap can be done using vm_areas and physical pages can be accessed through vm_areas.
*/
static int do_shmat(struct shm_descriptor *shm, void *shm_addr, int shmflg,
l4id_t tid)
{
struct tcb *task = find_task(tid);
int err;
if (!task) {
printf("%s:%s: Cannot find caller task with tid %d\n",
__TASKNAME__, __FUNCTION__, tid);
BUG();
}
/*
* Currently shared memory base addresses are the same among all
* processes for every unique shm segment. They line up more easily on
* the shm swap file this way. Also, the shm_addr argument is currently
* ignored, and mm0 allocates shm segment addresses.
*/
if (shm->shm_addr)
shm_addr = shm->shm_addr;
else {
shm_addr = vaddr_new(shm_vaddr_pool, __pfn(shm->size));
/* Record the segment address so later attaches and detaches find it */
shm->shm_addr = shm_addr;
}
BUG_ON(!is_page_aligned(shm_addr));
/*
* mmap the area to the process as shared. Page fault handler would
* handle allocating and paging-in the shared pages.
*
* For anon && shared pages do_mmap() handles allocation of the
* shm swap file and the file offset for the segment. The segment can
* be identified because segment virtual address is globally unique
* per segment and it's the same for all the system tasks.
*/
if ((err = do_mmap(0, 0, task, (unsigned long)shm_addr,
VM_READ | VM_WRITE | VMA_ANON | VMA_SHARED,
__pfn(page_align_up(shm->size)))) < 0) {
printf("do_mmap: Mapping shm area failed with %d.\n", err);
BUG();
} else
printf("%s: %s: Success.\n", __TASKNAME__, __FUNCTION__);
/* Now update the shared memory descriptor */
shm->refcnt++;
return 0;
}
void *sys_shmat(l4id_t requester, l4id_t shmid, void *shmaddr, int shmflg)
{
struct shm_descriptor *shm_desc, *n;
int err;
list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
if (shm_desc->shmid == shmid) {
if ((err = do_shmat(shm_desc, shmaddr,
shmflg, requester)) < 0) {
l4_ipc_return(err);
return 0;
} else
break;
}
}
l4_ipc_return(0);
return 0;
}
int do_shmdt(struct shm_descriptor *shm, l4id_t tid)
{
struct tcb *task = find_task(tid);
int err;
if (!task) {
printf("%s:%s: Internal error. Cannot find task with tid %d\n",
__TASKNAME__, __FUNCTION__, tid);
BUG();
}
if ((err = do_munmap(shm->shm_addr, shm->size, task)) < 0) {
printf("do_munmap: Unmapping shm segment failed with %d.\n",
err);
BUG();
}
return err;
}
int sys_shmdt(l4id_t requester, const void *shmaddr)
{
struct shm_descriptor *shm_desc, *n;
int err;
list_for_each_entry_safe(shm_desc, n, &shm_desc_list, list) {
if (shm_desc->shm_addr == shmaddr) {
if ((err = do_shmdt(shm_desc, requester)) < 0) {
l4_ipc_return(err);
return 0;
} else
break;
}
}
l4_ipc_return(0);
return 0;
}
static struct shm_descriptor *shm_new(key_t key)
{
/* It doesn't exist, so create a new one */
struct shm_descriptor *shm_desc;
if (!(shm_desc = kzalloc(sizeof(struct shm_descriptor))))
return 0;
if ((shm_desc->shmid = id_new(shm_ids)) < 0) {
kfree(shm_desc);
return 0;
}
shm_desc->key = (int)key;
INIT_LIST_HEAD(&shm_desc->list);
list_add(&shm_desc->list, &shm_desc_list);
return shm_desc;
}
int sys_shmget(key_t key, int size, int shmflg)
{
struct shm_descriptor *shm_desc;
/* First check argument validity */
if (size > SHM_SHMMAX || size < SHM_SHMMIN) {
l4_ipc_return(-EINVAL);
return 0;
}
/*
* IPC_PRIVATE means create a no-key shm area, i.e. private to this
* process so that it would only share it with its descendants.
*/
if (key == IPC_PRIVATE) {
key = -1; /* Our meaning of no key */
if (!(shm_desc = shm_new(key)))
l4_ipc_return(-ENOSPC);
else {
shm_desc->size = size;
l4_ipc_return(shm_desc->shmid);
}
return 0;
}
list_for_each_entry(shm_desc, &shm_desc_list, list) {
if (shm_desc->key == key) {
/*
* Exclusive means create request
* on existing key should fail.
*/
if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
l4_ipc_return(-EEXIST);
else
/* Found it but do we have a size problem? */
if (shm_desc->size < size)
l4_ipc_return(-EINVAL);
else /* Return shmid of the existing key */
l4_ipc_return(shm_desc->shmid);
return 0;
}
}
/* Key doesn't exist and create is set, so we create */
if (shmflg & IPC_CREAT) {
if (!(shm_desc = shm_new(key)))
l4_ipc_return(-ENOSPC);
else {
shm_desc->size = size;
l4_ipc_return(shm_desc->shmid);
}
} else /* Key doesn't exist, yet create isn't set; it's an -ENOENT */
l4_ipc_return(-ENOENT);
return 0;
}
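/*
* Flow sketch (illustrative): a client key first goes through
* sys_shmget(), which creates or finds the descriptor and replies with
* its shmid over IPC; a later sys_shmat() with that shmid then mmaps
* the segment into the requesting task:
*
*	sys_shmget(key, 4 * PAGE_SIZE, IPC_CREAT);	(replies with shmid)
*	sys_shmat(requester, shmid, 0, 0);		(attaches the segment)
*/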

28
tasks/mm0/src/stack.c Normal file
View File

@@ -0,0 +1,28 @@
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <string.h>
/* The initial temporary stack used until memory is set up */
__attribute__ ((section("init.stack"))) char stack[4096];
extern unsigned long __stack[]; /* Linker defined */
/* Moves from the temporary stack to its actual location. */
void move_stack()
{
register unsigned int sp asm("sp");
register unsigned int fp asm("r11");
unsigned int stack_offset = (unsigned long)__stack - sp;
unsigned int frame_offset = (unsigned long)__stack - fp;
/* Copy the live stack contents into the new stack area. NOTE: This
* might demand-page the new stack, but maybe that won't work. */
memcpy((void *)(USER_AREA_END - stack_offset), (void *)sp, stack_offset);
sp = USER_AREA_END - stack_offset;
fp = USER_AREA_END - frame_offset;
}
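/*
* Worked example (made-up numbers): if __stack is 0x80000 and sp is
* currently 0x7FF40, stack_offset is 0xC0 bytes of live stack. Those
* bytes are copied to USER_AREA_END - 0xC0, and sp/fp are rebased by
* the same offsets so the frame layout is preserved.
*/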

149
tasks/mm0/src/task.c Normal file
View File

@@ -0,0 +1,149 @@
/*
* Task management.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include <l4/api/thread.h>
#include INC_GLUE(memory.h)
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <lib/vaddr.h>
#include <task.h>
#include <kdata.h>
#include <kmalloc/kmalloc.h>
#include <string.h>
#include <vm_area.h>
#include <memory.h>
struct tcb_head {
struct list_head list;
int total; /* Total threads */
} tcb_head;
struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &tcb_head.list, list)
if (t->tid == tid)
return t;
return 0;
}
#if 0
void dump_tasks(void)
{
struct tcb *t;
list_for_each_entry(t, &tcb_head.list, list) {
printf("Task %s: id/spid: %d/%d\n", &t->name[0], t->tid, t->spid);
printf("Task vm areas:\n");
dump_vm_areas(t);
printf("Task swapfile:\n");
dump_task_swapfile(t);
}
}
#endif
void create_init_tcbs(struct initdata *initdata)
{
struct bootdesc *bd = initdata->bootdesc;
INIT_LIST_HEAD(&tcb_head.list);
for (int i = BOOTDESC_IMAGE_START; i < bd->total_images; i++) {
struct tcb *task = kzalloc(sizeof(struct tcb));
/* Count each boot task as it is created */
tcb_head.total++;
/* Ids will be acquired from the kernel */
task->tid = TASK_ID_INVALID;
task->spid = TASK_ID_INVALID;
task->swap_file = kzalloc(sizeof(struct vm_file));
task->swap_file->pager = &swap_pager;
vaddr_pool_init(&task->swap_file_offset_pool, 0,
TASK_SWAPFILE_MAXSIZE);
INIT_LIST_HEAD(&task->swap_file->page_cache_list);
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->vm_area_list);
list_add_tail(&task->list, &tcb_head.list);
}
}
int start_init_tasks(struct initdata *initdata)
{
struct tcb *task;
int err;
int i = BOOTDESC_IMAGE_START;
list_for_each_entry(task, &tcb_head.list, list) {
struct vm_file *file = &initdata->memfile[i++];
unsigned int sp = align(USER_AREA_END - 1, 8);
unsigned int pc = USER_AREA_START;
struct task_ids ids = { .tid = task->tid, .spid = task->spid };
/* mmap each task's physical image to task's address space. */
if ((err = do_mmap(file, 0, task, USER_AREA_START,
VM_READ | VM_WRITE | VM_EXEC,
__pfn(page_align_up(file->length)))) < 0) {
printf("do_mmap: failed with %d.\n", err);
goto error;
}
/* mmap each task's stack as single page anonymous memory. */
if ((err = do_mmap(0, 0, task, USER_AREA_END - PAGE_SIZE,
VM_READ | VM_WRITE | VMA_ANON, 1)) < 0) {
printf("do_mmap: Mapping stack failed with %d.\n", err);
goto error;
}
/* mmap each task's utcb as single page anonymous memory. */
if ((err = do_mmap(0, 0, task, (unsigned long)__L4_ARM_Utcb(),
VM_READ | VM_WRITE | VMA_ANON, 1)) < 0) {
printf("do_mmap: Mapping utcb failed with %d.\n", err);
goto error;
}
printf("Creating new thread.\n");
/* Create the thread structures and address space */
if ((err = l4_thread_control(THREAD_CREATE, &ids)) < 0) {
printf("l4_thread_control failed with %d.\n", err);
goto error;
}
printf("New task with id: %d, space id: %d\n", ids.tid, ids.spid);
/* Use returned space and thread ids. */
task->tid = ids.tid;
task->spid = ids.spid;
/* Set up the task's thread details, (pc, sp, pager etc.) */
if ((err = l4_exchange_registers(pc, sp, self_tid(), task->tid)) < 0) {
printf("l4_exchange_registers failed with %d.\n", err);
goto error;
}
printf("Starting task with id %d\n", task->tid);
/* Start the thread */
if ((err = l4_thread_control(THREAD_RUN, &ids)) < 0) {
printf("l4_thread_control failed with %d\n", err);
goto error;
}
}
return 0;
error:
BUG();
return err;
}
void init_pm(struct initdata *initdata)
{
create_init_tcbs(initdata);
start_init_tasks(initdata);
}

49
tasks/mm0/src/zpage.c Normal file
View File

@@ -0,0 +1,49 @@
/*
* Handling of the special zero page.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <memory.h>
#include <mm/alloc_page.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <string.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <l4/generic/space.h>
#include <arch/mm.h>
static void *zpage_p;
static struct page *zpage;
void init_zero_page(void)
{
void *zpage_v;
zpage_p = alloc_page(1);
zpage = phys_to_page(zpage_p);
/* Map it to self */
zpage_v = l4_map_helper(zpage_p, 1);
/* Zero it */
memset(zpage_v, 0, PAGE_SIZE);
/* Unmap it */
l4_unmap_helper(zpage_v, 1);
/* Update page struct. All other fields are zero */
zpage->count++;
}
void *get_zero_page(void)
{
zpage->count++;
return zpage_p;
}
void put_zero_page(void)
{
zpage->count--;
BUG_ON(zpage->count < 0);
}
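/*
* Usage sketch (illustrative): a pager handling a read fault on an
* untouched anonymous page could hand out the shared zero page instead
* of allocating a fresh one, as long as the references stay balanced:
*
*	void *zphys = get_zero_page();	(count goes up)
*	... map zphys read-only into the faulting task (pager-specific) ...
*	put_zero_page();	(when the zero mapping is replaced by a private copy)
*/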

View File

@@ -0,0 +1,98 @@
/*
* Bit manipulation functions.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include "bit.h"
#include <stdio.h>
/* Emulation of ARM's CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
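/*
* Example: __clz(0x00010000) returns 15, since bits 31..17 are zero
* before the first set bit at position 16; __clz(0) returns 32.
*/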
int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
int success = 0;
int i;
for(i = 0; i < limit; i++) {
/* Find first unset bit */
if (!(word[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i))) {
/* Set it */
word[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
success = 1;
break;
}
}
/* Return bit just set */
if (success)
return i;
else
return -1;
}
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
int nbits)
{
int i = 0, first = 0, last = 0, found = 0;
/* Can't allocate more than the limit */
if (nbits > limit)
return -1;
/* This is a state machine that checks n contiguous free bits. */
while (i < limit) {
first = i;
last = i;
while (!(word[BITWISE_GETWORD(last)] & BITWISE_GETBIT(last))) {
last++;
i++;
if (last == first + nbits) {
found = 1;
break;
}
if (i == limit)
break;
}
if (found)
break;
i++;
}
/* If found, set the bits */
if (found) {
for (int x = first; x < first + nbits; x++)
word[BITWISE_GETWORD(x)] |= BITWISE_GETBIT(x);
return first;
} else
return -1;
}
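/*
* Worked example (illustrative): with limit = 8 and word[0] = 0x06
* (bits 1 and 2 set), a request for 3 contiguous bits skips the lone
* free bit 0, finds the free run starting at bit 3, sets bits 3-5 and
* returns 3, leaving word[0] = 0x3E.
*/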
int check_and_clear_bit(u32 *word, int bit)
{
/* Check that bit was set */
if (word[BITWISE_GETWORD(bit)] & BITWISE_GETBIT(bit)) {
word[BITWISE_GETWORD(bit)] &= ~BITWISE_GETBIT(bit);
return 0;
} else {
printf("Trying to clear already clear bit\n");
return -1;
}
}
int check_and_clear_contig_bits(u32 *word, int first, int nbits)
{
for (int i = first; i < first + nbits; i++)
if (check_and_clear_bit(word, i) < 0)
return -1;
return 0;
}

View File

@@ -0,0 +1,51 @@
#ifndef __LIB_BIT_H__
#define __LIB_BIT_H__
/* Minimum excess needed for word alignment */
#define SZ_WORD sizeof(unsigned int)
#define WORD_BITS 32
#define WORD_BITS_LOG2 5
#define BITWISE_GETWORD(x) ((x) >> WORD_BITS_LOG2) /* Divide by 32 */
#define BITWISE_GETBIT(x) (1 << ((x) % WORD_BITS))
typedef unsigned int u32;
unsigned int __clz(unsigned int bitvector);
int find_and_set_first_free_bit(u32 *word, unsigned int lastbit);
int check_and_clear_bit(u32 *word, int bit);
int check_and_clear_contig_bits(u32 *word, int first, int nbits);
int find_and_set_first_free_contig_bits(u32 *word, unsigned int limit,
int nbits);
/* Set */
static inline void setbit(unsigned int *w, unsigned int flags)
{
*w |= flags;
}
/* Clear */
static inline void clrbit(unsigned int *w, unsigned int flags)
{
*w &= ~flags;
}
/* Test */
static inline int tstbit(unsigned int *w, unsigned int flags)
{
return *w & flags;
}
/* Test and clear */
static inline int tstclr(unsigned int *w, unsigned int flags)
{
int res = tstbit(w, flags);
clrbit(w, flags);
return res;
}
#endif /* __LIB_BIT_H__ */

View File

@@ -0,0 +1,63 @@
/*
* Used for thread and space ids.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include "idpool.h"
#include <stdio.h>
#include <stdlib.h>
struct id_pool *id_pool_new_init(int totalbits)
{
/* Round up so a partial trailing word still gets storage */
int nwords = BITWISE_GETWORD(totalbits + WORD_BITS - 1);
struct id_pool *new = calloc(1, (nwords * SZ_WORD)
+ sizeof(struct id_pool));
new->nwords = nwords;
return new;
}
int id_new(struct id_pool *pool)
{
int id = find_and_set_first_free_bit(pool->bitmap,
pool->nwords * WORD_BITS);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This finds n contiguous free ids, allocates and returns the first one */
int ids_new_contiguous(struct id_pool *pool, int numids)
{
printf("%s: Enter\n", __FUNCTION__);
int id = find_and_set_first_free_contig_bits(pool->bitmap,
pool->nwords * WORD_BITS,
numids);
if (id < 0)
printf("%s: Warning! New id alloc failed\n", __FUNCTION__);
return id;
}
/* This deletes a list of contiguous ids given the first one and number of ids */
int ids_del_contiguous(struct id_pool *pool, int first, int numids)
{
int ret;
printf("bits: %d, first +nids: %d\n", pool->nwords *WORD_BITS, first+numids);
if (pool->nwords * WORD_BITS < first + numids)
return -1;
if ((ret = check_and_clear_contig_bits(pool->bitmap, first, numids)))
printf("Warning!!!\n");
return ret;
}
int id_del(struct id_pool *pool, int id)
{
int ret;
if (pool->nwords * WORD_BITS <= id)
return -1;
if ((ret = check_and_clear_bit(pool->bitmap, id)) < 0)
printf("Warning!!!\n");
return ret;
}

View File

@@ -0,0 +1,17 @@
#ifndef __MM0_IDPOOL_H__
#define __MM0_IDPOOL_H__
#include "bit.h"
struct id_pool {
int nwords;
u32 bitmap[];
};
struct id_pool *id_pool_new_init(int mapsize);
int id_new(struct id_pool *pool);
int id_del(struct id_pool *pool, int id);
int ids_new_contiguous(struct id_pool *pool, int numids);
int ids_del_contiguous(struct id_pool *pool, int first, int numids);
#endif /* __MM0_IDPOOL_H__ */

View File

@@ -0,0 +1,58 @@
#include "bit.h"
#include "idpool.h"
#include <stdio.h>
#define CTOTAL 3
int main(int argc, char *argv[])
{
struct id_pool *pool = id_pool_new_init(64);
int id_array[64];
int first;
if ((first = ids_new_contiguous(pool, 64)) < 0)
printf("%d contig ids not allocated.\n", 64);
else
printf("%d contig ids allocated starting from %d\n", 64, first);
if (ids_del_contiguous(pool, 5, 60) == 0)
printf("%d contig ids starting at %d freed with success.\n", 60, 5);
else
printf("ids %d-%d could not be freed\n", 5, 64);
return 0;
}
/*
int main(int argc, char *argv[])
{
struct id_pool *pool = id_pool_new_init(64);
int id_array[64];
int first;
for (int i = 0; i < 64; i++) {
id_array[i] = id_new(pool);
printf("Allocated id: %d\n", id_array[i]);
}
if ((first = ids_new_contiguous(pool, CTOTAL)) < 0)
printf("%d contig ids not allocated as expected.\n", CTOTAL);
printf("Now freeing id_array[30 - 32]\n");
ids_del_contiguous(pool, id_array[30], 3);
ids_del_contiguous(pool, id_array[35], 9);
if ((first = ids_new_contiguous(pool, CTOTAL + 3)) < 0)
printf("%d contig ids not allocated.\n", CTOTAL + 3);
else
printf("%d contig ids allocated starting from %d\n", CTOTAL + 3, first);
if ((first = ids_new_contiguous(pool, CTOTAL)) < 0)
printf("Error: %d contig ids not allocated.\n", CTOTAL);
else
printf("%d contig ids allocated as expected starting from %d\n", CTOTAL, first);
return 0;
}
*/

View File

@@ -0,0 +1,28 @@
#!/usr/bin/python
import os
import sys
compiler_prefix = "arm-none-linux-gnueabi-"
objdump = "objdump"
command = "-t"
image_name = "inittask.axf"
linkoutput_file_suffix = "-linkinfo.txt"
linkoutput_file = image_name + linkoutput_file_suffix
def generate_bootdesc():
command = compiler_prefix + objdump + " -t " + image_name + " > " + linkoutput_file
print command
os.system(command)
f = open(linkoutput_file, "r")
while True:
line = f.readline()
if len(line) == 0:
break
if "_start" in line or "_end" in line:
print line
f.close()
if __name__ == "__main__":
generate_bootdesc()