Revised task initialisation; still revising mmap.

Bahadir Balban
2008-03-06 20:55:46 +00:00
parent 5681f3d1cb
commit 783904574d
12 changed files with 589 additions and 308 deletions

View File

@@ -19,6 +19,7 @@
struct initdata {
struct bootdesc *bootdesc;
struct page_bitmap page_map;
struct list_head boot_file_list;
};
extern struct initdata initdata;

View File

@@ -16,17 +16,11 @@ struct membank {
struct page *page_array;
};
extern struct membank membank[];
extern struct vm_file *swap_file;
void init_mm_descriptors(struct page_bitmap *page_map,
struct bootdesc *bootdesc, struct membank *membank);
void init_physmem(struct initdata *initdata, struct membank *membank);
int init_devzero(void);
struct vm_file *get_devzero(void);
-void *get_zero_page(void);
-void put_zero_page(void);
int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages);

View File

@@ -16,7 +16,13 @@
#define __TASKNAME__ __PAGERNAME__
-#define TASK_OFILES_MAX 32
+#define TASK_FILES_MAX 32
/* POSIX minimum is 4Kb */
#define DEFAULT_ENV_SIZE SZ_16KB
#define DEFAULT_STACK_SIZE SZ_16KB
#define DEFAULT_UTCB_SIZE PAGE_SIZE
struct vm_file;
@@ -26,13 +32,6 @@ struct file_descriptor {
struct vm_file *vmfile;
};
-struct proc_files {
-	struct vm_file *stackfile; /* ZI, private, devzero, then autogenerated */
-	struct vm_file *envfile; /* NON-ZI, private, autogenerated, then autogenerated */
-	struct vm_file *datafile; /* NON-ZI, private, real file, then autogenerated */
-	struct vm_file *bssfile; /* ZI private, devzero, then autogenerated */
-};
/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */
@@ -67,22 +66,17 @@ struct tcb {
/* UTCB address */
unsigned long utcb_address;
-	/* Task's private files */
-	struct proc_files proc_files;
/* Virtual memory areas */
struct list_head vm_area_list;
/* File descriptors for this task */
-	struct file_descriptor fd[TASK_OFILES_MAX];
+	struct file_descriptor fd[TASK_FILES_MAX];
};
struct tcb *find_task(int tid);
struct initdata;
void init_pm(struct initdata *initdata);
int start_init_tasks(struct initdata *initdata);
void dump_tasks(void);
void send_task_data(l4id_t requester);

View File

@@ -22,15 +22,14 @@
#define VM_PROT_MASK (VM_READ | VM_WRITE | VM_EXEC)
/* Shared copy of a file */
-#define VMA_SHARED (1 << 3)
+#define VMA_SHARED (1 << 4)
/* VMA that's not file-backed, always maps devzero as VMA_COW */
-#define VMA_ANONYMOUS (1 << 4)
+#define VMA_ANONYMOUS (1 << 5)
/* Private copy of a file */
-#define VMA_PRIVATE (1 << 5)
+#define VMA_PRIVATE (1 << 6)
/* Copy-on-write semantics */
-#define VMA_COW (1 << 6)
-/* A vm object that is a shadow of another */
-#define VMOBJ_SHADOW (1 << 7)
+#define VMA_COW (1 << 7)
+#define VMA_FIXED (1 << 8)
struct page {
int count; /* Refcount */
@@ -60,8 +59,8 @@ struct fault_data {
};
struct vm_pager_ops {
-	int (*page_in)(struct vm_object *vm_obj, unsigned long f_offset);
-	int (*page_out)(struct vm_object *vm_obj, unsigned long f_offset);
+	struct page *(*page_in)(struct vm_object *vm_obj,
+				unsigned long pfn_offset);
+	int (*page_out)(struct vm_object *vm_obj, unsigned long pfn_offset);
};
/* Describes the pager task that handles a vm_area. */
@@ -69,14 +68,27 @@ struct vm_pager {
struct vm_pager_ops ops; /* The ops the pager does on area */
};
/* Defines the type of file. A device file? Regular file? One used at boot? */
enum VM_FILE_TYPE {
VM_FILE_DEVZERO = 0,
VM_FILE_REGULAR,
VM_FILE_BOOTFILE,
};
/* Defines the type of object. A file? Just a standalone object? */
enum VM_OBJ_TYPE {
-	VM_OBJ_SHADOW = 1,	/* Anonymous pages, swap_pager, no vm_file */
-	VM_OBJ_VNODE,		/* VFS file pages, vnode_pager, has vm_file */
-	VM_OBJ_DEVICE		/* Device pages, device_pager, has vm_file */
+	VM_OBJ_SHADOW = 1,	/* Anonymous pages, swap_pager */
+	VM_OBJ_FILE,		/* VFS file and device pages */
};
/* TODO:
* How to distinguish different devices handling page faults ???
* A possible answer:
*
* If they are not mmap'ed, this is handled by the vfs calling that file's
* specific operations (e.g. even calling the device process). If they're
* mmap'ed, they adhere to a standard mmap_device structure kept in
* vm_file->priv_data. This is used by the device pager to map those pages.
*/
@@ -94,24 +106,29 @@ struct vm_object {
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int type; /* Defines the type of the object */
struct list_head list; /* List of all vm objects in memory */
-	struct list_head page_cache;	/* List of in-memory pages */
-	struct vm_pager *pager;		/* The pager for this object */
+	struct vm_pager *pager;		/* The pager for this object */
+	struct list_head page_cache;	/* List of in-memory pages */
};
/* In-memory representation of either a vfs file or a device. */
struct vm_file {
unsigned long vnum;
unsigned long length;
unsigned int type;
struct list_head list;
struct vm_object vm_obj;
void *priv_data; /* Device pagers use this to access device info */
};
/* To create per-vma vm_object lists */
-struct vma_obj_list {
+struct vm_obj_link {
	struct list_head list;
	struct vm_object *obj;
};
#define vm_object_to_file(obj) \
(struct vm_file *)container_of(obj, struct vm_file, vm_obj)
@@ -119,9 +136,9 @@ struct vma_obj_list {
/*
 * Describes a virtually contiguous chunk of memory region in a task. It covers
 * a unique virtual address area within its task, meaning that it does not
 * overlap with other areas in the same task. The area may be backed by a
 * file or various other resources. This is managed by the region's pager.
 *
 * COW: Upon copy-on-write, each copy-on-write instance creates a shadow of the
- * original vma which supersedes the original vma with its copied modified pages.
- * This creates a stack of shadow vmas, where the top vma's copy of pages
- * supersede the ones lower in the stack.
+ * original vm object which supersedes the original vm object with its copied
+ * modified pages. This creates a stack of shadow vm objects, where the top
+ * object's copy of pages supersedes the ones lower in the stack.
 */
struct vm_area {
struct list_head list; /* Per-task vma list */
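The shadow stack described in the comment above implies a simple top-down lookup. A minimal sketch (illustrative only, not part of this commit; it assumes page_in returns a struct page pointer and uses the per-vma object list that vma_new() initialises later in this diff):

struct page *vma_lookup_page(struct vm_area *vma, unsigned long pfn_offset)
{
	struct vm_obj_link *link;
	struct page *page;

	/* Walk the vma's object stack top-down; the first object whose
	 * pager yields the page wins, so a shadow's copies supersede
	 * the objects (and ultimately the original file) below it. */
	list_for_each_entry(link, &vma->vm_obj_list, list)
		if ((page = link->obj->pager->ops.page_in(link->obj,
							  pfn_offset)))
			return page;

	return 0;
}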
@@ -148,10 +165,17 @@ static inline struct vm_area *find_vma(unsigned long addr,
int insert_page_olist(struct page *this, struct vm_object *vm_obj);
/* Pagers */
-extern struct vm_pager default_file_pager;
-extern struct vm_pager boot_file_pager;
extern struct vm_pager swap_pager;
+extern struct vm_pager file_pager;
+extern struct vm_pager bootfile_pager;
+extern struct vm_pager devzero_pager;
/* vm object and vm file lists */
extern struct list_head vm_object_list;
extern struct list_head vm_file_list;
/* vm file and object initialisation */
struct vm_file *vm_file_alloc_init(void);
struct vm_object *vm_object_alloc_init(void);
/* Main page fault entry point */
void page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);

View File

@@ -1,58 +0,0 @@
#define vm_object_to_file(obj) \
(struct vm_file *)container_of(obj, struct vm_file, vm_obj)
/* Returns the page with given offset in this vm_object */
struct page *devzero_pager_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *devzero = container_of(vm_obj, struct vm_file, vm_obj);
struct page *zpage = devzero->priv_data;
/* Update zero page struct. */
spin_lock(&page->lock);
zpage->count++;
spin_unlock(&page->lock);
return zpage;
}
struct page *device_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *dev_file;
BUG_ON(vm_obj->type != VM_OBJ_DEVICE);
dev_file = vm_object_to_file(vm_obj);
}
struct vm_pager device_pager {
.page_in = device_page_in,
};
LIST_HEAD(&boot_files);
/* From bare boot images, create mappable device files */
int init_boot_files(struct initdata *initdata)
{
int err;
struct svc_image *img;
unsigned int sp, pc;
struct tcb *task;
struct task_ids ids;
struct bootdesc *bd;
struct vm_file *boot_file;
bd = initdata->bootdesc;
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
boot_file = kzalloc(sizeof(struct vm_file));
INIT_LIST_HEAD(&boot_file->vm_obj.list);
boot_file->priv_data = img;
boot_file->length = img->phys_end - img->phys_start;
boot_file->pager = &device_pager;
list_add(&boot_file->vm_obj.list, &boot_files);
}
}

tasks/mm0/src/dev.c (new file, +45 lines)
View File

@@ -0,0 +1,45 @@
/*
 * This is as yet unused; it anticipates how mmap'ed
 * devices would be mapped through a pager.
 */
struct mmap_device {
	struct list_head page_list;	/* Dyn-allocated page list */
	unsigned long pfn_start;	/* Physical pfn start */
	unsigned long pfn_end;		/* Physical pfn end */
};
struct page *mmap_device_page_in(struct vm_object *vm_obj,
				 unsigned long pfn_offset)
{
	struct vm_file *f = vm_object_to_file(vm_obj);
	struct mmap_device *mmdev = f->priv_data;
	struct page *page;

	/* Check if it's within the device boundary */
	if (pfn_offset >= mmdev->pfn_end - mmdev->pfn_start)
		return PTR_ERR(-EINVAL);

	/* Simply return the page if found */
	list_for_each_entry(page, &mmdev->page_list, list)
		if (page->offset == pfn_offset)
			return page;

	/* Otherwise allocate one of our own for that offset and return it */
	page = kzalloc(sizeof(struct page));
	INIT_LIST_HEAD(&page->list);
	spin_lock_init(&page->lock);
	page->offset = pfn_offset;
	page->owner = vm_obj;
	page->flags = DEVICE_PAGE;
	list_add(&page->list, &mmdev->page_list);

	return page;
}
/* All mmap'able devices are handled by this pager */
struct vm_pager mmap_device_pager = {
	.ops.page_in = mmap_device_page_in,
};
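A sketch of how a device file might be wired to this pager (illustrative only; uart_file_create, UART_PHYS and UART_SIZE are made-up names, but the priv_data convention follows the TODO in vm_area.h):

struct vm_file *uart_file_create(void)
{
	struct vm_file *f = vm_file_alloc_init();
	struct mmap_device *mmdev = kzalloc(sizeof(struct mmap_device));

	INIT_LIST_HEAD(&mmdev->page_list);
	mmdev->pfn_start = __pfn(UART_PHYS);
	mmdev->pfn_end = __pfn(UART_PHYS + UART_SIZE);

	/* The device pager finds the physical window via priv_data */
	f->priv_data = mmdev;
	f->length = UART_SIZE;
	f->vm_obj.type = VM_OBJ_FILE;
	f->vm_obj.pager = &mmap_device_pager;

	return f;
}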

View File

@@ -48,13 +48,17 @@ void init_mm(struct initdata *initdata)
init_page_allocator(membank[0].free, membank[0].end);
printf("%s: Initialised page allocator.\n", __TASKNAME__);
+	/* Initialise the pager's memory allocator */
+	kmalloc_init();
+	printf("%s: Initialised kmalloc.\n", __TASKNAME__);
	/* Initialise the zero page */
	init_devzero();
	printf("%s: Initialised devzero.\n", __TASKNAME__);
-	/* Initialise the pager's memory allocator */
-	kmalloc_init();
-	printf("%s: Initialised kmalloc.\n", __TASKNAME__);
+	/* Initialise in-memory boot files */
+	init_boot_files(initdata);
+	printf("%s: Initialised in-memory boot files.\n", __TASKNAME__);
shm_init();
printf("%s: Initialised shm structures.\n", __TASKNAME__);

View File

@@ -135,21 +135,32 @@ int vma_unmap_shadows(struct vm_area *vma, struct tcb *task, unsigned long pfn_s
struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
			unsigned int flags, unsigned long f_offset,
-			struct vm_file *owner)
+			struct vm_file *mapfile)
{
	struct vm_area *vma;
+	struct vm_obj_link *obj_link;

-	/* Initialise new area */
+	/* Allocate new area */
	if (!(vma = kzalloc(sizeof(struct vm_area))))
		return 0;

+	/* Allocate vm object link */
+	if (!(obj_link = kzalloc(sizeof(struct vm_obj_link)))) {
+		kfree(vma);
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&vma->list);
+	INIT_LIST_HEAD(&vma->vm_obj_list);
	vma->pfn_start = pfn_start;
	vma->pfn_end = pfn_start + npages;
	vma->flags = flags;
	vma->f_offset = f_offset;
-	vma->owner = owner;
-	INIT_LIST_HEAD(&vma->list);
-	INIT_LIST_HEAD(&vma->shadow_list);
+	INIT_LIST_HEAD(&obj_link->list);
+	obj_link->obj = &mapfile->vm_obj;
+	list_add(&obj_link->list, &vma->vm_obj_list);

	return vma;
}
@@ -359,61 +370,54 @@ int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)
return do_munmap(vaddr, size, task);
}
-static struct vm_area *
-is_vma_mergeable(unsigned long pfn_start, unsigned long pfn_end,
-		 unsigned int flags, struct vm_area *vma)
+int vma_intersect(unsigned long pfn_start, unsigned long pfn_end,
+		  struct vm_area *vma)
{
-	/* TODO:
-	 * The swap implementation is too simple for now. The vmas on swap
-	 * are stored non-sequentially, and adjacent vmas don't imply adjacent
-	 * file position on swap. So at the moment merging swappable vmas
-	 * doesn't make sense. But this is going to change in the future.
-	 */
-	if (vma->flags & VMA_COW)
-		BUG();
-
-	/* Check for vma adjacency */
-	if ((vma->pfn_start == pfn_end) && (vma->flags == flags))
-		return vma;
-	if ((vma->pfn_end == pfn_start) && (vma->flags == flags))
-		return vma;
+	/* FIXME: XXX: Think about this! */
+	if ((pfn_start <= vma->pfn_start) && (pfn_end > vma->pfn_start)) {
+		printf("%s: VMAs overlap.\n", __FUNCTION__);
+		return 1;
+	}
+	if ((pfn_end >= vma->pfn_end) && (pfn_start < vma->pfn_end)) {
+		printf("%s: VMAs overlap.\n", __FUNCTION__);
+		return 1;
+	}
	return 0;
}
/*
- * Finds an unmapped virtual memory area for the given parameters. If it
- * overlaps with an existing vma, it returns -1, if it is adjacent to an
- * existing vma and the flags match, it returns the adjacent vma. Otherwise it
- * returns 0.
+ * Check if the region is unmapped. If not, another region ought to be
+ * supplied; if VMA_FIXED was given, return -EINVAL instead.
 */
-int find_unmapped_area(struct vm_area **existing, unsigned long pfn_start,
-		       unsigned long npages, unsigned int flags,
-		       struct list_head *vm_area_head)
+struct vm_area *
+find_unmapped_area(unsigned long pfn_start, unsigned long npages,
+		   unsigned int flags, struct vm_file *mapfile,
+		   unsigned long file_offset, struct tcb *task)
{
	unsigned long pfn_end = pfn_start + npages;
	struct vm_area *vma;

-	*existing = 0;
-	list_for_each_entry(vma, vm_area_head, list) {
-		/* Check overlap */
-		if ((vma->pfn_start <= pfn_start) &&
-		    (pfn_start < vma->pfn_end)) {
-			printf("%s: VMAs overlap.\n", __FUNCTION__);
-			return -1; /* Overlap */
-		}
-		if ((vma->pfn_start < pfn_end) &&
-		    (pfn_end < vma->pfn_end)) {
-			printf("%s: VMAs overlap.\n", __FUNCTION__);
-			return -1; /* Overlap */
-		}
-		if (is_vma_mergeable(pfn_start, pfn_end, flags, vma)) {
-			*existing = vma;
-			return 0;
-		}
-	}
-	return 0;
+	/*
+	 * We refuse all mapped areas for now; in the future refuse just
+	 * the process image areas, i.e. the stack, data and text.
+	 */
+	list_for_each_entry(vma, &task->vm_area_list, list) {
+		if (vma_intersect(pfn_start, pfn_end, vma)) {
+			/* TODO: If !VMA_FIXED, supply an alternate region */
+			return PTR_ERR(-EINVAL);
+		}
+	}
+
+	/*
+	 * They don't overlap; initialise and return a new
+	 * vma for the given region.
+	 */
+	if (!(vma = vma_new(pfn_start, npages, flags, file_offset, mapfile)))
+		return PTR_ERR(-ENOMEM);
+	else
+		return vma;
}
@@ -423,16 +427,15 @@ int find_unmapped_area(struct vm_area **existing, unsigned long pfn_start,
/*
 * The actual paging in/out of the file from/into memory pages is handled by
 * the file's pager upon page faults.
 */
-int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
+int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *task,
	    unsigned long map_address, unsigned int flags, unsigned int pages)
{
	struct vm_area *vma;
-	struct vm_object *vmobj;
	unsigned long pfn_start = __pfn(map_address);

	if (!mapfile) {
-		if (flags & VMA_ANON) {
-			vmobj = get_devzero();
+		if (flags & VMA_ANONYMOUS) {
+			mapfile = get_devzero();
			f_offset = 0;
		} else
			BUG();
@@ -444,17 +447,17 @@ int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
			       f_offset, __pfn(page_align_up(mapfile->length)));
			return -EINVAL;
		}
-
-		/* Set up a vm object for given file */
-		vmobj = vm_obj_alloc_init();
-		vmobj->priv.file = mapfile;
	}

	if (pages == 0) {
		printf("Trying to map %d pages.\n", pages);
		return -EINVAL;
	}

	printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__, map_address,
	       map_address + pages * PAGE_SIZE);

-	/* See if it overlaps or is mergeable to an existing vma. */
-	if (find_unmapped_area(&vma, pfn_start, pages, flags,
-			       &t->vm_area_list) < 0)
+	/* See if it overlaps with an existing vma. */
+	if (IS_ERR(vma = find_unmapped_area(pfn_start, pages, flags,
+					    mapfile, f_offset, task)))
		return -EINVAL; /* Indicates overlap. */
-
-	/* Mergeable vma returned? */
@@ -482,12 +485,14 @@ int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
int flags, int fd, unsigned long pfn)
{
unsigned long npages = __pfn(page_align_up(length));
-	struct tcb * task;
+	struct vm_file *file = 0;
+	unsigned int vmflags = 0;
+	struct tcb *task;
	int err;

	BUG_ON(!(task = find_task(sender)));

-	if (fd < 0 || fd > TASK_OFILES_MAX)
+	if ((fd < 0 && !(flags & MAP_ANONYMOUS)) || fd > TASK_FILES_MAX)
return -EINVAL;
if ((unsigned long)start < USER_AREA_START || (unsigned long)start >= USER_AREA_END)
@@ -498,8 +503,31 @@ int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
* Check that pfn + npages range is within the file range.
* Check that posix flags passed match those defined in vm_area.h
*/
-	if ((err = do_mmap(task->fd[fd].vmfile, __pfn_to_addr(pfn), task,
-			   (unsigned long)start, flags, npages)) < 0)
	if (flags & MAP_ANONYMOUS) {
file = 0;
vmflags |= VMA_ANONYMOUS;
} else {
file = task->fd[fd].vmfile;
}
if (flags & MAP_FIXED)
vmflags |= VMA_FIXED;
if (flags & MAP_PRIVATE)
/* This means COW, if writeable. */
vmflags |= VMA_PRIVATE;
else /* This also means COW, if writeable and anonymous */
vmflags |= VMA_SHARED;
if (prot & PROT_READ)
vmflags |= VM_READ;
if (prot & PROT_WRITE)
vmflags |= VM_WRITE;
if (prot & PROT_EXEC)
vmflags |= VM_EXEC;
if ((err = do_mmap(file, __pfn_to_addr(pfn), task,
(unsigned long)start, vmflags, npages)) < 0)
return err;
return 0;
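A worked example of the flag translation above (illustrative values only):

/*
 * A POSIX-style request
 *
 *	mmap(0, 8192, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
 *
 * arrives with length = 8192, hence npages = 2, and reaches do_mmap()
 * as file = 0 (devzero gets substituted inside do_mmap) with
 *
 *	vmflags = VMA_ANONYMOUS | VMA_PRIVATE | VM_READ | VM_WRITE
 *
 * i.e. a private anonymous region that goes copy-on-write when written.
 */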

tasks/mm0/src/pagers.c (new file, +185 lines)
View File

@@ -0,0 +1,185 @@
/*
* Copyright (C) 2008 Bahadir Balban
*/
#include <vm_area.h>
#include <kmalloc/kmalloc.h>
#include <memory.h>
#include <task.h>
struct page *file_page_in(struct vm_object *vm_obj, unsigned long page_offset)
{
	struct vm_file *f = vm_object_to_file(vm_obj);
	struct page *page;
	void *paddr, *vaddr;
	int err;

	/* The page is not resident in page cache. */
	if (!(page = find_page(vm_obj, page_offset))) {
		/* Allocate a new page */
		paddr = alloc_page(1);
		vaddr = phys_to_virt(paddr);
		page = phys_to_page(paddr);

		/* Map the page to the vfs task */
		l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, VFS_TID);

		/* Syscall to vfs to read into the page. */
		if ((err = vfs_read(f->vnum, page_offset, 1, vaddr)) < 0)
			goto out_err;

		/* Unmap it from vfs */
		l4_unmap(vaddr, 1, VFS_TID);

		/* Update vm object details */
		vm_obj->npages++;

		/* Update page details */
		spin_lock(&page->lock);
		page->count++;
		page->owner = vm_obj;
		page->offset = page_offset;
		page->virtual = 0;

		/* Add the page to owner's list of in-memory pages */
		BUG_ON(!list_empty(&page->list));
		insert_page_olist(page, vm_obj);
		spin_unlock(&page->lock);
	}

	return page;

out_err:
	l4_unmap(vaddr, 1, VFS_TID);
	free_page(paddr);
	return PTR_ERR(err);
}
/*
 * This reads in a range of pages from a file and populates the page
 * cache, just like a page fault would, but outside the fault path.
 */
int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
		    unsigned long pfn_end)
{
	for (unsigned long f_offset = pfn_start; f_offset < pfn_end; f_offset++)
		vmfile->vm_obj.pager->ops.page_in(&vmfile->vm_obj, f_offset);

	return 0;
}
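For instance, read_file_pages() could pre-fault an entire file before use (an illustrative call, not in this commit; boot_file stands for any in-memory vm_file):

	/* Populate the page cache for every page of the file */
	read_file_pages(boot_file, 0, __pfn(page_align_up(boot_file->length)));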
/*
 * All non-mmap'able char devices are handled by this;
 * VFS calls those devices to read their pages.
 */
struct vm_pager file_pager = {
	.ops.page_in = file_page_in,
};
/* Returns the page with given offset in this vm_object */
struct page *bootfile_page_in(struct vm_object *vm_obj,
			      unsigned long pfn_offset)
{
	struct vm_file *boot_file = vm_object_to_file(vm_obj);
	struct svc_image *img = boot_file->priv_data;
	struct page *page = phys_to_page(img->phys_start +
					 __pfn_to_addr(pfn_offset));

	spin_lock(&page->lock);
	page->count++;
	spin_unlock(&page->lock);

	return page;
}

struct vm_pager bootfile_pager = {
	.ops.page_in = bootfile_page_in,
};
LIST_HEAD(boot_file_list);

/* From bare boot images, create mappable boot files */
int init_boot_files(struct initdata *initdata)
{
	struct bootdesc *bd = initdata->bootdesc;
	struct svc_image *img;
	struct vm_file *boot_file;

	INIT_LIST_HEAD(&initdata->boot_file_list);

	for (int i = 0; i < bd->total_images; i++) {
		img = &bd->images[i];
		boot_file = vm_file_alloc_init();
		boot_file->priv_data = img;
		boot_file->length = img->phys_end - img->phys_start;
		boot_file->type = VM_FILE_BOOTFILE;

		/* Initialise the vm object */
		boot_file->vm_obj.type = VM_OBJ_FILE;
		boot_file->vm_obj.pager = &bootfile_pager;

		/* Add the object to the global vm_object list */
		list_add(&boot_file->vm_obj.list, &vm_object_list);

		/* Add the file to initdata's bootfile list */
		list_add(&boot_file->list, &initdata->boot_file_list);
	}

	return 0;
}
/* Returns the page with given offset in this vm_object */
struct page *devzero_page_in(struct vm_object *vm_obj,
			     unsigned long page_offset)
{
	struct vm_file *devzero = vm_object_to_file(vm_obj);
	struct page *zpage = devzero->priv_data;

	/* Update the zero page struct. */
	spin_lock(&zpage->lock);
	zpage->count++;
	spin_unlock(&zpage->lock);

	return zpage;
}

struct vm_pager devzero_pager = {
	.ops.page_in = devzero_page_in,
};

struct vm_file *get_devzero(void)
{
	struct vm_file *f;

	list_for_each_entry(f, &vm_file_list, list)
		if (f->type == VM_FILE_DEVZERO)
			return f;

	return 0;
}
int init_devzero(void)
{
	void *zphys, *zvirt;
	struct page *zpage;
	struct vm_file *devzero;

	/* Allocate and initialise the zero page */
	zphys = alloc_page(1);
	zpage = phys_to_page(zphys);
	zvirt = l4_map_helper(zphys, 1);
	memset(zvirt, 0, PAGE_SIZE);
	l4_unmap_helper(zvirt, 1);
	zpage->count++;

	/* Allocate and initialise the devzero file */
	devzero = vm_file_alloc_init();
	devzero->vm_obj.npages = ~0;
	devzero->vm_obj.pager = &devzero_pager;
	devzero->vm_obj.type = VM_OBJ_FILE;
	devzero->type = VM_FILE_DEVZERO;
	devzero->priv_data = zpage;

	list_add(&devzero->vm_obj.list, &vm_object_list);
	list_add(&devzero->list, &vm_file_list);

	return 0;
}
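Together with do_mmap() above, anonymous regions end up mapping this file, so read faults on them all resolve to the one shared zero page. A minimal sketch of that path (illustrative only; anon_fault_sketch is not part of this commit):

static struct page *anon_fault_sketch(unsigned long pfn_offset)
{
	struct vm_file *zero = get_devzero();

	/* Any offset yields the same refcounted zero page; a write
	 * would later have to COW it into a shadow object's copy. */
	return zero->vm_obj.pager->ops.page_in(&zero->vm_obj, pfn_offset);
}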

View File

@@ -1,7 +1,4 @@
/*
* Anonymous files for the process (e.g. stack, data, env)
* are implemented here.
*
* Copyright (C) 2008 Bahadir Balban
*/
#include <l4lib/types.h>
@@ -15,99 +12,6 @@
#include <task.h>
#include <proc.h>
static void *zpage_phys;
static struct page *zpage;
static struct vm_file *devzero;
/* TODO: Associate devzero with zero page */
void init_zero_page(void)
{
void *zpage_virt;
zpage_phys = alloc_page(1);
zpage = phys_to_page(zpage_phys);
/* Map it to self */
zpage_virt = l4_map_helper(zpage_phys, 1);
/* Zero it */
memset(zpage_virt, 0, PAGE_SIZE);
/* Unmap it */
l4_unmap_helper(zpage_virt, 1);
/* Update page struct. All other fields are zero */
spin_lock(&page->lock);
zpage->count++;
spin_unlock(&page->lock);
}
#define VM_OBJ_MASK 0xFFFF
#define VM_OBJ_DEVZERO (1 << 0) /* Devzero special file */
#define VM_OBJ_FILE (1 << 1) /* Regular VFS file */
#define VM_OBJ_SHADOW (1 << 2) /* Shadow of another object */
#define VM_OBJ_COW (1 << 3) /* Copy-on-write semantics */
struct vm_object *get_devzero(void)
{
return &devzero;
}
struct page *get_zero_page(void)
{
/* Update zero page struct. */
spin_lock(&page->lock);
zpage->count++;
spin_unlock(&page->lock);
return zpage;
}
void put_zero_page(void)
{
spin_lock(&page->lock);
zpage->count--;
spin_unlock(&page->lock);
BUG_ON(zpage->count < 0);
}
#define vm_object_to_file(obj) \
(struct vm_file *)container_of(obj, struct vm_file, vm_obj)
/* Returns the page with given offset in this vm_object */
struct page *devzero_pager_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *devzero = container_of(vm_obj, struct vm_file, vm_obj);
struct page *zpage = devzero->priv_data;
/* Update zero page struct. */
spin_lock(&page->lock);
zpage->count++;
spin_unlock(&page->lock);
return zpage;
}
struct vm_pager devzero_pager {
.page_in = devzero_pager_page_in,
};
void init_devzero(void)
{
init_zero_page();
INIT_LIST_HEAD(&devzero.list);
INIT_LIST_HEAD(&devzero.shadows);
INIT_LIST_HEAD(&devzero.page_cache);
/* Devzero has infinitely many pages ;-) */
devzero.npages = -1;
devzero.type = VM_OBJ_DEVZERO;
devzero.pager = &devzero_pager;
}
/* Allocates and fills in the env page. This is like a pre-faulted file. */
int task_populate_env(struct task *task)

View File

@@ -16,57 +16,23 @@
#include <l4lib/arch/utcb.h>
#include <l4lib/ipcdefs.h>
#include <lib/addr.h>
#include <kdata.h>
#include <kmalloc/kmalloc.h>
#include <string.h>
#include <vm_area.h>
#include <memory.h>
#include <file.h>
#include <utcb.h>
#include <proc.h>
#include <task.h>
-struct tcb_head {
-	struct list_head list;
-	int total;		/* Total threads */
-} tcb_head;
-
-struct tcb *find_task(int tid)
-{
-	struct tcb *t;
-
-	list_for_each_entry(t, &tcb_head.list, list)
-		if (t->tid == tid)
-			return t;
-	return 0;
-}
-
-struct tcb *tcb_alloc_init(void)
-{
-	struct tcb *task;
-
-	if (!(task = kzalloc(sizeof(struct tcb))))
-		return PTR_ERR(-ENOMEM);
-
-	/* Ids will be acquired from the kernel */
-	task->tid = TASK_ID_INVALID;
-	task->spid = TASK_ID_INVALID;
-
-	/* Initialise its lists */
-	INIT_LIST_HEAD(&task->list);
-	INIT_LIST_HEAD(&task->vm_area_list);
-
-	return task;
-}
#if 0
int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
{
int err;
struct svc_image *img;
unsigned int sp, pc;
struct tcb *task;
struct task_ids ids;
struct bootdesc *bd;
struct vm_object *vm_obj;
bd = initdata->bootdesc;
@@ -104,14 +70,6 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
/* Allocate a utcb virtual address */
task->utcb_address = (unsigned long)utcb_vaddr_new();
/* Allocate the first vm_object we want to map */
vm_obj = vm_obj_alloc_init();
vm_obj->priv.img = img;
file->length = img->phys_end - img->phys_start;
file->pager = &boot_file_pager;
list_add(&file->list, &initdata->boot_file_list);
/* Prepare environment boundaries. Posix minimum is 4Kb */
task->env_end = USER_AREA_END;
task->env_start = task->env_end - PAGE_SIZE;
@@ -199,7 +157,191 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
error:
BUG();
}
#endif
struct tcb_head {
struct list_head list;
int total; /* Total threads */
} tcb_head;
struct tcb *find_task(int tid)
{
struct tcb *t;
list_for_each_entry(t, &tcb_head.list, list)
if (t->tid == tid)
return t;
return 0;
}
struct tcb *tcb_alloc_init(void)
{
struct tcb *task;
if (!(task = kzalloc(sizeof(struct tcb))))
return PTR_ERR(-ENOMEM);
/* Ids will be acquired from the kernel */
task->tid = TASK_ID_INVALID;
task->spid = TASK_ID_INVALID;
/* Initialise its lists */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->vm_area_list);
return task;
}
/*
 * Creates a process environment, mmaps the given file along
 * with any other necessary segments, and executes it as a task.
 */
int start_boot_task(struct vm_file *file, struct task_ids *ids)
{
	struct tcb *task;
	unsigned int sp, pc;
	int err;

	/* Create the thread structures and address space */
	printf("Creating new thread.\n");
	if ((err = l4_thread_control(THREAD_CREATE, ids)) < 0) {
		printf("l4_thread_control failed with %d.\n", err);
		goto error;
	}

	/* Create a task and use given space and thread ids. */
	printf("New task with id: %d, space id: %d\n", ids->tid, ids->spid);
	task = tcb_alloc_init();
	task->tid = ids->tid;
	task->spid = ids->spid;

	/* Allocate a utcb virtual address */
	task->utcb_address = (unsigned long)utcb_vaddr_new();

	/* Prepare environment boundaries. */
	task->env_end = USER_AREA_END;
	task->env_start = task->env_end - DEFAULT_ENV_SIZE;
	task->args_end = task->env_start;
	task->args_start = task->env_start;

	/* Task stack starts right after the environment. TODO: Fix this. */
	task->stack_end = task->env_start;
	task->stack_start = task->stack_end - DEFAULT_STACK_SIZE;

	/* Currently RO text and RW data are one region */
	task->data_start = USER_AREA_START;
	task->data_end = USER_AREA_START + file->length;
	task->text_start = task->data_start;
	task->text_end = task->data_end;

	/* Set up task's registers */
	sp = align(task->stack_end - 1, 8);
	pc = task->text_start;

	/* Set up the task's thread details, (pc, sp, pager etc.) */
	if ((err = l4_exchange_registers(pc, sp, self_tid(), task->tid)) < 0) {
		printf("l4_exchange_registers failed with %d.\n", err);
		goto error;
	}

	/*
	 * mmap the task's physical image to its address space.
	 * TODO: Map data and text separately when available from bootdesc.
	 */
	if ((err = do_mmap(file, 0, task, task->text_start,
			   VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,
			   __pfn(page_align_up(task->text_end -
					       task->text_start)))) < 0) {
		printf("do_mmap: failed with %d.\n", err);
		goto error;
	}

	/* mmap the task's environment as anonymous memory. */
	if ((err = do_mmap(0, 0, task, task->env_start,
			   VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
			   __pfn(task->env_end - task->env_start))) < 0) {
		printf("do_mmap: Mapping environment failed with %d.\n", err);
		goto error;
	}

	/* mmap the task's stack as anonymous memory. */
	if ((err = do_mmap(0, 0, task, task->stack_start,
			   VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
			   __pfn(task->stack_end - task->stack_start))) < 0) {
		printf("do_mmap: Mapping stack failed with %d.\n", err);
		goto error;
	}

	/* mmap the task's utcb as single-page anonymous memory. */
	printf("%s: Mapping utcb for new task at: 0x%x\n", __TASKNAME__,
	       task->utcb_address);
	if ((err = do_mmap(0, 0, task, task->utcb_address,
			   VM_READ | VM_WRITE | VMA_SHARED | VMA_ANONYMOUS,
			   __pfn(DEFAULT_UTCB_SIZE))) < 0) {
		printf("do_mmap: Mapping utcb failed with %d.\n", err);
		goto error;
	}

	/* Add the task to the global task list */
	list_add(&task->list, &tcb_head.list);
	tcb_head.total++;

	/* Start the thread */
	printf("Starting task with id %d\n", task->tid);
	if ((err = l4_thread_control(THREAD_RUN, ids)) < 0) {
		printf("l4_thread_control failed with %d\n", err);
		goto error;
	}

	return 0;

error:
	BUG();
}
/*
 * Reads boot files from init data, determines their task ids if they
 * match particular servers, and starts the tasks.
 */
int start_boot_tasks(struct initdata *initdata)
{
	struct vm_file *file;
	struct svc_image *img;
	struct task_ids ids;
	int total = 0;

	/* Pop the boot files one by one and start each as a task */
	while (!list_empty(&initdata->boot_file_list)) {
		file = list_entry(initdata->boot_file_list.next,
				  struct vm_file, list);
		list_del(&file->list);

		BUG_ON(file->type != VM_FILE_BOOTFILE);
		img = file->priv_data;

		/* The pager (this task) is already running */
		if (!strcmp(img->name, __PAGERNAME__))
			continue;

		/* Set up task ids */
		if (!strcmp(img->name, __VFSNAME__)) {
			ids.tid = VFS_TID;
			ids.spid = VFS_TID;
		} else {
			ids.tid = -1;
			ids.spid = -1;
		}

		/* Add the file to the global vm lists */
		list_add(&file->list, &vm_file_list);
		list_add(&file->vm_obj.list, &vm_object_list);

		/* Start the file as a task */
		start_boot_task(file, &ids);
		total++;
	}

	if (!total) {
		printf("%s: Could not start any tasks.\n", __TASKNAME__);
		BUG();
	}

	return 0;
}
void init_pm(struct initdata *initdata)
{

View File

@@ -15,6 +15,15 @@ LIST_HEAD(vm_object_list);
/* Global list of in-memory vm files */
LIST_HEAD(vm_file_list);
struct vm_object *vm_object_init(struct vm_object *obj)
{
INIT_LIST_HEAD(&obj->list);
INIT_LIST_HEAD(&obj->page_cache);
INIT_LIST_HEAD(&obj->shadows);
return obj;
}
/* Allocate and initialise a vm object, and return it */
struct vm_object *vm_object_alloc_init(void)
{
@@ -23,10 +32,19 @@ struct vm_object *vm_object_alloc_init(void)
if (!(obj = kzalloc(sizeof(*obj))))
return PTR_ERR(-ENOMEM);
-	INIT_LIST_HEAD(&obj->list);
-	INIT_LIST_HEAD(&obj->page_cache);
-	INIT_LIST_HEAD(&obj->shadows);
-
-	return obj;
+	return vm_object_init(obj);
}
/* Allocate and initialise a vm file, and return it */
struct vm_file *vm_file_alloc_init(void)
{
	struct vm_file *f;

	if (!(f = kzalloc(sizeof(*f))))
		return PTR_ERR(-ENOMEM);

	INIT_LIST_HEAD(&f->list);
	vm_object_init(&f->vm_obj);

	return f;
}
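A small usage sketch of the containment pattern these constructors rely on (illustrative only; containment_demo is not part of this commit):

static void containment_demo(void)
{
	struct vm_file *f = vm_file_alloc_init();
	struct vm_object *obj = &f->vm_obj;

	/* vm_object_to_file() is a container_of() round trip: it
	 * recovers the enclosing vm_file from its embedded object. */
	BUG_ON(vm_object_to_file(obj) != f);
}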