More changes: rename proc_files to proc_vm_objects, add vm_object type and pager definitions, turn devzero into a vm_object, and add tasks/mm0/src/boot.c to build mappable files from boot images.

Bahadir Balban
2008-03-05 01:59:31 +00:00
parent 58b833dd7f
commit 5681f3d1cb
10 changed files with 209 additions and 219 deletions

View File

@@ -1,9 +1,30 @@
/*
* Data that comes from the kernel, and other init data.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __INIT_H__
#define __INIT_H__
#ifndef __MM_INIT_H__
#define __MM_INIT_H__
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/generic/physmem.h>
#include INC_PLAT(offsets.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)
#include <vm_area.h>
struct initdata {
struct bootdesc *bootdesc;
struct page_bitmap page_map;
};
extern struct initdata initdata;
int request_initdata(struct initdata *i);
void initialise(void);
#endif /* __INIT_H__ */
#endif /* __MM_INIT_H__ */

View File

@@ -1,29 +0,0 @@
/*
* Data that comes from the kernel.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __MM_KDATA_H__
#define __MM_KDATA_H__
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/generic/physmem.h>
#include INC_PLAT(offsets.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_ARCH(bootdesc.h)
#include <vm_area.h>
struct initdata {
struct bootdesc *bootdesc;
struct list_head boot_file_list;
struct page_bitmap page_map;
};
extern struct initdata initdata;
int request_initdata(struct initdata *i);
#endif /* __MM_KDATA_H__ */

View File

@@ -3,11 +3,11 @@
#include <vm_area.h>
struct proc_files {
struct vm_object *stack_file; /* ZI, RO: devzero, RW: private */
struct vm_object *env_file; /* NON-ZI, RO: private, RW: private */
struct vm_object *data_file; /* NON-ZI, RO: shared, RW: private */
struct vm_object *bss_file; /* ZI, RO: devzero, RW: private */
struct proc_vm_objects {
struct vm_object *stack; /* ZI, RO: devzero, RW: private */
struct vm_object *env; /* NON-ZI, RO: private, RW: private */
struct vm_object *data; /* NON-ZI, RO: shared, RW: private */
struct vm_object *bss; /* ZI, RO: devzero, RW: private */
};
int task_setup_vm_objects(struct tcb *t);

View File

@@ -1,5 +1,5 @@
/*
* Virtual memory area descriptors. No page cache yet.
* Virtual memory area descriptors.
*
* Copyright (C) 2007 Bahadir Balban
*/
@@ -20,17 +20,17 @@
#define VM_WRITE (1 << 2)
#define VM_EXEC (1 << 3)
#define VM_PROT_MASK (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SWAPPED (1 << 4)
/* VMA flags */
/* Shared copy of a file */
#define VMA_SHARED (1 << 3)
/* VMA that's not file-backed, always ZI */
#define VMA_ANON (1 << 4)
/* Private copy of a file VMA, can be ZI */
#define VMA_COW (1 << 5)
/* VMA object type flags */
#define VMOBJ_SHADOW (1 << 6)
/* VMA that's not file-backed, always maps devzero as VMA_COW */
#define VMA_ANONYMOUS (1 << 4)
/* Private copy of a file */
#define VMA_PRIVATE (1 << 5)
/* Copy-on-write semantics */
#define VMA_COW (1 << 6)
/* A vm object that is a shadow of another */
#define VMOBJ_SHADOW (1 << 7)
struct page {
int count; /* Refcount */
@@ -69,6 +69,16 @@ struct vm_pager {
struct vm_pager_ops ops; /* The ops the pager does on area */
};
enum VM_OBJ_TYPE {
VM_OBJ_SHADOW = 1, /* Anonymous pages, swap_pager, no vm_file */
VM_OBJ_VNODE, /* VFS file pages, vnode_pager, has vm_file */
VM_OBJ_DEVICE /* Device pages, device_pager, has vm_file */
};
/* TODO:
* How to distinguish different devices handling page faults ???
*/
/*
* Describes the in-memory representation of a resource. This could
* point at a file or another resource, e.g. a device area, swapper space,
@@ -81,20 +91,19 @@ struct vm_object {
int vma_refcnt; /* Number of vmas that refer */
int shadow_refcnt; /* Number of shadows that refer */
struct list_head shadows; /* List of vm objects that shadow this one */
struct vm_object *orig_vma; /* Original object that this one shadows */
struct vm_object *orig_obj; /* Original object that this one shadows */
unsigned int type; /* Defines the type of the object */
struct list_head list; /* List of all vm objects in memory */
struct list_head page_cache;/* List of in-memory pages */
struct vm_pager *pager; /* The pager for this object */
union private_data { /* Private data about the object */
struct vm_file *file; /* VFS file-specific information */
} priv;
struct list_head page_cache; /* List of in-memory pages */
struct vm_pager *pager; /* The pager for this object */
};
/* In memory representation of a vfs file. */
/* In memory representation of either a vfs file or a device. */
struct vm_file {
unsigned long vnum;
unsigned long length;
struct vm_object vm_obj;
void *priv_data; /* Device pagers use to access device info */
};
/* To create per-vma vm_object lists */
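
The shadows list and orig_obj pointer above imply a lookup order on a page fault: search the faulting object's own page cache first, then fall back through the objects it shadows. A minimal sketch under that assumption; the find_cached_page() helper and the page's offset and list members are hypothetical, not part of this commit.

/* Hypothetical helper, not part of this commit: walk a vm_object and the
 * chain of objects it shadows until a cached page for this offset is found.
 * Assumes struct page carries an offset and a list node for the page cache. */
struct page *find_cached_page(struct vm_object *obj, unsigned long offset)
{
	struct page *p;

	for (; obj; obj = obj->orig_obj)
		list_for_each_entry(p, &obj->page_cache, list)
			if (p->offset == offset)
				return p;

	/* Not cached anywhere in the chain; the caller must ask the pager */
	return 0;
}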

tasks/mm0/src/boot.c (new file, 58 lines added)
View File

@@ -0,0 +1,58 @@
#define vm_object_to_file(obj) \
(struct vm_file *)container_of(obj, struct vm_file, vm_obj)
/* Returns the page with given offset in this vm_object */
struct page *devzero_pager_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *devzero = container_of(vm_obj, struct vm_file, vm_obj);
struct page *zpage = devzero->priv_data;
/* Update zero page struct. */
spin_lock(&zpage->lock);
zpage->count++;
spin_unlock(&zpage->lock);
return zpage;
}
struct page *device_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *dev_file;
BUG_ON(vm_obj->type != VM_OBJ_DEVICE);
dev_file = vm_object_to_file(vm_obj);
/* TODO: look up and return dev_file's page at page_offset */
return 0;
}
struct vm_pager device_pager = {
.page_in = device_page_in,
};
LIST_HEAD(boot_files);
/* From bare boot images, create mappable device files */
int init_boot_files(struct initdata *initdata)
{
int err;
struct svc_image *img;
unsigned int sp, pc;
struct tcb *task;
struct task_ids ids;
struct bootdesc *bd;
struct vm_file *boot_file;
bd = initdata->bootdesc;
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
boot_file = kzalloc(sizeof(struct vm_file));
INIT_LIST_HEAD(&boot_file->vm_obj.list);
boot_file->priv_data = img;
boot_file->length = img->phys_end - img->phys_start;
boot_file->vm_obj.pager = &device_pager;
list_add(&boot_file->vm_obj.list, &boot_files);
}
return 0;
}
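
For context, a hedged sketch of how this could be driven from mm0's init path; request_initdata() and initialise() are declared in the init.h hunk above, but wiring init_boot_files() into initialise() is an assumption, not something this commit shows.

/* Assumed call site, not part of this commit: pull initdata from the
 * kernel, then turn the bare boot images into mappable files. */
void initialise(void)
{
	if (request_initdata(&initdata) < 0)
		BUG();

	init_boot_files(&initdata);
}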

View File

@@ -1,65 +0,0 @@
/*
* Handling of the special zero page.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <memory.h>
#include <mm/alloc_page.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <string.h>
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include <l4/generic/space.h>
#include <arch/mm.h>
static void *zpage_p;
static struct page *zpage;
static struct vm_file devzero;
void init_zero_page(void)
{
void *zpage_v;
zpage_p = alloc_page(1);
zpage = phys_to_page(zpage_p);
/* Map it to self */
zpage_v = l4_map_helper(zpage_p, 1);
/* Zero it */
memset(zpage_v, 0, PAGE_SIZE);
/* Unmap it */
l4_unmap_helper(zpage_v, 1);
/* Update page struct. All other fields are zero */
zpage->count++;
}
void init_devzero(void)
{
init_zero_page();
INIT_LIST_HEAD(&devzero.list);
INIT_LIST_HEAD(&devzero.page_cache_list);
devzero.length = (unsigned int)-1;
devzero.vnum = -1;
}
struct vm_file *get_devzero(void)
{
return &devzero;
}
void *get_zero_page(void)
{
zpage->count++;
return zpage_p;
}
void put_zero_page(void)
{
zpage->count--;
BUG_ON(zpage->count < 0);
}

View File

@@ -12,10 +12,6 @@
#include <memory.h>
#include <l4lib/arch/syscalls.h>
/* Swap related bookkeeping.
static struct vm_file shm_swap_file;
static struct id_pool *swap_file_offset_pool;
*/
/* TODO: This is to be implemented when fs0 is ready. */
int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *task)
@@ -237,20 +233,6 @@ struct vm_area *vma_split(struct vm_area *vma, struct tcb *task,
return new;
}
/*
* For written anonymous regions swapfile segments are allocated dynamically.
* when vma regions are modified these allocations must be re-adjusted.
* This call handles this adjustment as well as the vma.
*/
int vma_swapfile_realloc(struct vm_area *vma, unsigned long pfn_start,
unsigned long pfn_end)
{
/* TODO: Reslot in swapfile */
BUG();
return 0;
}
/* This shrinks the vma from *one* end only, either start or end */
int vma_shrink(struct vm_area *vma, struct tcb *task, unsigned long pfn_start,
unsigned long pfn_end)
@@ -407,12 +389,12 @@ is_vma_mergeable(unsigned long pfn_start, unsigned long pfn_end,
* existing vma and the flags match, it returns the adjacent vma. Otherwise it
* returns 0.
*/
int find_unmapped_area(struct vm_area **existing, struct vm_file *file,
unsigned long pfn_start, unsigned long npages,
unsigned int flags, struct list_head *vm_area_head)
int find_unmapped_area(struct vm_area **existing, unsigned long pfn_start,
unsigned long npages, unsigned int flags,
struct list_head *vm_area_head)
{
struct vm_area *vma;
unsigned long pfn_end = pfn_start + npages;
struct vm_area *vma;
*existing = 0;
list_for_each_entry(vma, vm_area_head, list) {
@@ -445,26 +427,33 @@ int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
unsigned long map_address, unsigned int flags, unsigned int pages)
{
struct vm_area *vma;
struct vm_object *vmobj;
unsigned long pfn_start = __pfn(map_address);
if (!mapfile) {
if (flags & VMA_ANON) {
mapfile = get_devzero();
vmobj = get_devzero();
f_offset = 0;
} else
BUG();
} else if (pages > (__pfn(page_align_up(mapfile->length)) - f_offset)) {
printf("%s: Trying to map %d pages from page %d, "
"but file length is %d\n", __FUNCTION__, pages,
f_offset, __pfn(page_align_up(mapfile->length)));
return -EINVAL;
} else {
if (pages > (__pfn(page_align_up(mapfile->length))
- f_offset)) {
printf("%s: Trying to map %d pages from page %d, "
"but file length is %d\n", __FUNCTION__, pages,
f_offset, __pfn(page_align_up(mapfile->length)));
return -EINVAL;
}
/* Set up a vm object for given file */
vmobj = vm_obj_alloc_init();
vmobj->priv.file = mapfile;
}
printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__, map_address,
map_address + pages * PAGE_SIZE);
/* See if it overlaps or is mergeable to an existing vma. */
if (find_unmapped_area(&vma, mapfile, pfn_start, pages, flags,
if (find_unmapped_area(&vma, pfn_start, pages, flags,
&t->vm_area_list) < 0)
return -EINVAL; /* Indicates overlap. */
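
A hypothetical call for the anonymous path above; the task pointer, address, flag composition and page count are placeholders, not taken from this commit.

/* Map four anonymous pages at map_address: with no backing file and the
 * anonymous flag set, do_mmap() falls back to the devzero object. */
if ((err = do_mmap(0, 0, task, map_address, VMA_ANONYMOUS | VMA_PRIVATE, 4)) < 0)
	return err;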

View File

@@ -15,76 +15,100 @@
#include <task.h>
#include <proc.h>
static void *zpage_p;
static void *zpage_phys;
static struct page *zpage;
static struct vm_file *devzero;
static struct vm_object devzero;
/* TODO: Associate devzero with zero page */
void init_zero_page(void)
{
void *zpage_v;
zpage_p = alloc_page(1);
zpage = phys_to_page(zpage_p);
void *zpage_virt;
zpage_phys = alloc_page(1);
zpage = phys_to_page(zpage_phys);
/* Map it to self */
zpage_v = l4_map_helper(zpage_p, 1);
zpage_virt = l4_map_helper(zpage_phys, 1);
/* Zero it */
memset(zpage_v, 0, PAGE_SIZE);
memset(zpage_virt, 0, PAGE_SIZE);
/* Unmap it */
l4_unmap_helper(zpage_v, 1);
l4_unmap_helper(zpage_virt, 1);
/* Update page struct. All other fields are zero */
spin_lock(&zpage->lock);
zpage->count++;
spin_unlock(&zpage->lock);
}
#define VM_OBJ_MASK 0xFFFF
#define VM_OBJ_DEVZERO (1 << 0) /* Devzero special file */
#define VM_OBJ_FILE (1 << 1) /* Regular VFS file */
#define VM_OBJ_SHADOW (1 << 2) /* Shadow of another object */
#define VM_OBJ_COW (1 << 3) /* Copy-on-write semantics */
struct vm_object *get_devzero(void)
{
return &devzero;
}
struct page *get_zero_page(void)
{
/* Update zero page struct. */
spin_lock(&zpage->lock);
zpage->count++;
spin_unlock(&zpage->lock);
return zpage;
}
void put_zero_page(void)
{
spin_lock(&zpage->lock);
zpage->count--;
spin_unlock(&zpage->lock);
BUG_ON(zpage->count < 0);
}
#define vm_object_to_file(obj) \
(struct vm_file *)container_of(obj, struct vm_file, vm_obj)
/* Returns the page with given offset in this vm_object */
struct page *devzero_pager_page_in(struct vm_object *vm_obj, unsigned long f_offset)
struct page *devzero_pager_page_in(struct vm_object *vm_obj,
unsigned long page_offset)
{
struct vm_file *devzero = container_of(vm_obj, struct vm_file, vm_obj);
struct page *zpage = devzero->priv_data;
/* Update zero page struct. */
spin_lock(&zpage->lock);
zpage->count++;
spin_unlock(&zpage->lock);
return zpage;
}
struct vm_pager devzero_pager = {
page_in = devzero_pager_page_int,
.page_in = devzero_pager_page_in,
};
void init_devzero(void)
{
init_zero_page();
INIT_LIST_HEAD(&devzero.page_cache);
INIT_LIST_HEAD(&devzero.list);
INIT_LIST_HEAD(&devzero.shadows);
INIT_LIST_HEAD(&devzero.page_cache);
/* Devzero has infinitely many pages ;-) */
devzero.npages = -1;
devzero.type = VM_OBJ_FILE;
devzero.type = VM_OBJ_DEVZERO;
devzero.pager = &devzero_pager;
}
struct vm_file *get_devzero(void)
{
return &devzero;
}
void *get_zero_page(void)
{
zpage->count++;
return zpage_p;
}
void put_zero_page(void)
{
zpage->count--;
BUG_ON(zpage->count < 0);
}
/* Allocates and fills in the env page. This is like a pre-faulted file. */
int task_populate_env(struct task *task)
{
@@ -123,22 +147,17 @@ int task_populate_env(struct task *task)
return 0;
}
#define TASK_DATA_VNUM 1
#define TASK_STACK_VNUM 2
#define TASK_ENV_VNUM 3
/*
* For a task that is about to execute, this dynamically
* generates its environment file, and environment data.
*/
int task_setup_vm_objects(struct tcb *t)
{
struct proc_files *pf = &t->proc_files;
struct proc_vm_objects *po = &t->proc_vm_objects;
if (IS_ERR(pf->stack_file = vmfile_alloc_init()))
if (IS_ERR(pf->stack = vmfile_alloc_init()))
return (int)t->stack_file;
if (IS_ERR(pf->env_file = vmfile_alloc_init()))
if (IS_ERR(pf->env = vmfile_alloc_init()))
return (int)t->env_file;
if (IS_ERR(pf->env_file = vmfile_alloc_init()))
return (int)t->data_file;
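
A hedged sketch of how a fault on a devzero-backed region might reach the pager defined above. The dispatch through a bare page_in member follows the devzero_pager initialiser in this hunk rather than the vm_pager_ops declaration in vm_area.h; the handler itself is an assumption.

/* Assumed fault path, not part of this commit: ask the backing object's
 * pager for the page covering the faulting offset. For a devzero-backed
 * anonymous region this hands back the shared, refcounted zero page. */
static struct page *obtain_page(struct vm_object *obj, unsigned long offset)
{
	return obj->pager->page_in(obj, offset);
}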

View File

@@ -41,36 +41,20 @@ struct tcb *find_task(int tid)
return 0;
}
#if 0
void dump_tasks(void)
struct tcb *tcb_alloc_init(void)
{
struct tcb *t;
struct tcb *task;
list_for_each_entry(t, &tcb_head.list, list) {
printf("Task %s: id/spid: %d/%d\n", &t->name[0], t->tid, t->spid);
printf("Task vm areas:\n");
dump_vm_areas(t);
printf("Task swapfile:\n");
dump_task_swapfile(t);
}
}
#endif
struct tcb *create_init_tcb(struct tcb_head *tcbs)
{
struct tcb *task = kzalloc(sizeof(struct tcb));
if (!(task = kzalloc(sizeof(struct tcb))))
return PTR_ERR(-ENOMEM);
/* Ids will be acquired from the kernel */
task->tid = TASK_ID_INVALID;
task->spid = TASK_ID_INVALID;
/* Initialise its lists */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->vm_area_list);
list_add_tail(&task->list, &tcbs->list);
tcbs->total++;
/* Allocate a utcb virtual address */
task->utcb_address = (unsigned long)utcb_vaddr_new();
return task;
}
@@ -78,15 +62,15 @@ struct tcb *create_init_tcb(struct tcb_head *tcbs)
int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
{
int err;
struct vm_file *file;
struct svc_image *img;
unsigned int sp, pc;
struct tcb *task;
struct task_ids ids;
struct bootdesc *bd = initdata->bootdesc;
struct bootdesc *bd;
struct vm_object *vm_obj;
bd = initdata->bootdesc;
INIT_LIST_HEAD(&tcb_head.list);
INIT_LIST_HEAD(&initdata->boot_file_list);
for (int i = 0; i < bd->total_images; i++) {
img = &bd->images[i];
@@ -113,16 +97,17 @@ int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
/* Create a task and use returned space and thread ids. */
printf("New task with id: %d, space id: %d\n", ids.tid, ids.spid);
task = create_init_tcb(tcbs);
task = tcb_alloc_init(tcbs);
task->tid = ids.tid;
task->spid = ids.spid;
/*
* For boot files, we use the physical address of the memory
* file as its mock-up inode.
*/
file = vmfile_alloc_init();
file->vnum = img->phys_start;
/* Allocate a utcb virtual address */
task->utcb_address = (unsigned long)utcb_vaddr_new();
/* Allocate the first vm_object we want to map */
vm_obj = vm_obj_alloc_init();
vm_obj->priv.img = img;
file->length = img->phys_end - img->phys_start;
file->pager = &boot_file_pager;
list_add(&file->list, &initdata->boot_file_list);
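
A hedged usage sketch for the allocator above, assuming the no-argument form defined in this hunk and the usual IS_ERR()-style error check; the caller shown is hypothetical.

/* Hypothetical caller, not part of this commit. */
struct tcb *task = tcb_alloc_init();

if (IS_ERR(task))
	return PTR_ERR(task);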

View File

@@ -9,9 +9,12 @@
#include <kmalloc/kmalloc.h>
/* Global list of in-memory vm files. */
/* Global list of in-memory vm objects. */
LIST_HEAD(vm_object_list);
/* Global list of in-memory vm files */
LIST_HEAD(vm_file_list);
/* Allocate and initialise a vm object, and return it */
struct vm_object *vm_object_alloc_init(void)
{