Mirror of https://github.com/drasko/codezero.git
Fixed most compiler errors. Need to do more.
@@ -27,5 +27,8 @@ extern struct initdata initdata;
 int request_initdata(struct initdata *i);
 
 void initialise(void);
+int init_devzero(void);
+struct vm_file *get_devzero(void);
+int init_boot_files(struct initdata *initdata);
 
 #endif /* __MM_INIT_H__ */
@@ -7,7 +7,7 @@
 #define __MEMORY_H__
 
 #include <vm_area.h>
-#include <kdata.h>
+#include <init.h>
 
 struct membank {
 	unsigned long start;
@@ -9,6 +9,17 @@
 #include <task.h>
 #include <vm_area.h>
 
+/* POSIX-defined mmap flags */
+#define PROT_READ	0x1
+#define PROT_WRITE	0x2
+#define PROT_EXEC	0x4
+#define PROT_NONE	0x0
+
+#define MAP_ANONYMOUS	0x20
+#define MAP_FIXED	0x10
+#define MAP_SHARED	0x01
+#define MAP_PRIVATE	0x02
+
 int do_munmap(void *vaddr, unsigned long size, struct tcb *task);
 
 int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
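The PROT_* and MAP_* values added above follow the usual POSIX encodings (0x20 for MAP_ANONYMOUS is the value Linux uses). A small standalone sketch of how a caller combines and tests these bits; the request structure is hypothetical and only exercises the flag arithmetic:

#include <stdio.h>

#define PROT_READ	0x1
#define PROT_WRITE	0x2
#define MAP_ANONYMOUS	0x20
#define MAP_PRIVATE	0x02

/* Hypothetical request, not a structure from this tree */
struct mmap_request {
	unsigned int prot;
	unsigned int flags;
};

int main(void)
{
	struct mmap_request req = {
		.prot  = PROT_READ | PROT_WRITE,
		.flags = MAP_ANONYMOUS | MAP_PRIVATE,
	};

	/* Private anonymous read-write: the classic heap-style mapping */
	if ((req.flags & MAP_ANONYMOUS) && (req.flags & MAP_PRIVATE) &&
	    (req.prot & PROT_WRITE))
		printf("anonymous private rw mapping\n");
	return 0;
}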
@@ -19,8 +19,8 @@
 #define TASK_FILES_MAX 32
 
 /* POSIX minimum is 4Kb */
-#define DEFAULT_ENV_SIZE	SZ_16KB
-#define DEFAULT_STACK_SIZE	SZ_16KB
+#define DEFAULT_ENV_SIZE	SZ_16K
+#define DEFAULT_STACK_SIZE	SZ_16K
 #define DEFAULT_UTCB_SIZE	PAGE_SIZE
 
 
@@ -31,15 +31,6 @@
 #define VMA_COW (1 << 7)
 #define VMA_FIXED (1 << 8)
 
-/*
- * A suggestion to how a non-page_array (i.e. a device)
- * page could tell its physical address.
- */
-struct devpage {
-	struct page page;
-	unsigned long phys;
-};
-
 struct page {
 	int refcnt;		/* Refcount */
 	struct spinlock lock;	/* Page lock. */
@@ -51,6 +42,15 @@ struct page {
 };
 extern struct page *page_array;
 
+/*
+ * A suggestion to how a non-page_array (i.e. a device)
+ * page could tell its physical address.
+ */
+struct devpage {
+	struct page page;
+	unsigned long phys;
+};
+
 #define page_refcnt(x)		((x)->count + 1)
 #define virtual(x)		((x)->virtual)
 #define phys_to_page(x)		(page_array + __pfn(x))
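The devpage structure moved here is the classic embedding idiom: a device page embeds a generic struct page, and code holding only the struct page pointer can walk back to the container to find the physical address. A standalone sketch with reduced stand-in types (not the tree's real definitions):

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-ins, just enough to show the container walk */
struct page { int refcnt; };

struct devpage {
	struct page page;	/* embedded generic page */
	unsigned long phys;	/* device physical address */
};

/* Recover the container from a pointer to the embedded member */
#define to_devpage(p) \
	((struct devpage *)((char *)(p) - offsetof(struct devpage, page)))

int main(void)
{
	struct devpage dp = { .page = { .refcnt = 1 }, .phys = 0x80000000UL };
	struct page *p = &dp.page;

	/* The device page "tells" its physical address via its container */
	printf("phys = 0x%lx\n", to_devpage(p)->phys);
	return 0;
}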
@@ -119,7 +119,6 @@ struct vm_object {
 	struct list_head list;		/* List of all vm objects in memory */
 	struct vm_pager *pager;		/* The pager for this object */
 	struct list_head page_cache;	/* List of in-memory pages */
-	struct vm_object_ops ops;	/* Operations on the object */
 };
 
 /* In memory representation of either a vfs file, a device. */
@@ -133,7 +132,7 @@ struct vm_file {
 };
 
 /* To create per-vma vm_object lists */
-struct vma_obj_link {
+struct vm_obj_link {
 	struct list_head list;
 	struct list_head shref; /* Ref to shadowers by original objects */
 	struct vm_object *obj;
@@ -176,18 +175,26 @@ static inline struct vm_area *find_vma(unsigned long addr,
 /* Adds a page to its vm_objects's page cache in order of offset. */
 int insert_page_olist(struct page *this, struct vm_object *vm_obj);
 
+/* Find a page in page cache via page offset */
+struct page *find_page(struct vm_object *obj, unsigned long pfn);
+
 /* Pagers */
 extern struct vm_pager file_pager;
 extern struct vm_pager bootfile_pager;
 extern struct vm_pager devzero_pager;
+extern struct vm_pager swap_pager;
 
+/* Pager initialisation, Special files */
+
 /* vm object and vm file lists */
 extern struct list_head vm_object_list;
-extern struct list_head vm_file_list;
 
 /* vm file and object initialisation */
 struct vm_file *vm_file_alloc_init(void);
 struct vm_object *vm_object_alloc_init(void);
+struct vm_object *vm_object_create(void);
+struct vm_file *vm_file_create(void);
+int vm_object_delete(struct vm_object *vmo);
 
 /* Main page fault entry point */
 void page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);
@@ -13,6 +13,7 @@
 #include INC_SUBARCH(mm.h)
 #include <arch/mm.h>
 #include <l4/generic/space.h>
+#include <l4/api/errno.h>
 #include <string.h>
 #include <memory.h>
 #include <shm.h>
@@ -29,6 +30,404 @@ unsigned long fault_to_file_offset(struct fault_data *fault)
 	return f_off_pfn;
 }
 
+/*
+ * Given a reference to a vm_object link, returns the next link.
+ * If back to given head, returns 0.
+ *
+ * vma->link1->link2->link3
+ *       |      |      |
+ *       V      V      V
+ *      vmo1   vmo2   vmo3|vm_file
+ *
+ * Example:
+ * Given a reference to link = vma, head = vma, returns link1.
+ * Given a reference to link = link3, head = vma, returns 0.
+ */
+struct vm_obj_link *vma_next_link(struct list_head *link,
+				  struct list_head *head)
+{
+	BUG_ON(list_empty(link));
+	if (link == head)
+		return 0;
+	else
+		return list_entry(link->next, struct vm_obj_link, list);
+}
+
+/* Unlinks obj_link from its vma and deletes it but keeps the object. */
+int vma_drop_link(struct vm_obj_link *shadower_link,
+		  struct vm_obj_link *orig_link)
+{
+	/* Remove object link from vma's list */
+	list_del(&orig_link->list);
+
+	/* Reduce object's ref count */
+	BUG_ON(--orig_link->obj->refcnt <= 0);
+
+	/*
+	 * Remove the shadower from original's shadower list.
+	 * We know shadower is deleted from original's list
+	 * because each shadow can shadow a single object.
+	 */
+	list_del(&shadower_link->shref);
+
+	/* Delete the original link */
+	kfree(orig_link);
+
+	return 0;
+}
+
+/*
+ * Checks if page cache pages of lesser is a subset of those of copier.
+ * Note this just checks the page cache, so if any objects have pages
+ * swapped to disk, this function does not rule.
+ */
+int vm_object_check_subset(struct vm_object *copier,
+			   struct vm_object *lesser)
+{
+	struct page *pc, *pl;
+
+	/* Copier must have equal or more pages to overlap lesser */
+	if (copier->npages < lesser->npages)
+		return 1;
+
+	/*
+	 * Do a page by page comparison. Every lesser page
+	 * must be in copier for overlap.
+	 */
+	list_for_each_entry(pl, &lesser->page_cache, list)
+		if (!(pc = find_page(copier, pl->offset)))
+			return 1;
+	/*
+	 * For all pages of lesser vmo, there seems to be a page
+	 * in the copier vmo. So lesser is a subset of copier
+	 */
+	return 0;
+}
+
+/* Merges link 1 to link 2 */
+int vma_do_merge_link(struct vm_obj_link *link1, struct vm_obj_link *link2)
+{
+	struct vm_object *obj1 = link1->obj;
+	struct vm_object *obj2 = link2->obj;
+	struct page *p1, *p2;
+
+	/* Move all non-intersecting pages to link2. */
+	list_for_each_entry(p1, &obj1->page_cache, list) {
+		/* Page doesn't exist, move it to shadower */
+		if (!(p2 = find_page(obj2, p1->offset))) {
+			list_del(&p1->list);
+			spin_lock(&p1->lock);
+			p1->owner = obj2;
+			spin_unlock(&p1->lock);
+			insert_page_olist(p1, obj2);
+			obj2->npages++;
+		}
+	}
+	/* Delete the object along with all its pages. */
+	vm_object_delete(obj1);
+
+	/* Delete the last link for the object */
+	kfree(link1);
+
+	return 0;
+}
+
+/*
+ * Finds the only shadower of a vm object, finds the link to
+ * the object that is in the same vma as the shadower, and
+ * merges the two in the shadower object, frees the link and
+ * the original object. Note this must be called when merge
+ * is decided.
+ */
+int vma_merge_link(struct vm_object *vmo)
+{
+	struct vm_obj_link *sh_link, *vmo_link;
+
+	/* Check refcount */
+	BUG_ON(vmo->refcnt != 1);
+
+	/* Get the last shadower entry */
+	sh_link = list_entry(vmo->shadowers.next,
+			     struct vm_obj_link, shref);
+
+	/* Remove it from original's shadow list */
+	list_del(&sh_link->shref);
+
+	/* Check that there really was one shadower left */
+	BUG_ON(!list_empty(&vmo_link->obj->shadowers));
+
+	/*
+	 * Get the link to vmo that is in the same list as
+	 * the only shadower
+	 */
+	vmo_link = list_entry(sh_link->list.next,
+			      struct vm_obj_link, list);
+
+	/*
+	 * Check that we got the right link. Since it is
+	 * an ordered list, the link must be the following
+	 * entry after its shadower.
+	 */
+	BUG_ON(vmo_link->obj != vmo);
+
+	/*
+	 * Now that we got the consecutive links in the
+	 * same vma, do the actual merge.
+	 */
+	vma_do_merge_link(vmo_link, sh_link);
+
+	return 0;
+}
+
+/*
+ * Creates a bare vm_object along with its vma link, since
+ * the shadow will be immediately used in a vma object list.
+ */
+struct vm_obj_link *vma_create_shadow(void)
+{
+	struct vm_object *vmo;
+	struct vm_obj_link *vmo_link;
+
+	if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
+		return 0;
+
+	if (!(vmo = vm_object_create())) {
+		kfree(vmo_link);
+		return 0;
+	}
+	INIT_LIST_HEAD(&vmo_link->list);
+	vmo->flags = VM_OBJ_SHADOW;
+	vmo_link->obj = vmo;
+
+	return vmo_link;
+}
+
+/* Allocates a new page, copies the original onto it and returns. */
+struct page *copy_page(struct page *orig)
+{
+	void *new_vaddr, *vaddr, *paddr;
+	struct page *new;
+
+	if (!(paddr = alloc_page(1)))
+		return 0;
+
+	new = phys_to_page(paddr);
+
+	/* Map the new and orig page to self */
+	new_vaddr = l4_map_helper(paddr, 1);
+	vaddr = l4_map_helper((void *)page_to_phys(orig), 1);
+
+	/* Copy the page into new page */
+	memcpy(new_vaddr, vaddr, PAGE_SIZE);
+
+	/* Unmap both pages from current task. */
+	l4_unmap_helper(vaddr, 1);
+	l4_unmap_helper(new_vaddr, 1);
+
+	return new;
+}
+
+/* TODO:
+ * - Why not allocate a swap descriptor in vma_create_shadow() rather than
+ *   a bare vm_object? It will be needed.
+ * - Does vm_write clash with any other object flags???
+ * - Check refcounting of shadows, their references, page refs,
+ *   reduces increases etc.
+ */
+int copy_on_write(struct fault_data *fault)
+{
+	struct vm_obj_link *vmo_link, *shadow_link, *copier_link;
+	struct vm_object *vmo, *shadow;
+	struct page *page, *new_page;
+	struct vm_area *vma = fault->vma;
+	unsigned int reason = fault->reason;
+	unsigned long file_offset = fault_to_file_offset(fault);
+
+	/* Get the first object, either original file or a shadow */
+	if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
+		printf("%s:%s: No vm object in vma!\n",
+		       __TASKNAME__, __FUNCTION__);
+		BUG();
+	}
+
+	/* Is the object read-only? Create a shadow object if so.
+	 *
+	 * NOTE: Whenever the topmost object is read-only, a new shadow
+	 * object must be created. When there are no shadows one is created
+	 * because, its the original vm_object that is not writeable, and
+	 * when there are shadows one is created because a fork had just
+	 * happened, in which case all shadows are rendered read-only.
+	 */
+	if (!(vmo->flags & VM_WRITE)) {
+		if (!(shadow_link = vma_create_shadow()))
+			return -ENOMEM;
+
+		/* Initialise the shadow */
+		shadow = shadow_link->obj;
+		shadow->refcnt = 1;
+		shadow->orig_obj = vmo_link->obj;
+		shadow->flags = VM_OBJ_SHADOW | VM_WRITE;
+		shadow->pager = &swap_pager;
+
+		/*
+		 * Add the shadow in front of the original:
+		 *
+		 * vma->link0->link1
+		 *       |      |
+		 *       V      V
+		 *    shadow  original
+		 */
+		list_add(&shadow_link->list, &vma->vm_obj_list);
+
+		/* Shadow is the copier object */
+		copier_link = shadow_link;
+	} else {
+		/* No new shadows, the topmost r/w vmo is the copier object */
+		copier_link = vmo_link;
+
+		/*
+		 * We start page search on read-only objects. If the first
+		 * one was writable, go to next which must be read-only.
+		 */
+		BUG_ON(!(vmo_link = vma_next_link(&vmo_link->list,
+						  &vma->vm_obj_list)));
+		BUG_ON(vmo_link->obj->flags & VM_WRITE);
+	}
+
+	/* Traverse the list of read-only vm objects and search for the page */
+	while (!(page = vmo_link->obj->pager->ops.page_in(vmo_link->obj,
+							  file_offset))) {
+		if (!(vmo_link = vma_next_link(&vmo_link->list,
+					       &vma->vm_obj_list))) {
+			printf("%s:%s: Traversed all shadows and the original "
+			       "file's vm_object, but could not find the "
+			       "faulty page in this vma.\n",__TASKNAME__,
+			       __FUNCTION__);
+			BUG();
+		}
+	}
+
+	/*
+	 * Copy the page. This traverse and copy is like a page-in operation
+	 * of a pager, except that the page is moving along vm_objects.
+	 */
+	new_page = copy_page(page);
+
+	/* Update page details */
+	spin_lock(&new_page->lock);
+	new_page->refcnt = 1;
+	new_page->owner = copier_link->obj;
+	new_page->offset = file_offset;
+	new_page->virtual = 0;
+
+	/* Add the page to owner's list of in-memory pages */
+	BUG_ON(!list_empty(&new_page->list));
+	insert_page_olist(new_page, new_page->owner);
+	spin_unlock(&page->lock);
+
+	/* Map the new page to faulty task */
+	l4_map((void *)page_to_phys(new_page),
+	       (void *)page_align(fault->address), 1,
+	       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
+	       fault->task->tid);
+
+	/*
+	 * Finished handling the actual fault, now check for possible
+	 * shadow collapses. Does the copier completely shadow the one
+	 * underlying it?
+	 */
+	if (!(vmo_link = vma_next_link(&copier_link->list, &vma->vm_obj_list))) {
+		/* Copier must have an object under it */
+		printf("Copier must have had an object under it!\n");
+		BUG();
+	}
+
+	/* Compare whether page caches overlap */
+	if (vm_object_check_subset(copier_link->obj, vmo_link->obj)) {
+		/*
+		 * They do overlap, so keep reference to object but
+		 * drop and delete the vma link.
+		 */
+		vmo = vmo_link->obj;
+		vma_drop_link(copier_link, vmo_link);
+		vmo_link = 0;
+
+		/* vm object reference down to one and object is mergeable? */
+		if ((vmo->refcnt == 1) &&
+		    (vmo->flags != VM_OBJ_FILE))
+			vma_merge_link(vmo);
+	}
+
+	return 0;
+}
+
+
+/*
+ * Handles the page fault, all entries here are assumed *legal* faults,
+ * i.e. do_page_fault() should have already checked for illegal accesses.
+ */
+int __do_page_fault(struct fault_data *fault)
+{
+	unsigned int reason = fault->reason;
+	unsigned int vma_flags = fault->vma->flags;
+	unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
+	struct vm_area *vma = fault->vma;
+	unsigned long file_offset;
+	struct vm_obj_link *vmo_link;
+	struct vm_object *vmo;
+	struct page *page;
+
+	/* Handle read */
+	if ((reason & VM_READ) && (pte_flags & VM_NONE)) {
+		file_offset = fault_to_file_offset(fault);
+		BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
+						  &vma->vm_obj_list)));
+		vmo = vmo_link->obj;
+
+		/* Get the page from its pager */
+		if (IS_ERR(page = vmo->pager->ops.page_in(vmo, file_offset))) {
+			printf("%s: Could not obtain faulty page.\n",
+			       __TASKNAME__);
+			BUG();
+		}
+		/* Map it to faulty task */
+		l4_map((void *)page_to_phys(page),
+		       (void *)page_align(fault->address), 1,
+		       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
+		       fault->task->tid);
+	}
+
+	/* Handle write */
+	if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
+		/* Copy-on-write */
+		if (vma_flags & VMA_PRIVATE) {
+			copy_on_write(fault);
+		}
+		/* FIXME: Just do fs files for now, anon shm objects later. */
+		if (vma_flags & VMA_SHARED) {
+			file_offset = fault_to_file_offset(fault);
+			BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
+							  &vma->vm_obj_list)));
+			vmo = vmo_link->obj;
+
+			/* Get the page from its pager */
+			if (IS_ERR(page = vmo->pager->ops.page_in(vmo, file_offset))) {
+				printf("%s: Could not obtain faulty page.\n",
+				       __TASKNAME__);
+				BUG();
+			}
+
+			/* Map it to faulty task */
+			l4_map((void *)page_to_phys(page),
+			       (void *)page_align(fault->address), 1,
+			       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
+			       fault->task->tid);
+		}
+	}
+	return 0;
+}
+
+#if 0
 /*
  * Old function, likely to be ditched.
  *
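Two details in the code added above still look unfinished, consistent with the commit message: copy_on_write() tests vmo->flags before vmo is assigned (vmo_link->obj appears to be what is meant), and vma_merge_link() reads vmo_link in its second BUG_ON before initialising it. A sketch of vma_merge_link() with the same logic but the assignment hoisted before first use; this is a suggested reordering, not code from the tree:

int vma_merge_link(struct vm_object *vmo)
{
	struct vm_obj_link *sh_link, *vmo_link;

	BUG_ON(vmo->refcnt != 1);

	/* Get and unlink the last shadower entry */
	sh_link = list_entry(vmo->shadowers.next,
			     struct vm_obj_link, shref);
	list_del(&sh_link->shref);

	/* Check on vmo itself, not on the not-yet-set vmo_link */
	BUG_ON(!list_empty(&vmo->shadowers));

	/* The link to vmo follows its shadower in the vma's ordered list */
	vmo_link = list_entry(sh_link->list.next,
			      struct vm_obj_link, list);
	BUG_ON(vmo_link->obj != vmo);

	vma_do_merge_link(vmo_link, sh_link);
	return 0;
}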
@@ -79,337 +478,6 @@ struct vm_area *copy_on_write_vma(struct fault_data *fault)
 	return shadow;
 }
-
-/*
- * Given a reference to a vm_object link, returns the next link.
- * If back to given head, returns 0.
- *
- * vma->link1->link2->link3
- *       |      |      |
- *       V      V      V
- *      vmo1   vmo2   vmo3|vm_file
- *
- * Example:
- * Given a reference to link = vma, head = vma, returns link1.
- * Given a reference to link = link3, head = vma, returns 0.
- */
-struct vm_object *vma_next_link(struct list_head *link,
-				struct list_head *head)
-{
-	BUG_ON(list_empty(link));
-	if (link == head)
-		return 0;
-	else
-		return list_entry(link.next, struct vm_obj_link, list);
-}
-
-/* Unlinks obj_link from its vma. */
-int vma_drop_link(struct vm_obj_link *shadower,
-		  struct vm_obj_link *obj_link)
-{
-	/* Remove object link from vma's list */
-	list_del(&obj_link->list);
-
-	/* Reduce object's ref count */
-	obj_link->obj->refcnt--;
-
-	/* Remove the copier from obj_link's shadower list */
-	list_del(&shadower->shref);
-}
-
-/*
- * Checks if page cache pages of lesser is a subset of those of copier.
- * Note this just checks the page cache, so if any objects have pages
- * swapped to disk, this function does not rule.
- */
-int vm_object_check_subset(struct vm_object *copier,
-			   struct vm_object *lesser)
-{
-	struct page *pc, *pl;
-
-	/* Copier must have equal or more pages to overlap lesser */
-	if (copier->npages < lesser->npages)
-		return 1;
-
-	/*
-	 * Do a page by page comparison. Every lesser page
-	 * must be in copier for overlap.
-	 */
-	list_for_each_entry(pl, &lesser->page_cache, list)
-		if (!(pc = find_page(copier, pl->offset)))
-			return 1;
-	/*
-	 * For all pages of lesser vmo, there seems to be a page
-	 * in the copier vmo. So lesser is a subset of copier
-	 */
-	return 0;
-}
-
-/* Merges link 1 to link 2 */
-int vma_do_merge_link(struct vm_obj_link *link1, struct vm_obj_link *link2)
-{
-	struct vm_object *obj1 = link1->obj;
-	struct vm_object *obj2 = link2->obj;
-	struct page *p1, *p2;
-
-	/*
-	 * Move all non-intersecting pages to link2. Free all
-	 * intersecting pages.
-	 */
-	list_for_each_entry(p1, &obj1->page_cache, list) {
-		/* Page doesn't exist, move it to shadower */
-		if (!(p2 = find_page(obj2, p1->offset))) {
-			list_del(&p1->list);
-			spin_lock(&page->lock);
-			p1->owner = obj2;
-			spin_unlock(&page->lock);
-			insert_page_olist(p1, obj2);
-			obj2->npages++;
-		}
-	}
-	/* Delete the object along with all its pages. */
-	vm_object_delete(obj1);
-}
-
-/*
- * Merges a vma's shadow object with its shadower. Note this
- * must be called when merge is decided.
- */
-int vma_merge_link(struct vm_obj_link *vmo_link)
-{
-	struct vm_obj_link *sh_link;
-
-	/* Check refcount */
-	BUG_ON(vmo_link->obj->refcnt != 1);
-
-	/* Get the last shadower entry */
-	sh_link = list_entry(&vmo_link->obj->shadowers.next,
-			     struct vm_obj_link, shref);
-
-	/* Remove it */
-	list_del(&sh_link->shref);
-
-	/* Check that there really was one shadower left */
-	BUG_ON(!list_empty(&vmo_link->obj->shadowers));
-
-	/* Do the actual merge */
-	vma_do_merge_link(vmo_link, sh_link);
-
-}
-
-struct vm_obj_link *vma_create_shadow(void)
-{
-	struct vm_object *vmo;
-	struct vm_obj_link *vmo_link;
-
-	if (!(vmo_link = kzalloc(sizeof(*vmo_link))))
-		return 0;
-
-	if (!(vmo = vm_object_create())) {
-		kfree(vmo_link);
-		return 0;
-	}
-	INIT_LIST_HEAD(&vmo_link->list);
-	vmo->type = VM_OBJ_SHADOW;
-	vmo_link->obj = vmo;
-
-	return vmo_link;
-}
-
-/* Allocates a new page, copies the original onto it and returns. */
-struct page *copy_page(struct page *orig)
-{
-	void *new_vaddr, *vaddr, *paddr;
-	struct page *new;
-
-	if (!(paddr = alloc_page(1)))
-		return 0;
-
-	new = phys_to_page(paddr);
-
-	/* Map the new and orig page to self */
-	new_vaddr = l4_map_helper(paddr, 1);
-	vaddr = l4_map_helper(page_to_phys(orig), 1);
-
-	/* Copy the page into new page */
-	memcpy(new_vaddr, vaddr, PAGE_SIZE);
-
-	/* Unmap both pages from current task. */
-	l4_unmap_helper(vaddr, 1);
-	l4_unmap_helper(new_vaddr, 1);
-
-	return new;
-}
-
-/* TODO:
- * - Why not allocate a swap descriptor in vma_create_shadow() rather than
- *   a bare vm_object? It will be needed.
- * - Does vm_write clash with any other object flags???
- * - Check refcounting of shadows, their references, page refs,
- *   reduces increases etc.
- */
-int copy_on_write(struct fault_data *fault)
-{
-	struct vm_obj_link *vmo_link, *shadow_link, *copier_link;
-	struct vm_object *vmo, *shadow, *copier;
-	struct page *page, *new_page;
-	unsigned int reason = fault->reason;
-	unsigned int vma_flags = fault->vma->flags;
-	unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
-	unsigned long file_offset = fault_to_file_offset(fault);
-
-	/* Get the first object, either original file or a shadow */
-	if (!(vmo_link = vma_next_link(&vma->vm_obj_list, &vma->vm_obj_list))) {
-		printf("%s:%s: No vm object in vma!\n",
-		       __TASKNAME__, __FUNCTION__);
-		BUG();
-	}
-
-	/* Is the object read-only? Create a shadow object if so.
-	 *
-	 * NOTE: Whenever the topmost object is read-only, a new shadow
-	 * object must be created. When there are no shadows one is created
-	 * because, its the original vm_object that is not writeable, and
-	 * when there are shadows one is created because a fork had just
-	 * happened, in which case all shadows are rendered read-only.
-	 */
-	if (!(vmo->flags & VM_WRITE)) {
-		if (!(shadow_link = vma_create_shadow()))
-			return -ENOMEM;
-
-		/* Initialise the shadow */
-		shadow = shadow_link->obj;
-		shadow->vma_refcnt = 1;
-		shadow->orig_obj = vmo_link->obj;
-		shadow->type = VM_OBJ_SHADOW | VM_WRITE;
-		shadow->pager = swap_pager;
-
-		/*
-		 * Add the shadow in front of the original:
-		 *
-		 * vma->link0->link1
-		 *       |      |
-		 *       V      V
-		 *    shadow  original
-		 */
-		list_add(&shadow_link->list, &vma->vm_obj_list);
-
-		/* Shadow is the copier object */
-		copier_link = shadow_link;
-	} else {
-		/* No new shadows, the topmost r/w vmo is the copier object */
-		copier_link = vmo_link;
-
-		/*
-		 * We start page search on read-only objects. If the first
-		 * one was writable, go to next which must be read-only.
-		 */
-		BUG_ON(!(vmo_link = vma_next_object(vmo_link,
-						    &vma->vm_obj_list)));
-		BUG_ON(vmo_link->obj->flags & VM_WRITE);
-	}
-
-	/* Traverse the list of read-only vm objects and search for the page */
-	while (!(page = vmo_link->obj->pager.ops->page_in(vmo_link->obj,
-							  file_offset))) {
-		if (!(vmo_link = vma_next_object(vmo_link,
-						 &vma->vm_obj_list))) {
-			printf("%s:%s: Traversed all shadows and the original "
-			       "file's vm_object, but could not find the "
-			       "faulty page in this vma.\n",__TASKNAME__,
-			       __FUNCTION__);
-			BUG();
-		}
-	}
-
-	/*
-	 * Copy the page. This traverse and copy is like a page-in operation
-	 * of a pager, except that the page is moving along vm_objects.
-	 */
-	new_page = copy_page(page);
-
-	/* Update page details */
-	spin_lock(&new_page->lock);
-	new_page->count = 1;
-	new_page->owner = copier_link->obj;
-	new_page->offset = page_offset;
-	new_page->virtual = 0;
-
-	/* Add the page to owner's list of in-memory pages */
-	BUG_ON(!list_empty(&new_page->list));
-	insert_page_olist(new_page, new_page->owner->obj);
-	spin_unlock(&page->lock);
-
-	/* Map the new page to faulty task */
-	l4_map(page_to_phys(new_page), (void *)page_align(fault->address), 1,
-	       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
-	       fault->task->tid);
-
-	/*
-	 * Finished handling the actual fault, now check for possible
-	 * shadow collapses. Does the copier shadow completely shadow
-	 * the one underlying it?
-	 */
-	if (!(vmo_link = vma_next_object(copier_link, &vma->vm_obj_list))) {
-		/* Copier must have an object under it */
-		printf("Copier must have had an object under it!\n");
-		BUG();
-	}
-
-	/* Compare whether page caches overlap */
-	if (vm_object_check_subset(copier_link->obj, vmo_link->obj)) {
-		/* They do overlap, so drop reference to lesser shadow */
-		vma_drop_link(copier_link, vmo_link);
-
-		/* vm object reference down to one? */
-		if (vmo_link->obj->refcnt == 1)
-			/* The object is mergeable? i.e. not a file? */
-			if (vmo_link->type != VM_OBJ_FILE)
-				/* Merge it with its only shadower */
-				vm_object_merge(vmo_link);
-	}
-
-	return 0;
-}
-
-
-/*
- * Handles the page fault, all entries here are assumed *legal* faults,
- * i.e. do_page_fault() should have already checked for illegal accesses.
- */
-int __do_page_fault(struct fault_data *fault)
-{
-	unsigned int reason = fault->reason;
-	unsigned int vma_flags = fault->vma->flags;
-	unsigned int pte_flags = vm_prot_flags(fault->kdata->pte);
-	struct vm_object *vmo;
-	struct page *page;
-
-	/* Handle read */
-	if ((reason & VM_READ) && (pte_flags & VM_NONE)) {
-		unsigned long file_offset = fault_to_file_offset(fault);
-		vmo = vma_get_next_object(&vma->vm_obj_list);
-
-		/* Get the page from its pager */
-		if (IS_ERR(page = vmo->pager.ops->page_in(vmo, file_offset))) {
-			printf("%s: Could not obtain faulty page.\n",
-			       __TASKNAME__);
-			BUG();
-		}
-		/* Map it to faulty task */
-		l4_map(page_to_phys(page), (void *)page_align(fault->address),1,
-		       (reason & VM_READ) ? MAP_USR_RO_FLAGS : MAP_USR_RW_FLAGS,
-		       fault->task->tid);
-	}
-
-	/* Handle write */
-	if ((reason & VM_WRITE) && (pte_flags & VM_READ)) {
-		/* Copy-on-write */
-		if (vma_flags & VMA_PRIVATE) {
-			copy_on_write(fault);
-		}
-	}
-}
 
 /*
  * Handles any page ownership change or allocation for file-backed pages.
  */
@@ -679,6 +747,7 @@ int do_anon_page(struct fault_data *fault)
 	}
 	return 0;
 }
+#endif
 
 /*
  * Page fault model:
@@ -743,10 +812,8 @@ int do_page_fault(struct fault_data *fault)
 		BUG();	/* Can't handle this yet. */
 	}
 
-	if (vma_flags & VMA_ANON)
-		err = do_anon_page(fault);
-	else
-		err = do_file_page(fault);
+	/* Handle legitimate faults */
+	__do_page_fault(fault);
 
 	/* Return the ipc and by doing so restart the faulty thread */
 	l4_ipc_return(err);
@@ -14,14 +14,16 @@
 #include <posix/sys/types.h>
 #include <string.h>
 
-int vfs_read(unsigned long vnum, unsigned long f_offset, unsigned long npages,
-	     void *pagebuf)
+/* List of all generic files */
+LIST_HEAD(vm_file_list);
+
+int vfs_read(unsigned long vnum, unsigned long file_offset,
+	     unsigned long npages, void *pagebuf)
 {
 	int err;
 
 	write_mr(L4SYS_ARG0, vnum);
-	write_mr(L4SYS_ARG1, f_offset);
+	write_mr(L4SYS_ARG1, file_offset);
 	write_mr(L4SYS_ARG2, npages);
 	write_mr(L4SYS_ARG3, (u32)pagebuf);
 
@@ -39,13 +41,13 @@ int vfs_read(unsigned long vnum, unsigned long f_offset, unsigned long npages,
 	return err;
 }
 
-int vfs_write(unsigned long vnum, unsigned long f_offset, unsigned long npages,
-	      void *pagebuf)
+int vfs_write(unsigned long vnum, unsigned long file_offset,
+	      unsigned long npages, void *pagebuf)
 {
 	int err;
 
 	write_mr(L4SYS_ARG0, vnum);
-	write_mr(L4SYS_ARG1, f_offset);
+	write_mr(L4SYS_ARG1, file_offset);
 	write_mr(L4SYS_ARG2, npages);
 	write_mr(L4SYS_ARG3, (u32)pagebuf);
 
@@ -81,7 +83,7 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
 	if (!(t = find_task(opener)))
 		return -EINVAL;
 
-	if (fd < 0 || fd > TASK_OFILES_MAX)
+	if (fd < 0 || fd > TASK_FILES_MAX)
 		return -EINVAL;
 
 	/* Assign vnum to given fd on the task */
@@ -93,20 +95,20 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
 		if (vmfile->vnum == vnum) {
 			/* Add a reference to it from the task */
 			t->fd[fd].vmfile = vmfile;
-			vmfile->refcnt++;
+			vmfile->vm_obj.refcnt++;
 			return 0;
 		}
 	}
 
 	/* Otherwise allocate a new one for this vnode */
-	if (IS_ERR(vmfile = vmfile_alloc_init()))
+	if (IS_ERR(vmfile = vm_file_create()))
 		return (int)vmfile;
 
 	/* Initialise and add it to global list */
 	vmfile->vnum = vnum;
 	vmfile->length = length;
-	vmfile->pager = &default_file_pager;
-	list_add(&vmfile->list, &vm_file_list);
+	vmfile->vm_obj.pager = &file_pager;
+	list_add(&vmfile->vm_obj.list, &vm_file_list);
 
 	return 0;
 }
@@ -154,6 +156,7 @@ int insert_page_olist(struct page *this, struct vm_object *vmo)
 	BUG();
 }
 
+
 /*
  * This reads-in a range of pages from a file and populates the page cache
  * just like a page fault, but its not in the page fault path.
@@ -164,34 +167,13 @@ int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
 	struct page *page;
 
 	for (int f_offset = pfn_start; f_offset < pfn_end; f_offset++) {
-		/* The page is not resident in page cache. */
-		if (!(page = find_page(vmfile, f_offset))) {
-			/* Allocate a new page */
-			void *paddr = alloc_page(1);
-			void *vaddr = phys_to_virt(paddr);
-			page = phys_to_page(paddr);
-
-			/*
-			 * Map new page at a self virtual address.
-			 * NOTE: this is not unmapped here but in
-			 * read_cache_pages where mm0's work with the
-			 * page is done.
-			 */
-			l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
-
-			/* Read-in the page using the file's pager */
-			vmfile->pager->ops.read_page(vmfile, f_offset, vaddr);
-
-			spin_lock(&page->lock);
-			page->count++;
-			page->owner = vmfile;
-			page->f_offset = f_offset;
-			page->virtual = 0;
-
-			/* Add the page to owner's list of in-memory pages */
-			BUG_ON(!list_empty(&page->list));
-			insert_page_olist(page, vmfile);
-			spin_unlock(&page->lock);
+		page = vmfile->vm_obj.pager->ops.page_in(&vmfile->vm_obj,
+							 f_offset);
+		if (IS_ERR(page)) {
+			printf("%s: %s:Could not read page %d "
+			       "from file with vnum: 0x%x\n", __TASKNAME__,
+			       __FUNCTION__, f_offset, vmfile->vnum);
+			break;
 		}
 	}
 
@@ -207,8 +189,8 @@ int read_cache_pages(struct vm_file *vmfile, void *buf, unsigned long pfn_start,
 	int copysize, left;
 	void *page_virtual;
 
-	list_for_each_entry(head, &vmfile->page_cache_list, list)
-		if (head->f_offset == pfn_start)
+	list_for_each_entry(head, &vmfile->vm_obj.page_cache, list)
+		if (head->offset == pfn_start)
 			goto copy;
 	BUG();
 
@@ -230,7 +212,7 @@ copy:
 
 	/* Copy the rest and unmap. */
 	list_for_each_entry(next, &head->list, list) {
-		if (left == 0 || next->f_offset == pfn_end)
+		if (left == 0 || next->offset == pfn_end)
 			break;
 		copysize = (left <= PAGE_SIZE) ? left : PAGE_SIZE;
 		page_virtual = phys_to_virt((void *)page_to_phys(next));
@@ -243,7 +225,6 @@ copy:
 	return 0;
 }
 
-
 int sys_read(l4id_t sender, int fd, void *buf, int count)
 {
 	unsigned long foff_pfn_start, foff_pfn_end;
@@ -255,7 +236,7 @@ int sys_read(l4id_t sender, int fd, void *buf, int count)
 	BUG_ON(!(t = find_task(sender)));
 
 	/* TODO: Check user buffer and count validity */
-	if (fd < 0 || fd > TASK_OFILES_MAX)
+	if (fd < 0 || fd > TASK_FILES_MAX)
 		return -EINVAL;
 
 	vmfile = t->fd[fd].vmfile;
@@ -5,7 +5,6 @@
  */
 #include <stdio.h>
 #include <string.h>
-#include <kdata.h>
 #include <memory.h>
 #include <mm/alloc_page.h>
 #include <kmalloc/kmalloc.h>
@@ -57,7 +56,7 @@ void init_mm(struct initdata *initdata)
 	printf("%s: Initialised devzero.\n", __TASKNAME__);
 
 	/* Initialise in-memory boot files */
-	init_boot_files();
+	init_boot_files(initdata);
 	printf("%s: Initialised in-memory boot files.\n", __TASKNAME__);
 
 	shm_init();
@@ -13,6 +13,7 @@
 #include <l4lib/arch/syscalls.h>
 
 
+#if 0
 /* TODO: This is to be implemented when fs0 is ready. */
 int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *task)
 {
@@ -44,9 +45,9 @@ int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *tas
 int page_release(struct page *page)
 {
 	spin_lock(&page->lock);
-	page->count--;
-	BUG_ON(page->count < -1);
-	if (page->count == -1) {
+	page->refcnt--;
+	BUG_ON(page->refcnt < -1);
+	if (page->refcnt == -1) {
 		/* Unlink the page from its owner's list */
 		list_del_init(&page->list);
 
@@ -94,7 +95,7 @@ int vma_release_pages(struct vm_area *vma, struct tcb *task,
 	f_end = vma->f_offset + vma->pfn_end - pfn_end;
 
 	list_for_each_entry_safe(page, n, &vma->owner->page_cache_list, list) {
-		if (page->f_offset >= f_start && page->f_offset <= f_end) {
+		if (page->offset >= f_start && page->f_offset <= f_end) {
 			l4_unmap((void *)virtual(page), 1, task->tid);
 			page_release(page);
 		}
@@ -133,38 +134,6 @@ int vma_unmap_shadows(struct vm_area *vma, struct tcb *task, unsigned long pfn_s
 	return 0;
 }
 
-struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
-			unsigned int flags, unsigned long f_offset,
-			struct vm_file *mapfile)
-{
-	struct vm_area *vma;
-	struct vm_obj_link *obj_link;
-
-	/* Allocate new area */
-	if (!(vma = kzalloc(sizeof(struct vm_area))))
-		return 0;
-
-	/* Allocate vm object link */
-	if (!(obj_link = kzalloc(sizeof(struct vm_obj_link)))) {
-		kfree(vma);
-		return 0;
-	}
-
-	INIT_LIST_HEAD(&vma->list);
-	INIT_LIST_HEAD(&vma->vm_obj_list);
-
-	vma->pfn_start = pfn_start;
-	vma->pfn_end = pfn_start + npages;
-	vma->flags = flags;
-	vma->f_offset = f_offset;
-
-	INIT_LIST_HEAD(&obj_link->list);
-	obj_link->obj = &mapfile->vm_obj;
-	list_add(&obj_link->list, &vma->vm_obj_list);
-
-	return vma;
-}
-
 /* TODO: vma_destroy/shrink/split should also handle swap file modification */
 
 /* Frees and unlinks a vma from its list. TODO: Add list locking */
@@ -369,6 +338,40 @@ int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)
 
 	return do_munmap(vaddr, size, task);
 }
+#endif
 
+struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
+			unsigned int flags, unsigned long file_offset,
+			struct vm_file *mapfile)
+{
+	struct vm_area *vma;
+	struct vm_obj_link *obj_link;
+
+	/* Allocate new area */
+	if (!(vma = kzalloc(sizeof(struct vm_area))))
+		return 0;
+
+	/* Allocate vm object link */
+	if (!(obj_link = kzalloc(sizeof(struct vm_obj_link)))) {
+		kfree(vma);
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&vma->list);
+	INIT_LIST_HEAD(&vma->vm_obj_list);
+
+	vma->pfn_start = pfn_start;
+	vma->pfn_end = pfn_start + npages;
+	vma->flags = flags;
+	vma->file_offset = file_offset;
+
+	INIT_LIST_HEAD(&obj_link->list);
+	INIT_LIST_HEAD(&obj_link->shref);
+	obj_link->obj = &mapfile->vm_obj;
+	list_add(&obj_link->list, &vma->vm_obj_list);
+
+	return vma;
+}
+
 int vma_intersect(unsigned long pfn_start, unsigned long pfn_end,
 		  struct vm_area *vma)
@@ -397,17 +400,17 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
 		return 0;
 
 	/* If no vmas, first map slot is available. */
-	if (list_empty(&task->vm_area_head))
+	if (list_empty(&task->vm_area_list))
 		return __pfn(task->map_start);
 
 	/* First vma to check our range against */
-	vma = list_entry(&task->vm_area_head.next, struct vm_area, list);
+	vma = list_entry(task->vm_area_list.next, struct vm_area, list);
 
 	/* Start searching from task's end of data to start of stack */
 	while (pfn_end <= __pfn(task->map_end)) {
 
 		/* If intersection, skip the vma and fast-forward to next */
-		if (vma_intersection(pfn_start, pfn_end, vma)) {
+		if (vma_intersect(pfn_start, pfn_end, vma)) {
 
 			/* Update interval to next available space */
 			pfn_start = vma->pfn_end;
@@ -417,7 +420,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
 			 * Decision point, no more vmas left to check.
 			 * Are we out of task map area?
 			 */
-			if (vma->list.next == &task->vm_area_head) {
+			if (vma->list.next == &task->vm_area_list) {
 				if (pfn_end > __pfn(task->map_end))
 					break; /* Yes, fail */
 				else
@@ -447,7 +450,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 {
 	unsigned long file_npages = __pfn(page_align_up(mapfile->length));
 	unsigned long map_pfn = __pfn(map_address);
-	struct vm_area *vma_new, *vma_mapped;
+	struct vm_area *new, *mapped;
 
 	if (!mapfile) {
 		if (flags & VMA_ANONYMOUS) {
@@ -463,12 +466,12 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 	}
 
 	/* Check invalid page size */
-	if (pages == 0) {
-		printf("Trying to map %d pages.\n", pages);
+	if (npages == 0) {
+		printf("Trying to map %d pages.\n", npages);
 		return -EINVAL;
 	}
-	if (pages > __pfn(task->stack_start - task->data_end)) {
-		printf("Trying to map too many pages: %d\n", pages);
+	if (npages > __pfn(task->stack_start - task->data_end)) {
+		printf("Trying to map too many pages: %d\n", npages);
 		return -ENOMEM;
 	}
 
@@ -476,7 +479,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 	if (map_address == 0 || map_address < task->data_end ||
 	    map_address >= task->stack_start) {
 		/* Address invalid or not specified */
-		if (flags & VM_FIXED)
+		if (flags & VMA_FIXED)
 			return -EINVAL;
 
 		/* Get new map address for region of this size */
@@ -485,8 +488,8 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 			return (int)map_address;
 
 		/* Create a new vma for newly allocated address */
-		else if (!(vma_new = vma_new(__pfn(map_address), npages,
+		else if (!(new = vma_new(__pfn(map_address), npages,
 					 flags, file_offset, mapfile)))
 			return -ENOMEM;
 		/* Successful? Add it to list and return */
 		goto out_success;
@@ -496,18 +499,18 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset, struct tcb *task
 	 * FIXME: Currently we don't allow overlapping vmas. To be fixed soon
 	 * We need to handle intersection, splitting, shrink/grow etc.
 	 */
-	list_for_each_entry(vma_mapped, &task->vm_area_list, list)
-		BUG_ON(vma_intersect(map_pfn, map_pfn + npages, vma_mapped));
+	list_for_each_entry(mapped, &task->vm_area_list, list)
+		BUG_ON(vma_intersect(map_pfn, map_pfn + npages, mapped));
 
 	/* For valid regions that aren't allocated by us, create the vma. */
-	if (!(vma_new = vma_new(__pfn(map_address), npages, flags, file_offset,
+	if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset,
 			    mapfile)))
 		return -ENOMEM;
 
 out_success:
 	printf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
 	       map_address, map_address + npages * PAGE_SIZE);
-	list_add(&vma->list, &task->vm_area_list);
+	list_add(&new->list, &task->vm_area_list);
 
 	return 0;
 }
@@ -5,6 +5,7 @@
 #include <l4/lib/list.h>
 #include <l4lib/arch/syscalls.h>
 #include <l4lib/arch/syslib.h>
+#include <kmalloc/kmalloc.h>
 #include <mm/alloc_page.h>
 #include <vm_area.h>
 #include <string.h>
@@ -12,6 +13,18 @@
 #include <init.h>
 #include INC_ARCH(bootdesc.h)
 
+
+struct page *page_init(struct page *page)
+{
+	/* Reset page */
+	memset(page, 0, sizeof(*page));
+	page->refcnt = -1;
+	spin_lock_init(&page->lock);
+	INIT_LIST_HEAD(&page->list);
+
+	return page;
+
+}
 struct page *find_page(struct vm_object *obj, unsigned long pfn)
 {
 	struct page *p;
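The refcnt = -1 initial state set by page_init() pairs with page_release() earlier in this commit, which frees the page when the count drops back to -1, and with the pagers below, which bump the count on page-in. A standalone sketch of that lifecycle with a reduced struct page:

#include <assert.h>
#include <stdio.h>

/* Reduced stand-in: only the refcount convention is modelled */
struct page { int refcnt; };

static void page_init(struct page *p) { p->refcnt = -1; }	/* unused state */

static void page_get(struct page *p) { p->refcnt++; }		/* page-in */

static int page_release(struct page *p)	/* cf. page_release() above */
{
	p->refcnt--;
	assert(p->refcnt >= -1);
	return p->refcnt == -1;	/* last user gone, frame can be freed */
}

int main(void)
{
	struct page pg;

	page_init(&pg);		/* free: refcnt == -1 */
	page_get(&pg);		/* one user: refcnt == 0 */
	if (page_release(&pg))
		printf("page returned to free state\n");
	return 0;
}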
@@ -23,25 +36,32 @@ struct page *find_page(struct vm_object *obj, unsigned long pfn)
 	return 0;
 }
 
+/*
+ * Deletes all pages in a page cache, assumes pages are from the
+ * page allocator, and page structs are from the page_array, which
+ * is the default situation.
+ */
 int default_release_pages(struct vm_object *vm_obj)
 {
-	struct page *p;
-	struct list_head *n;
-	void *phys;
+	struct page *p, *n;
 
 	list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
 		list_del(&p->list);
 		BUG_ON(p->refcnt);
 
 		/* Return page back to allocator */
-		free_page(page_to_phys(p));
+		free_page((void *)page_to_phys(p));
 
 		/* Free the page structure */
 		kfree(p);
+
+		/* Reduce object page count */
+		BUG_ON(--vm_obj->npages < 0);
 	}
 	return 0;
 }
 
 
 struct page *file_page_in(struct vm_object *vm_obj, unsigned long page_offset)
 {
 	struct vm_file *f = vm_object_to_file(vm_obj);
@@ -78,8 +98,8 @@ struct page *file_page_in(struct vm_object *vm_obj, unsigned long page_offset)
 		vm_obj->npages++;
 
 		/* Update page details */
-		spin_lock(&page->lock);
-		page->count++;
+		page_init(page);
+		page->refcnt++;
 		page->owner = vm_obj;
 		page->offset = page_offset;
 		page->virtual = 0;
@@ -87,7 +107,6 @@ struct page *file_page_in(struct vm_object *vm_obj, unsigned long page_offset)
 	/* Add the page to owner's list of in-memory pages */
 	BUG_ON(!list_empty(&page->list));
 	insert_page_olist(page, vm_obj);
-	spin_unlock(&page->lock);
 	}
 
 	return page;
@@ -142,7 +161,7 @@ struct vm_swap_node {
 struct page *swap_page_in(struct vm_object *vm_obj, unsigned long file_offset)
 {
 	/* No swapping yet, so the page is either here or not here. */
-	return find_page(vm_obj, page_offset);
+	return find_page(vm_obj, file_offset);
 }
 
 struct vm_pager swap_pager = {
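
Each vm_object delegates its paging policy through its vm_pager: generic fault-handling code can demand a page without knowing whether the backing store is a boot image, /dev/zero, a file or (eventually) swap. A sketch of the dispatch, assuming only the ops members used in this file:

	/* Hypothetical wrapper, not a function in this tree */
	static struct page *object_page_in(struct vm_object *obj,
					   unsigned long offset)
	{
		return obj->pager->ops.page_in(obj, offset);
	}
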
@@ -152,6 +171,27 @@ struct vm_pager swap_pager = {
 	},
 };
 
+/*
+ * Just releases the page structures since the actual pages are
+ * already in memory as read-only.
+ */
+int bootfile_release_pages(struct vm_object *vm_obj)
+{
+	struct page *p, *n;
+
+	list_for_each_entry_safe(p, n, &vm_obj->page_cache, list) {
+		list_del(&p->list);
+		BUG_ON(p->refcnt);
+
+		/* Free the page structure */
+		kfree(p);
+
+		/* Reduce object page count */
+		BUG_ON(--vm_obj->npages < 0);
+	}
+	return 0;
+}
+
 /* Returns the page with given offset in this vm_object */
 struct page *bootfile_page_in(struct vm_object *vm_obj,
 			      unsigned long offset)
@@ -161,23 +201,29 @@ struct page *bootfile_page_in(struct vm_object *vm_obj,
 	struct page *page = phys_to_page(img->phys_start +
 					 __pfn_to_addr(offset));
 
-	spin_lock(&page->lock);
-	page->count++;
-	spin_unlock(&page->lock);
+	/* TODO: Check file length against page offset! */
 
-	/* FIXME: Why not add pages to linked list and update npages? */
+	/* Update page */
+	page_init(page);
+	page->refcnt++;
+
+	/* Update object */
 	vm_obj->npages++;
 
+	/* Add the page to owner's list of in-memory pages */
+	BUG_ON(!list_empty(&page->list));
+	insert_page_olist(page, vm_obj);
+
 	return page;
 }
 
 struct vm_pager bootfile_pager = {
 	.ops = {
 		.page_in = bootfile_page_in,
+		.release_pages = bootfile_release_pages,
 	},
 };
 
-LIST_HEAD(boot_file_list);
-
 /* From bare boot images, create mappable device files */
 int init_boot_files(struct initdata *initdata)
@@ -196,7 +242,7 @@ int init_boot_files(struct initdata *initdata)
 	boot_file->type = VM_FILE_BOOTFILE;
 
 	/* Initialise the vm object */
-	boot_file->vm_obj.type = VM_OBJ_FILE;
+	boot_file->vm_obj.flags = VM_OBJ_FILE;
 	boot_file->vm_obj.pager = &bootfile_pager;
 
 	/* Add the object to global vm_object list */
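
Note the access pattern these hunks settle on: the vm_object lives embedded by value inside struct vm_file, so its members are reached with '.' (boot_file->vm_obj.flags) and a pointer to it with '&' (&boot_file->vm_obj), while vm_object_to_file() walks back from the embedded object to its container. That accessor is presumably the classic container_of idiom; a sketch under that assumption:

	#include <stddef.h>	/* offsetof */

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Assumed shape of the accessor used by file_page_in() above */
	#define vm_object_to_file(obj) \
		container_of(obj, struct vm_file, vm_obj)
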
@@ -219,8 +265,8 @@ struct page *devzero_page_in(struct vm_object *vm_obj,
 
 	/* Update zero page struct. */
 	spin_lock(&zpage->lock);
-	BUG_ON(zpage->count < 0);
-	zpage->count++;
+	BUG_ON(zpage->refcnt < 0);
+	zpage->refcnt++;
 	spin_unlock(&zpage->lock);
 
 	return zpage;
@@ -254,13 +300,13 @@ int init_devzero(void)
 	zvirt = l4_map_helper(zphys, 1);
 	memset(zvirt, 0, PAGE_SIZE);
 	l4_unmap_helper(zvirt, 1);
-	zpage->count++;
+	zpage->refcnt++;
 
 	/* Allocate and initialise devzero file */
 	devzero = vmfile_alloc_init();
 	devzero->vm_obj.npages = ~0;
 	devzero->vm_obj.pager = &devzero_pager;
-	devzero->vm_obj.type = VM_OBJ_FILE;
+	devzero->vm_obj.flags = VM_OBJ_FILE;
 	devzero->type = VM_FILE_DEVZERO;
 	devzero->priv_data = zpage;
 
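
The devzero design is worth spelling out: init_devzero() allocates a single physical page, zero-fills it through a temporary mapping, and records it in devzero->priv_data; every subsequent page-in for the object hands back that same page with its refcount bumped, so all zero mappings share one frame, presumably until a write fault copies it (the copy-on-write path is outside these hunks). The npages = ~0 assignment simply advertises an unbounded object. Reassembled from the fragments in this diff; the parameter name and the priv_data lookup are inferred, not shown in the hunks:

	struct page *devzero_page_in(struct vm_object *vm_obj,
				     unsigned long page_offset)
	{
		struct vm_file *devzero = vm_object_to_file(vm_obj);
		struct page *zpage = devzero->priv_data;

		/* Update zero page struct. */
		spin_lock(&zpage->lock);
		BUG_ON(zpage->refcnt < 0);
		zpage->refcnt++;
		spin_unlock(&zpage->lock);

		return zpage;
	}
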
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2008 Bahadir Balban
- */
-#include <l4lib/types.h>
-#include <l4/lib/list.h>
-#include <l4/api/kip.h>
-#include <l4/api/errno.h>
-#include <kmalloc/kmalloc.h>
-#include <vm_area.h>
-#include <string.h>
-#include <file.h>
-#include <task.h>
-#include <proc.h>
-
-
-/* Allocates and fills in the env page. This is like a pre-faulted file. */
-int task_populate_env(struct task *task)
-{
-	void *paddr = alloc_page(1);
-	void *vaddr = phys_to_virt(paddr);
-	struct page *page = phys_to_page(paddr);
-
-	/* Map new page at a self virtual address temporarily */
-	l4_map(paddr, vaddr, 1, MAP_USR_RW_FLAGS, self_tid());
-
-	/* Clear the page */
-	memset((void *)vaddr, 0, PAGE_SIZE);
-
-	/* Fill in environment data */
-	memcpy((void *)vaddr, &t->utcb_address, sizeof(t->utcb_address));
-
-	/* Remove temporary mapping */
-	l4_unmap((void *)vaddr, 1, self_tid());
-
-	spin_lock(&page->lock);
-
-	/* Environment file owns this page */
-	page->owner = task->proc_files->env_file;
-
-	/* Add the page to it's owner's list of in-memory pages */
-	BUG_ON(!list_empty(&page->list));
-	insert_page_olist(page, page->owner);
-
-	/* The offset of this page in its owner file */
-	page->f_offset = 0;
-
-	page->count++;
-	page->virtual = 0;
-	spin_unlock(&page->lock);
-
-	return 0;
-}
-
-/*
- * For a task that is about to execute, this dynamically
- * generates its environment file, and environment data.
- */
-int task_setup_vm_objects(struct tcb *t)
-{
-	struct proc_vm_objects *po = &t->proc_vm_objects;
-
-	if (IS_ERR(pf->stack = vmfile_alloc_init()))
-		return (int)t->stack_file;
-	if (IS_ERR(pf->env = vmfile_alloc_init()))
-		return (int)t->env_file;
-	if (IS_ERR(pf->env_file = vmfile_alloc_init()))
-		return (int)t->data_file;
-
-	t->env_file->vnum = (t->tid << 16) | TASK_ENV_VNUM;
-	t->env_file->length = t->env_end - t->env_start;
-	t->env_file->pager = &task_anon_pager;
-	list_add(&t->env_file->list, &vm_file_list);
-
-	t->stack_file->vnum = (t->tid << 16) TASK_STACK_VNUM;
-	t->stack_file->length = t->stack_end - t->stack_start;
-	t->stack_file->pager = &task_anon_pager;
-	list_add(&t->stack_file->list, &vm_file_list);
-
-	t->data_file->vnum = (t->tid << 16) TASK_DATA_VNUM;
-	t->data_file->length = t->data_end - t->data_start;
-	t->data_file->pager = &task_anon_pager;
-	list_add(&t->data_file->list, &vm_file_list);
-
-	/* Allocate, initialise and add per-task env data */
-	return task_populate_env(task);
-}
-
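
The deleted file also documents the per-task vnum scheme: the task id in the high 16 bits or'd with a small per-file tag, so each task-private file (env, stack, data) gets a unique vnum. Two of the three assignments above had dropped the '|' operator and could never have compiled, which is likely part of why the file goes away in this commit. Written consistently, the encoding reads:

	/* vnum layout: tid in bits 31..16, file-type tag in the low bits */
	t->env_file->vnum   = (t->tid << 16) | TASK_ENV_VNUM;
	t->stack_file->vnum = (t->tid << 16) | TASK_STACK_VNUM;
	t->data_file->vnum  = (t->tid << 16) | TASK_DATA_VNUM;
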
@@ -17,7 +17,7 @@
 #include <l4lib/ipcdefs.h>
 #include <lib/addr.h>
 #include <kmalloc/kmalloc.h>
-#include <kdata.h>
+#include <init.h>
 #include <string.h>
 #include <vm_area.h>
 #include <memory.h>
@@ -26,6 +26,9 @@
 #include <proc.h>
 #include <task.h>
 
+/* A separate list than the generic file list that keeps just the boot files */
+LIST_HEAD(boot_file_list);
+
 #if 0
 int start_boot_tasks(struct initdata *initdata, struct tcb_head *tcbs)
 {
@@ -198,16 +201,20 @@ struct tcb *tcb_alloc_init(void)
  */
 int start_boot_task(struct vm_file *file, struct task_ids *ids)
 {
+	int err;
+	struct tcb *task;
+	unsigned int sp, pc;
+
 	/* Create the thread structures and address space */
 	printf("Creating new thread.\n");
-	if ((err = l4_thread_control(THREAD_CREATE, &ids)) < 0) {
+	if ((err = l4_thread_control(THREAD_CREATE, ids)) < 0) {
 		printf("l4_thread_control failed with %d.\n", err);
 		goto error;
 	}
 
 	/* Create a task and use given space and thread ids. */
 	printf("New task with id: %d, space id: %d\n", ids->tid, ids->spid);
-	task = tcb_alloc_init(tcbs);
+	task = tcb_alloc_init();
 	task->tid = ids->tid;
 	task->spid = ids->spid;
 
@@ -220,11 +227,11 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
 	task->args_end = task->env_start;
 	task->args_start = task->env_start;
 
-	/* Task stack starts right after the environment. TODO: Fix this. */
+	/* Task stack starts right after the environment. */
 	task->stack_end = task->env_start;
 	task->stack_start = task->stack_end - DEFAULT_STACK_SIZE;
 
-	/* Currently RO text and RW data are one region */
+	/* Currently RO text and RW data are one region. TODO: Fix this */
 	task->data_start = USER_AREA_START;
 	task->data_end = USER_AREA_START + page_align_up(file->length);
 	task->text_start = task->data_start;
@@ -283,15 +290,20 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
 	}
 
 	/* Add the task to the global task list */
-	list_add(&task->list, tcb_head->list);
-	tcb_head->total++;
+	list_add(&task->list, &tcb_head.list);
+	tcb_head.total++;
 
 	/* Start the thread */
 	printf("Starting task with id %d\n", task->tid);
-	if ((err = l4_thread_control(THREAD_RUN, &ids) < 0)) {
+	if ((err = l4_thread_control(THREAD_RUN, ids) < 0)) {
 		printf("l4_thread_control failed with %d\n", err);
 		goto error;
 	}
 
+	return 0;
+
+error:
+	BUG();
 }
 
 /*
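
Two things change shape here: tcb_head is evidently a global struct rather than a pointer now (hence &tcb_head.list and tcb_head.total++), and start_boot_task() finally has a defined exit path, returning 0 on success and deliberately BUG()ing at the error label since boot cannot continue without its tasks. One parenthesisation slip survives, though: in (err = l4_thread_control(THREAD_RUN, ids) < 0) the comparison binds tighter than the assignment, so err receives 0 or 1 rather than the return code. From the accesses alone, the global presumably looks like:

	/* Assumed definition; only the two members used above are shown */
	struct tcb_head {
		struct list_head list;	/* all tasks, linked via tcb->list */
		int total;		/* task count */
	} tcb_head;
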
@@ -300,9 +312,8 @@ int start_boot_task(struct vm_file *file, struct task_ids *ids)
  */
 int start_boot_tasks(struct initdata *initdata)
 {
-	struct vm_file *file;
-	struct list_head *n;
-	struct svg_image *img;
+	struct vm_file *file, *n;
+	struct svc_image *img;
 	struct task_ids ids;
 	int total = 0;
 
@@ -332,7 +343,7 @@ int start_boot_tasks(struct initdata *initdata)
 
 	/* Add the file to global vm lists */
 	list_add(&file->list, &vm_file_list);
-	list_add(&file->vm_obj->list, &vm_object_list);
+	list_add(&file->vm_obj.list, &vm_object_list);
 
 	/* Start the file as a task */
 	start_boot_task(file, &ids);
@@ -349,7 +360,7 @@ int start_boot_tasks(struct initdata *initdata)
 
 void init_pm(struct initdata *initdata)
 {
-	start_boot_tasks(initdata, &tcb_head);
+	start_boot_tasks(initdata);
 }
 
 /*
@@ -360,7 +371,7 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
 	unsigned long vaddr_vma_offset;
 	unsigned long vaddr_file_offset;
 	struct vm_area *vma;
-	struct vm_file *vmfile;
+	struct vm_object *vmo;
 	struct page *page;
 
 	/* First find the vma that maps that virtual address */
@@ -376,9 +387,14 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
 	vaddr_vma_offset = __pfn(virtual) - vma->pfn_start;
 
 	/* Find the file offset of virtual address in this file */
-	vmfile = vma->owner;
+	vmo = vma->owner;
 	vaddr_file_offset = vma->f_offset + vaddr_vma_offset;
 
+	/* TODO:
+	 * Traverse vm objects to find the one with the page.
+	 * Check each object's cache by find page. dont page in.
+	 */
+
 	/*
 	 * Find the page with the same file offset with that of the
 	 * virtual address, that is, if the page is resident in memory.
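
The translation in task_virt_to_page() is a two-step offset calculation: first the page offset of the address within its vma, then the page offset within the backing object that the vma maps. For example, a vma mapping a file from page 4 onwards at pfn 0x100 onwards turns an access at pfn 0x102 into vma page 2 and hence file page 6:

	/* Worked through with the variables used above */
	vaddr_vma_offset  = __pfn(virtual) - vma->pfn_start;	/* 0x102 - 0x100 = 2 */
	vaddr_file_offset = vma->f_offset + vaddr_vma_offset;	/* 4 + 2 = 6 */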