Mirror of https://github.com/drasko/codezero.git (synced 2026-01-12 02:43:15 +01:00)
Got mm0 to compile. A lot of issues expected.
@@ -28,8 +28,18 @@
/* Private copy of a file */
#define VMA_PRIVATE (1 << 6)
/* Copy-on-write semantics */
#define VMA_COW (1 << 7)
#define VMA_FIXED (1 << 8)
#define VMA_FIXED (1 << 7)

/* Defines the type of file. A device file? Regular file? One used at boot? */
enum VM_FILE_TYPE {
	VM_FILE_DEVZERO = 0,
	VM_FILE_REGULAR,
	VM_FILE_BOOTFILE,
};

/* Defines the type of object. A file? Just a standalone object? */
#define VM_OBJ_SHADOW (1 << 8)	/* Anonymous pages, swap_pager */
#define VM_OBJ_FILE (1 << 9)	/* VFS file and device pages */

struct page {
	int refcnt;		/* Refcount */

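The VMA_* and VM_OBJ_* values above are single-bit masks rather than sequential enum constants, so a mapping or object can carry several of them in one flags word and each can be tested independently. A minimal sketch of how such masks are combined and checked; the helper name and the example flags word are illustrative only, not part of this commit:

#include <stdio.h>

#define VMA_PRIVATE	(1 << 6)
#define VMA_COW		(1 << 7)
#define VMA_FIXED	(1 << 8)

/* Hypothetical helper: a private mapping with copy-on-write semantics. */
static int vma_is_private_cow(unsigned int flags)
{
	return (flags & (VMA_PRIVATE | VMA_COW)) == (VMA_PRIVATE | VMA_COW);
}

int main(void)
{
	unsigned int flags = VMA_PRIVATE | VMA_COW;

	printf("private cow mapping: %d\n", vma_is_private_cow(flags));
	return 0;
}
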
@@ -80,18 +90,6 @@ struct vm_pager {
	struct vm_pager_ops ops; /* The ops the pager does on area */
};

/* Defines the type of file. A device file? Regular file? One used at boot? */
enum VM_FILE_TYPE {
	VM_FILE_DEVZERO = 0,
	VM_FILE_REGULAR,
	VM_FILE_BOOTFILE,
};

/* Defines the type of object. A file? Just a standalone object? */
enum VM_OBJ_TYPE {
	VM_OBJ_SHADOW = 1,	/* Anonymous pages, swap_pager */
	VM_OBJ_FILE,		/* VFS file and device pages */
};

/* TODO:
 * How to distinguish different devices handling page faults ???

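This hunk drops the old enum VM_FILE_TYPE / enum VM_OBJ_TYPE definitions (they now live further up as the values shown in the previous hunk) and keeps struct vm_pager, the abstraction the rest of the commit leans on: each vm object points at a pager whose ops table does the actual paging work. A rough sketch of that pattern, limited to the two callbacks that actually appear in this diff (page_in and release_pages); the real struct layouts in the tree may differ:

struct vm_object;
struct page;

/* Assumed shape of the ops table, inferred from the calls in this commit. */
struct vm_pager_ops {
	struct page *(*page_in)(struct vm_object *obj, unsigned long f_offset);
	int (*release_pages)(struct vm_object *obj);
};

struct vm_pager {
	struct vm_pager_ops ops;	/* The ops the pager does on area */
};

struct vm_object {
	struct vm_pager *pager;		/* e.g. &devzero_pager, &swap_pager */
};

/* Caller side, as read_file_pages() does below: */
static struct page *object_page_in(struct vm_object *obj, unsigned long off)
{
	return obj->pager->ops.page_in(obj, off);
}
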
@@ -184,11 +182,13 @@ extern struct vm_pager bootfile_pager;
extern struct vm_pager devzero_pager;
extern struct vm_pager swap_pager;

/* Pager initialisation, Special files */

/* vm object and vm file lists */
extern struct list_head vm_object_list;

/* vm object link related functions */
struct vm_obj_link *vma_next_link(struct list_head *link,
				  struct list_head *head);

/* vm file and object initialisation */
struct vm_file *vm_file_alloc_init(void);
struct vm_object *vm_object_alloc_init(void);

@@ -168,12 +168,12 @@ int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,

	for (int f_offset = pfn_start; f_offset < pfn_end; f_offset++) {
		page = vmfile->vm_obj.pager->ops.page_in(&vmfile->vm_obj,
							 f_offset);
		if (IS_ERR(page)) {
			printf("%s: %s:Could not read page %d "
			       "from file with vnum: 0x%x\n", __TASKNAME__,
			       __FUNCTION__, f_offset, vmfile->vnum);
			break;
			return (int)page;
		}
	}

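read_file_pages() reports failure through error pointers: page_in() returns either a valid struct page * or a small negative error code encoded in the pointer, which IS_ERR() detects and the caller then propagates as a plain int. A minimal sketch of that convention, assuming the usual Linux-style encoding; the exact macros in this tree may be defined differently:

#include <stdio.h>

#define MAX_ERRNO	4095

/* Assumed encoding: error codes occupy the top MAX_ERRNO pointer values. */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *page = ERR_PTR(-12);	/* e.g. a pager failing with -ENOMEM */

	if (IS_ERR(page))
		printf("page_in failed: %ld\n", PTR_ERR(page));
	return 0;
}
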
@@ -1,9 +1,9 @@
/*
 * Initialise the memory structures.
 *
 * Copyright (C) 2007 Bahadir Balban
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
#include <kdata.h>
#include <init.h>
#include <memory.h>
#include <l4/macros.h>
#include <l4/config.h>

@@ -78,7 +78,7 @@ void init_physmem(struct initdata *initdata, struct membank *membank)

		/* Set use counts for pages the kernel has already used up */
		if (!(pmap->map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)))
			membank[0].page_array[i].count = -1;
			membank[0].page_array[i].refcnt = -1;
		else /* Last page used +1 is free */
			ffree_addr = (i + 1) * PAGE_SIZE;
	}

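Here pmap->map is a bitmap over physical page frames: BITWISE_GETWORD() selects the word holding bit i and BITWISE_GETBIT() the mask within that word, and frames whose bit marks kernel use get their refcnt parked at -1 so they are never handed out again. A sketch of how such bitmap helpers are conventionally defined; these definitions are assumptions for illustration, not copied from the tree:

#include <stdio.h>

#define WORD_BITS		32
/* Assumed definitions of the bitmap helpers used by init_physmem(). */
#define BITWISE_GETWORD(i)	((i) / WORD_BITS)
#define BITWISE_GETBIT(i)	(1U << ((i) % WORD_BITS))

int main(void)
{
	unsigned int map[4] = { 0 };
	int i = 70;

	map[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);	/* mark frame 70 */
	printf("word %d, mask 0x%x, set: %d\n",
	       BITWISE_GETWORD(i), BITWISE_GETBIT(i),
	       !!(map[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i)));
	return 0;
}
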
@@ -329,6 +329,13 @@ pgtable_unmap:
#endif
	return 0;
}
#endif


int do_munmap(void *vaddr, unsigned long size, struct tcb *task)
{
	return 0;
}

int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)
{

@@ -338,7 +345,6 @@ int sys_munmap(l4id_t sender, void *vaddr, unsigned long size)

	return do_munmap(vaddr, size, task);
}
#endif

struct vm_area *vma_new(unsigned long pfn_start, unsigned long npages,
			unsigned int flags, unsigned long file_offset,

@@ -117,23 +117,6 @@ out_err:
	return PTR_ERR(err);
}

/*
 * This reads-in a range of pages from a file and populates the page cache
 * just like a page fault, but its not in the page fault path.
 */
int read_file_pages(struct vm_file *f, unsigned long pfn_start,
		    unsigned long pfn_end)
{
	struct page *p;

	for (int f_offset = pfn_start; f_offset < pfn_end; f_offset++)
		if (IS_ERR(p = f->vm_obj.pager->ops.page_in(&f->vm_obj,
							    f_offset)))
			return (int)p;

	return 0;
}

/*
 * All non-mmapable char devices are handled by this.
 * VFS calls those devices to read their pages

@@ -236,7 +219,7 @@ int init_boot_files(struct initdata *initdata)

	for (int i = 0; i < bd->total_images; i++) {
		img = &bd->images[i];
		boot_file = vm_file_alloc_init();
		boot_file = vm_file_create();
		boot_file->priv_data = img;
		boot_file->length = img->phys_end - img->phys_start;
		boot_file->type = VM_FILE_BOOTFILE;

@@ -303,7 +286,7 @@ int init_devzero(void)
	zpage->refcnt++;

	/* Allocate and initialise devzero file */
	devzero = vmfile_alloc_init();
	devzero = vm_file_create();
	devzero->vm_obj.npages = ~0;
	devzero->vm_obj.pager = &devzero_pager;
	devzero->vm_obj.flags = VM_OBJ_FILE;

@@ -365,13 +365,14 @@ void init_pm(struct initdata *initdata)

/*
 * Makes the virtual to page translation for a given user task.
 * If page is not mapped (either not faulted or swapped), returns 0.
 */
struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
{
	unsigned long vaddr_vma_offset;
	unsigned long vaddr_file_offset;
	unsigned long vma_offset;
	unsigned long file_offset;
	struct vm_obj_link *vmo_link;
	struct vm_area *vma;
	struct vm_object *vmo;
	struct page *page;

	/* First find the vma that maps that virtual address */

@@ -384,33 +385,31 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
	/* Find the pfn offset of virtual address in this vma */
	BUG_ON(__pfn(virtual) < vma->pfn_start ||
	       __pfn(virtual) > vma->pfn_end);
	vaddr_vma_offset = __pfn(virtual) - vma->pfn_start;
	vma_offset = __pfn(virtual) - vma->pfn_start;

	/* Find the file offset of virtual address in this file */
	vmo = vma->owner;
	vaddr_file_offset = vma->f_offset + vaddr_vma_offset;
	file_offset = vma->file_offset + vma_offset;

	/* TODO:
	 * Traverse vm objects to find the one with the page.
	 * Check each object's cache by find page. dont page in.
	 */
	/* Get the initial link */
	BUG_ON(!(vmo_link = vma_next_link(&vma->vm_obj_list,
					  &vma->vm_obj_list)));

	/*
	 * Find the page with the same file offset with that of the
	 * virtual address, that is, if the page is resident in memory.
	 */
	list_for_each_entry(page, &vmfile->page_cache_list, list)
		if (vaddr_file_offset == page->f_offset) {
			printf("%s: %s: Found page @ 0x%x, f_offset: 0x%x, with vma @ 0x%x, vmfile @ 0x%x\n", __TASKNAME__,
			       __FUNCTION__, (unsigned long)page, page->f_offset, vma, vma->owner);
			return page;
		}
	/* Is page there in the cache ??? */
	while(!(page = find_page(vmo_link->obj, file_offset))) {
		/* No, check the next link */
		if (!(vmo_link = vma_next_link(&vma->vm_obj_list,
					       &vma->vm_obj_list)));
			/* Exhausted the objects. The page is not there. */
			return 0;
	}

	/*
	 * The page is not found, meaning that it is not mapped in
	 * yet, e.g. via a page fault.
	 */
	return 0;
	/* Found it */
	printf("%s: %s: Found page @ 0x%x, file_offset: 0x%x, "
	       "with vma @ 0x%x, vm object @ 0x%x\n", __TASKNAME__,
	       __FUNCTION__, (unsigned long)page, page->offset,
	       vma, vmo_link->obj);

	return page;
}

struct task_data {

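The reworked lookup no longer scans a single file's page list; it walks the vma's chain of vm object links and asks each object's page cache for the computed file offset, stopping at the first hit. Note that the stray semicolon after the inner if() in the hunk above makes the return unconditional as committed. A minimal sketch of the intended walk; find_page() and vma_next_link() are declared here with assumed behaviour (vma_next_link() advances to the next object link on each call and returns NULL once the chain is exhausted):

#include <stddef.h>

struct page;
struct vm_object;
struct list_head { struct list_head *next, *prev; };
struct vm_obj_link { struct vm_object *obj; };

/* Assumed helpers: look up a cached page by file offset in one object,
 * and step to the next object link in the vma's chain (NULL when done). */
struct page *find_page(struct vm_object *obj, unsigned long file_offset);
struct vm_obj_link *vma_next_link(struct list_head *link, struct list_head *head);

/* Sketch: the first object along the chain that caches the offset wins. */
static struct page *lookup_cached_page(struct list_head *obj_list,
				       unsigned long file_offset)
{
	struct vm_obj_link *link = vma_next_link(obj_list, obj_list);
	struct page *page = NULL;

	while (link && !(page = find_page(link->obj, file_offset)))
		link = vma_next_link(obj_list, obj_list);

	return page;	/* NULL means not resident: take the page-fault path */
}
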
@@ -12,14 +12,10 @@
/* Global list of in-memory vm objects. */
LIST_HEAD(vm_object_list);

/* Global list of in-memory vm files */
LIST_HEAD(vm_file_list);

struct vm_object *vm_object_init(struct vm_object *obj)
{
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->page_cache);
	INIT_LIST_HEAD(&obj->shadows);

	return obj;
}

@@ -42,9 +38,9 @@ struct vm_file *vm_file_create(void)
	if (!(f = kzalloc(sizeof(*f))))
		return PTR_ERR(-ENOMEM);

	INIT_LIST_HEAD(&f->file_list);
	INIT_LIST_HEAD(&f->list);
	vm_object_init(&f->vm_obj);
	f->vm_obj->type = VM_OBJ_FILE;
	f->vm_obj.flags = VM_OBJ_FILE;

	return f;
}

@@ -55,7 +51,7 @@ int vm_object_delete(struct vm_object *vmo)
	struct vm_file *f;

	/* Release all pages */
	vmo->pager.ops->release_pages(vmo);
	vmo->pager->ops.release_pages(vmo);

	/* Remove from global list */
	list_del(&vmo->list);

@@ -63,14 +59,14 @@ int vm_object_delete(struct vm_object *vmo)
	/* Check any references */
	BUG_ON(vmo->refcnt);
	BUG_ON(!list_empty(&vmo->shadowers));
	BUG_ON(!list_emtpy(&vmo->page_cache));
	BUG_ON(!list_empty(&vmo->page_cache));

	/* Obtain and free via the base object */
	if (vmo->flags & VM_OBJ_FILE) {
		f = vm_object_to_file(vmo);
		kfree(f);
	} else if (vmo->flags & VM_OBJ_SHADOW)
		kfree(obj);
		kfree(vmo);
	else BUG();

	return 0;

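The delete path works because struct vm_file embeds its struct vm_object by value (the f->vm_obj initialised in vm_file_create() above), so vm_object_to_file() can recover the enclosing file from the base-object pointer and kfree() the whole allocation. A minimal sketch of that embed-and-recover pattern; the field layout and the offsetof-based helper here are illustrative assumptions, not the tree's actual definitions:

#include <stddef.h>
#include <stdio.h>

struct vm_object {
	unsigned int flags;
};

/* Assumed layout: the file embeds its base vm_object by value. */
struct vm_file {
	unsigned long length;
	struct vm_object vm_obj;
};

/* Recover the enclosing vm_file from a pointer to its embedded object. */
static struct vm_file *vm_object_to_file(struct vm_object *obj)
{
	return (struct vm_file *)((char *)obj - offsetof(struct vm_file, vm_obj));
}

int main(void)
{
	struct vm_file f = { .length = 4096 };
	struct vm_object *obj = &f.vm_obj;

	printf("recovered length: %lu\n", vm_object_to_file(obj)->length);
	return 0;
}
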