FS0 compiles now, with a mock-up rootfs.

Making progress on vfs, slowly but surely ;-)
This commit is contained in:
Bahadir Balban
2008-01-15 00:34:10 +00:00
parent efd797c678
commit 6bb5b45212
14 changed files with 174 additions and 275 deletions

Binary file not shown.

View File

@@ -22,7 +22,7 @@ struct file_ops {
file_op_t write;
file_op_t close;
file_op_t mmap;
file_op_t seek;
file_op_t lseek;
file_op_t flush;
file_op_t fsync;
};
@@ -52,11 +52,14 @@ struct filesystem;
struct superblock;
struct vnode;
#define VFS_DENTRY_NAME_MAX 512
struct dentry {
int refcnt;
char name[512];
char name[VFS_DENTRY_NAME_MAX];
struct dentry *parent; /* Parent dentry */
struct list_head siblings; /* List of dentries with same parent */
struct list_head child; /* List of dentries with same parent */
struct list_head children; /* List of children dentries */
struct list_head dref_list; /* For vnode's dirent reference list */
struct vnode *vnode; /* The vnode associated with dirent */
struct dentry_ops ops;
};
@@ -70,6 +73,7 @@ struct file {
struct vnode {
unsigned long id; /* Filesystem-wide unique vnode id */
int refcnt; /* Reference counter */
int hardlinks; /* Number of hard links */
struct vnode_ops ops; /* Operations on this vnode */
struct list_head dirents; /* Dirents that refer to this vnode */
struct list_head state_list; /* List for vnode's dirty/clean state */

View File

@@ -15,7 +15,6 @@
struct initdata {
struct bootdesc *bootdesc;
struct block_device *bdev;
};
extern struct initdata initdata;

View File

@@ -0,0 +1,14 @@
/*
 * System call function signatures.
 *
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
#ifndef __FS0_SYSCALLS_H__
#define __FS0_SYSCALLS_H__
/* Each handler takes the requesting thread's id first, then arguments
 * mirroring the corresponding POSIX-style call. */
/* Open `pathname' on behalf of `sender' with the given flags and mode. */
int sys_open(l4id_t sender, char *pathname, int flags, u32 mode);
/* Read up to `cnt' bytes from descriptor `fd' into `buf'. */
int sys_read(l4id_t sender, int fd, void *buf, int cnt);
/* Write `cnt' bytes from `buf' to descriptor `fd'. */
int sys_write(l4id_t sender, int fd, void *buf, int cnt);
/* Reposition the file offset of `fd' by `offset' relative to `whence'. */
int sys_lseek(l4id_t sender, int fd, unsigned long offset, int whence);
#endif /* __FS0_SYSCALLS_H__ */

View File

@@ -7,18 +7,21 @@
#include <string.h>
#include <l4lib/arch/message.h>
#include <l4lib/arch/syscalls.h>
#include <l4lib/arch/syslib.h>
#include <l4lib/kip.h>
#include <l4lib/utcb.h>
#include <l4lib/ipcdefs.h>
#include <fs.h>
#include <init.h>
#include <kdata.h>
#include <syscalls.h>
#include <task.h>
/* Synchronise with pager via a `wait' tagged ipc with destination as pager */
void wait_pager(l4id_t partner)
{
	/*
	 * NOTE(review): this span is a diff with both the old body
	 * (tagged l4_ipc handshake) and the new body (plain l4_send)
	 * interleaved; only one of the two should remain in the file.
	 */
	u32 tag = L4_IPC_TAG_WAIT;
	printf("Going to wait till pager finishes dumping.\n");
	l4_ipc(partner, l4_nilthread, tag);
	printf("Pager synced with us.\n");
	l4_send(partner, L4_IPC_TAG_WAIT);
	printf("%s: Pager synced with us.\n", __TASKNAME__);
}
void handle_fs_requests(void)
@@ -68,6 +71,8 @@ void handle_fs_requests(void)
void main(void)
{
initialise();
wait_pager(PAGER_TID);
while (1) {

120
tasks/fs0/src/init.c Normal file
View File

@@ -0,0 +1,120 @@
/*
* FS0 Initialisation.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <kdata.h>
#include <fs.h>
#include <string.h>
#include <stdio.h>
#include <l4/lib/list.h>
#include <init.h>
struct filesystem bootfs = {
.magic = 0,
.name = "Tempfs for boot images",
};
struct superblock bootfs_sb;
#define BOOTFS_IMG_MAX 10
struct vnode bootfs_vnode[BOOTFS_IMG_MAX];
struct dentry bootfs_dentry[BOOTFS_IMG_MAX];
struct file bootfs_file[BOOTFS_IMG_MAX];
struct rootdir {
struct dentry *d;
struct filesystem *fs;
};
struct rootdir bootfs_root;
/*
 * Sets up the dentry/vnode pair for the filesystem root directory and
 * publishes it through the global bootfs_root.
 *
 * @root_vn: vnode slot reserved for the root directory.
 * @root_d:  dentry slot reserved for the root directory.
 */
void init_root(struct vnode *root_vn, struct dentry *root_d)
{
	/* Initialise dentry for rootdir */
	root_d->refcnt = 0;
	/* Root has the empty name; "/" is supplied by PATH_SEP when printed */
	root_d->name[0] = '\0';
	/*
	 * The parent pointer was left uninitialised; make root its own
	 * parent by the usual convention so d->parent is always valid.
	 */
	root_d->parent = root_d;
	INIT_LIST_HEAD(&root_d->child);
	INIT_LIST_HEAD(&root_d->children);
	INIT_LIST_HEAD(&root_d->dref_list);
	root_d->vnode = root_vn;
	/* Initialise vnode for rootdir */
	root_vn->id = 0;
	root_vn->refcnt = 0;
	INIT_LIST_HEAD(&root_vn->dirents);
	INIT_LIST_HEAD(&root_vn->state_list);
	/* Root dentry is the first (and only) dirent referring to root vnode */
	list_add(&root_d->dref_list, &root_vn->dirents);
	root_vn->size = 0;
	/* Initialise global struct rootdir ptr */
	bootfs_root.d = root_d;
	bootfs_root.fs = &bootfs;
}
#define PATH_SEP "/"
#define PATH_CURDIR "."
#define PATH_OUTDIR ".."
/*
 * Debug helper: prints the root's name followed by every dentry one
 * level below it (with each entry's size), then the accumulated total.
 */
void fs_debug_list_all(struct dentry *root)
{
	struct dentry *d;
	int stotal = 0;

	/* List paths first */
	printf("%s%s\n", root->name, PATH_SEP);
	list_for_each_entry(d, &root->children, child) {
		printf("%s%s%s, size: 0x%x\n", d->parent->name,
		       PATH_SEP, d->name, d->vnode->size);
		stotal += d->vnode->size;
	}
	/* stotal was accumulated but never reported — print it. */
	printf("Total: 0x%x bytes\n", stotal);
}
/*
 * Builds the boot filesystem's vfs objects (dentry/vnode/file) from the
 * boot descriptor: slot 0 is the root directory, slots 1..total_images
 * describe the boot images, all parented under the root.
 *
 * NOTE(review): the parameter shadows the global `initdata' declared in
 * init.h; harmless here, but worth renaming.
 */
void init_bootfs(struct initdata *initdata)
{
	struct bootdesc *bd = initdata->bootdesc;
	struct dentry *img_d = &bootfs_dentry[1];
	struct vnode *img_vn = &bootfs_vnode[1];
	struct file *img_f = &bootfs_file[1];
	struct svc_image *img;

	/* The first vfs object slot is for the root */
	init_root(&bootfs_vnode[0], &bootfs_dentry[0]);

	/* Images occupy slots 1..total_images, so total must stay below MAX */
	BUG_ON(bd->total_images >= BOOTFS_IMG_MAX);
	for (int i = 0; i < bd->total_images; i++) {
		img = &bd->images[i];

		/* Initialise dentry for image */
		img_d->refcnt = 0;
		strncpy(img_d->name, img->name, VFS_DENTRY_NAME_MAX);
		/* strncpy does not guarantee NUL-termination; force it. */
		img_d->name[VFS_DENTRY_NAME_MAX - 1] = '\0';
		INIT_LIST_HEAD(&img_d->child);
		INIT_LIST_HEAD(&img_d->children);
		img_d->vnode = img_vn;
		img_d->parent = bootfs_root.d;
		list_add(&img_d->child, &bootfs_root.d->children);

		/* Initialise vnode for image */
		img_vn->id = img->phys_start;
		img_vn->refcnt = 0;
		INIT_LIST_HEAD(&img_vn->dirents);
		/* Was left uninitialised here; init_root() initialises it */
		INIT_LIST_HEAD(&img_vn->state_list);
		list_add(&img_d->dref_list, &img_vn->dirents);
		img_vn->size = img->phys_end - img->phys_start;

		/* Initialise file struct for image */
		img_f->refcnt = 0;
		img_f->dentry = img_d;
		img_d++;
		img_vn++;
		img_f++;
	}
}
/*
 * FS0 start-up: fetch boot information from the pager, build the boot
 * filesystem objects from it, then dump them for inspection.
 */
void initialise(void)
{
	request_initdata(&initdata);
	init_bootfs(&initdata);
	/* A debug call that lists all vfs structures */
	fs_debug_list_all(bootfs_root.d);
}

View File

@@ -38,5 +38,6 @@
#define L4_IPC_TAG_WRITE 14
#define L4_IPC_TAG_LSEEK 15
#define L4_IPC_TAG_CLOSE 16
#define L4_IPC_TAG_BRK 17
#endif /* __IPCDEFS_H__ */

View File

@@ -20,6 +20,7 @@ static inline int l4_open(const char *pathname, int flags, mode_t mode)
write_mr(L4SYS_ARG0, (unsigned long)pathname);
write_mr(L4SYS_ARG1, flags);
write_mr(L4SYS_ARG2, (u32)mode);
/* Call vfs task with open() request. Check ipc error. */
if ((errno = l4_sendrecv(VFS_TID, VFS_TID, L4_IPC_TAG_OPEN)) < 0) {

View File

@@ -85,6 +85,10 @@ void handle_requests(void)
sys_mmap(sender, args->start, args->length, args->prot, args->flags, args->fd, args->offset);
break;
}
case L4_IPC_TAG_BRK: {
// sys_brk(sender, (void *)mr[0]);
// break;
}
case L4_IPC_TAG_MUNMAP: {
/* TODO: Use arg struct instead */
// sys_munmap(sender, (void *)mr[0], (int)mr[1]);

View File

@@ -1,258 +0,0 @@
#if 0
/*
 * Pairs up the two halves of a shared-memory setup.  Each side (sender
 * and receiver) files a request; the first request to arrive parks a
 * descriptor on shm_pending_list, and the matching request from the
 * partner completes it, allocates an shm id, moves the descriptor to
 * shm_desc_list and triggers the kernel-side setup.
 *
 * NOTE(review): the SHM_SENDER and SHM_RECEIVER branches are mirror
 * images and could share a helper.  kzalloc() results are not checked.
 * (This whole function is inside #if 0 — dead code.)
 */
void shm_request_handler(struct shm_request *request, struct id_pool **shm_ids)
{
	struct shm_descriptor *pending, *new;
	struct shm_kdata *kdata;

	if (request->type == SHM_SENDER) {
		list_for_each_entry(pending, &shm_pending_list, list) {
			kdata = &pending->kdata;
			/*
			 * The receiver request should have set this, and also
			 * few other parameters should match with this request.
			 */
			if (kdata->receiver == request->pair &&
			    kdata->npages == request->npages &&
			    kdata->sender == request->self) {
				/* Fill in rest of the incomplete information */
				kdata->send_pfn = request->pfn;
				/* Allocate a new id for the shm setup */
				pending->shmid = id_new(*shm_ids);
				/* Add it to completed shm area list */
				list_del(&pending->list);
				list_add(&pending->list, &shm_desc_list);
				/* Arrange the actual shm setup with the kernel */
				do_shm_setup(pending);
				return;
			}
		}
		/*
		 * If no matching pending shm descriptor is found, a new one
		 * should be allocated.
		 */
		new = kzalloc(sizeof(struct shm_descriptor));
		kdata = &new->kdata;
		/* Fill in all information available from the request */
		kdata->sender = request->self;
		kdata->receiver = request->pair;
		kdata->send_pfn = request->pfn;
		kdata->npages = request->npages;
		/* Add the descriptor to pending list. */
		INIT_LIST_HEAD(&new->list);
		list_add(&new->list, &shm_pending_list);
	} else if (request->type == SHM_RECEIVER) {
		list_for_each_entry(pending, &shm_pending_list, list) {
			kdata = &pending->kdata;
			/*
			 * The sender request should have already setup, and
			 * few other parameters should match with this request.
			 */
			if (kdata->receiver == request->self &&
			    kdata->npages == request->npages &&
			    kdata->sender == request->pair) {
				/* Fill in rest of the incomplete information */
				kdata->recv_pfn = request->pfn;
				/* Allocate a new id for the shm setup */
				pending->shmid = id_new(*shm_ids);
				list_del(&pending->list);
				/* Add it to completed shm area list */
				list_add(&pending->list, &shm_desc_list);
				/* Arrange the actual shm setup with the kernel */
				do_shm_setup(pending);
				return;
			}
		}
		/*
		 * If no matching pending shm descriptor is found, a new one
		 * should be allocated.
		 */
		new = kzalloc(sizeof(struct shm_descriptor));
		kdata = &new->kdata;
		/* Fill in all information available from the request */
		kdata->sender = request->pair;
		kdata->receiver = request->self;
		kdata->recv_pfn = request->pfn;
		kdata->npages = request->npages;
		/* Add the descriptor to pending list. */
		INIT_LIST_HEAD(&new->list);
		list_add(&new->list, &shm_pending_list);
	}
}
#endif
/* Original incomplete untested code for do_munmap */
/* Unmaps given address from its vma. Releases those pages in that vma. */
int do_munmap(unsigned long addr, unsigned long size, struct tcb *task)
{
	unsigned long npages = __pfn(size);
	unsigned long pfn_start = __pfn(addr);
	unsigned long pfn_end = pfn_start + npages;
	struct vm_area *vma = 0, *shadow = 0, *vma_new = 0, *shadow_new = 0;
	struct list_head *n;

	if (!(vma = find_vma(addr, &task->vm_area_list)))
		return -EINVAL;

	/* Do the real file's vma. Split needed? */
	if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end) {
		if (!(vma_new = kzalloc(sizeof(struct vm_area))))
			return -ENOMEM;
		vma = vma_split(vma, vma_new, pfn_start, pfn_end);
		INIT_LIST_HEAD(&vma_new->list);
		INIT_LIST_HEAD(&vma_new->shadow_list);
		list_add(&vma_new->list, &vma->list);
	/* Shrink needed? */
	} else if (((vma->pfn_start == pfn_start) && (vma->pfn_end > pfn_end))
		   || ((vma->pfn_start < pfn_start) && (vma->pfn_end == pfn_end)))
		vma = vma_shrink(vma, pfn_start, pfn_end);
	/* Destroy needed? */
	else if ((vma->pfn_start == pfn_start) && (vma->pfn_end == pfn_end)) {
		/* NOTE: VMA can't be referred after this point. */
		vma_destroy(vma);
		goto pgtable_unmap;
	} else
		BUG();

	/* Sort out the shadows, if any. non-cow mappings would skip this. */
	/*
	 * NOTE(review): list heads are passed by address everywhere else;
	 * `vma->shadow_list' here is presumably missing an `&'.
	 */
	list_for_each_entry(shadow, vma->shadow_list, shadow_list) {
		/* Split needed? */
		if (shadow->pfn_start < pfn_start && shadow->pfn_end > pfn_end) {
			/* NOTE(review): kzalloc result unchecked, and the
			 * assignment below clobbers `vma' — likely meant
			 * `shadow = vma_split(...)'. */
			shadow_new = kzalloc(sizeof(struct vm_area));
			vma = vma_split(shadow, shadow_new, pfn_start, pfn_end);
			INIT_LIST_HEAD(&shadow_new->list);
			list_add_tail(&shadow_new->list, &shadow->list);
		/* Destroy needed? */
		} else if ((shadow->pfn_start == pfn_start) && (shadow->pfn_end == pfn_end)) {
			/* NOTE: vma can't be referred after this point. */
			vma_destroy(shadow);
		/* Shrink needed? */
		} else if (((shadow->pfn_start == pfn_start) && (shadow->pfn_end > pfn_end))
			   || ((shadow->pfn_start < pfn_start) && (shadow->pfn_end == pfn_end)))
			shadow = vma_shrink(shadow, pfn_start, pfn_end);
		else
			BUG();
	}

	/*
	 * If the real file was COW and its vma had split, the shadows must be
	 * separated into the two new vmas according to which one they
	 * belong to.
	 */
	if (vma_new)
		list_for_each_entry_safe(shadow, n, vma->shadow_list, shadow_list) {
			if (shadow->pfn_start >= vma_new->pfn_start &&
			    shadow->pfn_end <= vma_new->pfn_end) {
				list_del_init(&shadow->list);
				list_add(&shadow->list, &vma_new->shadow_list);
			} else
				BUG_ON(!(shadow->pfn_start >= vma->pfn_start &&
					 shadow->pfn_end <= vma->pfn_end));
		}

	/* The stage where the actual pages are unmapped from the page tables */
pgtable_unmap:
	/* TODO:
	 * - Find out if the vma is cow, and contains shadow vmas.
	 * - Remove and free shadow vmas or the real vma, or shrink them if applicable.
	 * - Free the swap file segment for the vma if vma is private (cow).
	 * - Reduce refcount for the in-memory pages.
	 * - If refcount is zero (they could be shared!), either add pages to some page
	 *   cache, or simpler the better, free the actual pages back to the page allocator.
	 * - l4_unmap() the corresponding virtual region from the page tables.
	 */
	/* NOTE(review): non-void function falls off the end here — no
	 * return value on the success path. */
}
/*
 * TODO: UNUSED: This used to be used during mmap to map swap for anon
 * areas.
 * Now same code might be used *during swapping* to set up the swapfile
 * for the very same areas. Also the fault handler might find the swap
 * slot on the file using info from this code.
 *
 * For an anonymous and/or private region this provides a per-task swap
 * file for backing. For shared memory it attaches shm regions to a
 * global shm swap file.
 *
 * Returns the swap file chosen for the region and stores the region's
 * page offset within that file in *f_offset.
 *
 * Fixes over the original dead code: missing `;' after the first
 * BUG_ON, references to undefined `*file'/`npages'/`task' identifiers,
 * a printf format/argument mismatch, an unused local, and `return 0'
 * from a pointer-returning function on the success path.
 */
struct vm_file *setup_swap_file(unsigned long map_address,
				unsigned int flags, unsigned long *f_offset,
				struct tcb *t, int pages)
{
	struct vm_file *swap_file;

	BUG_ON(!(flags & VMA_ANON));
	BUG_ON(!is_page_aligned(map_address));

	/*
	 * All anon shared memory is kept on a single global swap file. Shm
	 * addresses are globally the same among processes, so the file mapping
	 * is a function of shm segment address.
	 */
	if (flags & VMA_SHARED) {
		swap_file = shm_swap_file;
		/*
		 * The file offset is the shm segment's page offset in the shm
		 * virtual region, which is unique among all processes.
		 * NOTE(review): this subtracts what looks like an address
		 * from a pfn; presumably should be
		 * __pfn(map_address) - __pfn(SHM_AREA_START) — confirm.
		 */
		*f_offset = __pfn(map_address) - SHM_AREA_START;
		/*
		 * Extend the file if this shm vaddr lies beyond file end.
		 */
		if (swap_file->length < __pfn_to_addr(*f_offset + pages))
			swap_file->length = __pfn_to_addr(*f_offset + pages);
	/* Regular non-shareable anonymous regions */
	} else {
		swap_file = t->swap_file;
		/*
		 * Anonymous vmas are mapped into process' swap during mmap.
		 * Copy-on-write vmas are mapped into swap as shadow vmas when
		 * they get copy-on-write'ed.
		 */
		BUG_ON(!is_page_aligned(swap_file->length));
		/*
		 * vmas map into the next available swap slot. The whole vma is
		 * mapped so that it has a contiguous existence on swap file.
		 * Swap slots are allocated by the per-task swap offset pool.
		 */
		*f_offset = vaddr_pool_new(t->swap_file_offset_pool, pages);
		/* If new offset is greater than current file size, update. */
		if (__pfn_to_addr(*f_offset + pages) > swap_file->length)
			swap_file->length = __pfn_to_addr(*f_offset + pages);
		BUG_ON(swap_file->length > TASK_SWAPFILE_MAXSIZE);
	}
	printf("Set up swapfile for anon%svma @ pfn offset %lu. "
	       "Swap file size: %lu bytes.\n",
	       (flags & VMA_SHARED) ? "/shared " : " ",
	       *f_offset, (unsigned long)swap_file->length);
	return swap_file;
}

View File

@@ -18,13 +18,6 @@ static struct vm_file shm_swap_file;
static struct id_pool *swap_file_offset_pool;
*/
/* mmap system call implementation */
/* Stub: accepts the request and reports success without mapping anything. */
int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
	     int flags, int fd, off_t offset)
{
	return 0;
}
/* TODO: This is to be implemented when fs0 is ready. */
int do_msync(void *addr, unsigned long size, unsigned int flags, struct tcb *task)
{
@@ -487,3 +480,18 @@ int do_mmap(struct vm_file *mapfile, unsigned long f_offset, struct tcb *t,
return 0;
}
/* mmap system call implementation */
/* Stub: accepts the request and reports success without mapping anything. */
int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
	     int flags, int fd, off_t offset)
{
	return 0;
}
/* Sets the end of data segment for sender */
/* Stub: the actual do_brk() call is disabled until it is implemented. */
int sys_brk(l4id_t sender, void *ds_end)
{
	// do_brk(find_task(sender), ds_end);
	return 0;
}

View File

@@ -28,7 +28,7 @@ void main(void)
wait_pager(0);
/* Check mmap/munmap */
// mmaptest();
mmaptest();
/* Check shmget/shmat/shmdt */
shmtest();

View File

@@ -1,7 +1,7 @@
/*
* Test mmap/munmap posix calls.
*
* Copyright (C) 2007 - 2008 Bahadir Balban
* Copyright (C) 2007, 2008 Bahadir Balban
*/
#include <sys/ipc.h>
#include <sys/shm.h>
@@ -58,3 +58,4 @@ int mmaptest(void)
return 0;
}

Binary file not shown.