mirror of https://github.com/drasko/codezero.git, synced 2026-01-12 02:43:15 +01:00
Mixed changes
- Added prefaulting of fs0 to avoid page-fault deadlocks.
- Fixed a bug where the vmo page_cache equivalence check would simply drop a link to an original vmo, even if that vmo had more pages outside the page cache, or was not a shadow vmo.
- Fixed a bug in the page allocator where recursion would corrupt global variables.
- Next: fix or rewrite the page allocator as something simpler that works.
@@ -18,7 +18,7 @@
#include INC_SUBARCH(mm.h)

/* Abort debugging conditions */
// #define DEBUG_ABORTS
#define DEBUG_ABORTS
#if defined (DEBUG_ABORTS)
#define dbg_abort(...) dprintk(__VA_ARGS__)
#else
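
The hunk is cut off before the #else branch of the macro. A typical completion of this conditional debug-macro idiom (the no-op definition below is an assumption, not taken from the repository) compiles the calls away when DEBUG_ABORTS is disabled:

#if defined (DEBUG_ABORTS)
#define dbg_abort(...) dprintk(__VA_ARGS__)
#else
#define dbg_abort(...) do { } while (0)	/* assumed no-op fallback */
#endif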
@@ -19,6 +19,7 @@
struct page_allocator allocator;

static struct mem_cache *new_dcache();
static int find_and_free_page_area(void *addr, struct page_allocator *p);

/*
 * Allocate a new page area from @area_sources_start. If no areas left,
@@ -63,25 +64,46 @@ get_free_page_area(int quantity, struct page_allocator *p,
	if (quantity <= 0)
		return 0;

	/*
	 * First, allocate a new area, which may involve recursion.
	 * If we call this while we touch the global area list, we will
	 * corrupt it so we call it first.
	 */
	if (!(new = new_page_area(p, cache_list)))
		return 0; /* No more pages */

	list_for_each_entry(area, &p->page_area_list, list) {
		/* Free but needs dividing */

		/* Check for exact size match */
		if (area->numpages == quantity && !area->used) {
			/* Mark it as used */
			area->used = 1;

			/*
			 * We don't need the area we allocated
			 * earlier, just free it.
			 */
			BUG_ON(find_and_free_page_area(
				(void *)__pfn_to_addr(new->pfn), p) < 0);
			return area;
		}

		if (area->numpages > quantity && !area->used) {
			area->numpages -= quantity;
			if (!(new = new_page_area(p, cache_list)))
				return 0; /* No more pages */
			new->pfn = area->pfn + area->numpages;
			new->numpages = quantity;
			new->used = 1;
			INIT_LIST_HEAD(&new->list);
			list_add(&new->list, &area->list);
			return new;
		/* Free and exact size match, no need to divide. */
		} else if (area->numpages == quantity && !area->used) {
			area->used = 1;
			return area;
		}
	}
	/* No more pages */

	/*
	 * No more pages. We could not use the area
	 * we allocated earlier, just free it.
	 */
	find_and_free_page_area((void *)__pfn_to_addr(new->pfn), p);
	return 0;
}

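The comment introduced in this hunk explains the fix for the recursion bug mentioned in the commit message: new_page_area() may recurse back into the allocator and modify the global area list, so it is now called once up front, before the list walk, and the reserved area is freed again if an exact fit is found. A minimal, self-contained sketch of that reserve-first pattern (area_reserve, area_release and get_area are illustrative stand-ins, not the codezero API):

#include <stddef.h>
#include <stdlib.h>

struct area {
	struct area *next;
	unsigned long pfn;
	int numpages;
	int used;
};

/* Global list of page areas; walking it while it is being modified
 * (e.g. by a recursive allocation) is what corrupted the old allocator. */
static struct area *area_list;

/* Stand-in for new_page_area(): may itself allocate and touch global
 * state, so it must not run in the middle of the list walk below. */
static struct area *area_reserve(void)
{
	return calloc(1, sizeof(struct area));
}

static void area_release(struct area *a)
{
	free(a);
}

struct area *get_area(int quantity)
{
	struct area *new, *a;

	if (quantity <= 0)
		return NULL;

	/* Reserve up front, before the global list is touched. */
	if (!(new = area_reserve()))
		return NULL;

	for (a = area_list; a; a = a->next) {
		if (!a->used && a->numpages == quantity) {
			a->used = 1;
			area_release(new);	/* exact fit: reservation unused */
			return a;
		}
		if (!a->used && a->numpages > quantity) {
			/* Split: the tail of this area becomes the allocation. */
			a->numpages -= quantity;
			new->pfn = a->pfn + a->numpages;
			new->numpages = quantity;
			new->used = 1;
			new->next = a->next;
			a->next = new;
			return new;
		}
	}

	area_release(new);			/* no suitable area found */
	return NULL;
}

Freeing the unused reservation rather than keeping it mirrors the BUG_ON(find_and_free_page_area(...)) call in the exact-match branch of the hunk above.
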
@@ -3,18 +3,17 @@ libposix

Copyright (C) 2007 Bahadir Balban

Despite the name, this is a library that supports only a tiny portion of posix functions.

Currently supported functions are:
Despite the name, this is a library that supports only a small portion of posix functions.


Functions to be supported in the near future are:
Highest priority POSIX functions are:

shmat
shmget
shmdt
mmap
munmap
sbrk
read
readdir
write
@@ -27,12 +26,50 @@ mknod
link
unlink
fork
clone
execve
getpid
wait
kill
getenv
setenv

Others:

Currently supported functions are:

shmat
shmget
shmdt
mmap
read
readdir
write
lseek
open
close
creat
mkdir
mknod
fork


Functions to be supported in the near future are:

munmap
link
unlink
getpid
execve
clone
wait
kill
exit
sbrk
getenv
setenv


Other calls:
pipe
mount
unmount

@@ -14,7 +14,7 @@
#include <arch/mm.h>
#include <lib/spinlock.h>

// #define DEBUG_FAULT_HANDLING
#define DEBUG_FAULT_HANDLING
#ifdef DEBUG_FAULT_HANDLING
#define dprintf(...) printf(__VA_ARGS__)
#else
@@ -148,11 +148,53 @@ void handle_requests(void)
	}
}

#if 0
int self_spawn(void)
{
	struct task_ids ids;
	struct tcb *self, *self_child;

	BUG_ON(!(self = find_task(self_tid())));

	ids.tid = THREAD_ID_INVALID;
	ids.spid = self->spid;

	/* Create a new L4 thread in current thread's address space. */
	self_child = task_create(&ids, THREAD_CREATE_SAMESPC);

	/* Copy self tcb to child. TODO: ??? Not sure about this */
	copy_tcb(self_child, self);

	/*
	 * Create a new utcb. Every pager thread will
	 * need its own utcb to answer calls.
	 */
	self_child->utcb = utcb_vaddr_new();

	/* TODO: Create a new utcb shm for own thread ??? Does it need to shmat??? */

	/* TODO: Notify vfs ??? */

	/* TODO: Modify registers ???, it depends on what state is copied in C0 */

	task_add_global(self_child);

	l4_thread_control(THREAD_RUN, &ids);
}
#endif


void main(void)
{
	/* Initialise the memory, server tasks, mmap and start them. */
	initialise();

	/*
	if (self_spawn())
		while (1)
			;
	*/

	while (1) {
		handle_requests();
	}

@@ -96,24 +96,27 @@ struct vm_object *vma_drop_link(struct vm_obj_link *shadower_link,

/*
 * Checks if page cache pages of lesser is a subset of those of copier.
 *
 * FIXME:
 * Note this just checks the page cache, so if any objects have pages
 * swapped to disk, this function does not rule.
 * swapped to disk, this function won't work, which is a logic error.
 * This should really count the swapped ones as well.
 */
int vm_object_is_subset(struct vm_object *copier,
		struct vm_object *lesser)
int vm_object_is_subset(struct vm_object *shadow,
		struct vm_object *original)
{
	struct page *pc, *pl;

	/* Copier must have equal or more pages to overlap lesser */
	if (copier->npages < lesser->npages)
	if (shadow->npages < original->npages)
		return 0;

	/*
	 * Do a page by page comparison. Every lesser page
	 * must be in copier for overlap.
	 */
	list_for_each_entry(pl, &lesser->page_cache, list)
		if (!(pc = find_page(copier, pl->offset)))
	list_for_each_entry(pl, &original->page_cache, list)
		if (!(pc = find_page(shadow, pl->offset)))
			return 0;
	/*
	 * For all pages of lesser vmo, there seems to be a page
@@ -122,6 +125,16 @@ int vm_object_is_subset(struct vm_object *copier,
	return 1;
}

static inline int vm_object_is_droppable(struct vm_object *shadow,
		struct vm_object *original)
{
	if (vm_object_is_subset(shadow, original)
	    && (original->flags & VM_OBJ_SHADOW))
		return 1;
	else
		return 0;
}

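The renamed parameters and the new vm_object_is_droppable() make the intent explicit: a link may only be dropped when every resident page of the original is also present in the shadow and the original is itself a shadow object, which is the condition the old code got wrong. A small, self-contained illustration of the two checks on plain data (struct obj and the VM_OBJ_SHADOW stand-in below are illustrative, not the codezero definitions):

#include <stdio.h>

#define VM_OBJ_SHADOW	(1 << 0)	/* stand-in flag for this sketch */

struct obj {
	unsigned long offsets[8];	/* resident page offsets */
	int npages;
	unsigned int flags;
};

/* Every page offset of orig must also be present in shadow. */
static int is_subset(const struct obj *shadow, const struct obj *orig)
{
	if (shadow->npages < orig->npages)
		return 0;

	for (int i = 0; i < orig->npages; i++) {
		int found = 0;

		for (int j = 0; j < shadow->npages; j++)
			if (shadow->offsets[j] == orig->offsets[i])
				found = 1;
		if (!found)
			return 0;
	}
	return 1;
}

/* Subset alone is not enough: the original must itself be a shadow. */
static int is_droppable(const struct obj *shadow, const struct obj *orig)
{
	return is_subset(shadow, orig) && (orig->flags & VM_OBJ_SHADOW);
}

int main(void)
{
	struct obj shadow = { { 0, 1, 2, 3 }, 4, VM_OBJ_SHADOW };
	struct obj file   = { { 1, 3 }, 2, 0 };		/* not a shadow */

	printf("subset=%d droppable=%d\n",
	       is_subset(&shadow, &file), is_droppable(&shadow, &file));
	return 0;
}

Running this prints subset=1 droppable=0: the file-backed object passes the page-cache subset test but must not be dropped, which is the bug described in the commit message.
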
/*
 * When one shadow object is redundant, merges it into the shadow in front of it.
 * Note it must be determined that it is redundant before calling this function.
@@ -231,8 +244,7 @@ struct page *copy_to_new_page(struct page *orig)
	void *new_vaddr, *vaddr, *paddr;
	struct page *new;

	if (!(paddr = alloc_page(1)))
		return 0;
	BUG_ON(!(paddr = alloc_page(1)));

	new = phys_to_page(paddr);

@@ -258,8 +270,8 @@ struct page *copy_to_new_page(struct page *orig)
int vma_drop_merge_delete(struct vm_obj_link *shadow_link,
		struct vm_obj_link *orig_link)
{
	/* Can we can drop one link? */
	if (vm_object_is_subset(shadow_link->obj, orig_link->obj)) {
	/* Can we drop one link? */
	if (vm_object_is_droppable(shadow_link->obj, orig_link->obj)) {
		struct vm_object *dropped;

		dprintf("VM OBJECT is a subset of its shadow.\nShadow:\n");

@@ -258,6 +258,28 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
	return 0;
}

/*
 * Prefaults all mapped regions of a task. The reason we have this is
 * some servers are in the page fault handling path (e.g. fs0), and we
 * don't want them to fault and cause deadlocks and circular deps.
 *
 * Normally fs0 faults dont cause dependencies because its faults
 * are handled by the boot pager, which is part of mm0. BUT: It may
 * cause deadlocks because fs0 may fault while serving a request
 * from mm0.(Which is expected to also handle the fault).
 */
int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
	struct vm_area *vma;

	list_for_each_entry(vma, &task->vm_area_list, list) {
		for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
			BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
					     VM_READ | VM_WRITE) < 0);
	}
	return 0;
}

/*
 * Main entry point for the creation, initialisation and
 * execution of a new task.
@@ -278,6 +300,10 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
	if ((err = task_mmap_regions(task, f)) < 0)
		return err;

	if (ids->tid == VFS_TID)
		if ((err = task_prefault_regions(task, f)) < 0)
			return err;

	if ((err = task_setup_registers(task, 0, 0, 0)) < 0)
		return err;

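The comment above gives the reasoning: fs0 sits on the page-fault service path, so if it faults while handling a request from mm0, the pager that would resolve the fault is already blocked on fs0 and the system deadlocks. Prefaulting every page of its mapped regions at task setup removes that window. As a rough, generic illustration of prefaulting (plain user-space C that touches each page; the real task_prefault_regions() goes through prefault_page() and the pager instead):

#include <stddef.h>

#define PAGE_SIZE	4096UL

/* Touch every page of a writable region once, so that all faults happen
 * now, while it is still safe to take them, instead of later on a path
 * where faulting would deadlock. */
static void prefault_range(void *start, size_t len)
{
	volatile char *p = start;

	for (size_t off = 0; off < len; off += PAGE_SIZE) {
		char c = p[off];	/* read: fault the page in */
		p[off] = c;		/* write: break copy-on-write now */
	}
}
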