mirror of https://github.com/drasko/codezero.git
exchange_registers(), modified thread_control calls seem to work
- Fixed do_mmap() so that it returns the mapped address (see the sketch below), and fixed various bugs.
- A child seems to fork with the new setup, but with an incorrect return value. Need to use and test exregs() for fork + clone.
- shmat() searches for an unmapped area if the input argument is invalid; do_mmap() should do this instead.
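For context, here is a minimal, self-contained sketch of the new calling convention. The PTR_ERR()/IS_ERR() macros and do_mmap_stub() below are local stand-ins written for this sketch; they only mirror the usage visible in the diff (do_mmap() now returns the mapped address on success and a pointer-encoded negative errno on failure), not mm0's actual header definitions.

#include <stdio.h>

#define EINVAL		22
/* Stand-ins: encode a negative errno in a pointer, and detect such a pointer. */
#define PTR_ERR(x)	((void *)(long)(x))
#define IS_ERR(x)	((unsigned long)(x) >= (unsigned long)(-1000L))

/* Stub standing in for mm0's do_mmap(): returns the mapped address on
 * success, or an error pointer on failure. */
static void *do_mmap_stub(unsigned long map_address, unsigned int npages)
{
	if (npages == 0)
		return PTR_ERR(-EINVAL);
	return (void *)map_address;
}

int main(void)
{
	void *addr = do_mmap_stub(0x40000000UL, 4);

	if (IS_ERR(addr))		/* error path: pointer encodes -errno */
		return -(int)(long)addr;

	printf("mapped at %p\n", addr);
	return 0;
}

Callers such as do_shmat() and task_mmap_regions() in the hunks below follow the same IS_ERR()/(int) cast pattern.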
@@ -7,6 +7,12 @@
 #include <exregs.h>
 
+void exregs_set_pager(struct exregs_data *s, l4id_t pagerid)
+{
+	s->pagerid = pagerid;
+	s->flags |= EXREGS_SET_PAGER;
+}
+
 void exregs_set_stack(struct exregs_data *s, unsigned long sp)
 {
 	s->context.sp = sp;
 
@@ -380,6 +380,9 @@ int vma_intersect(unsigned long pfn_start, unsigned long pfn_end,
 }
 
 /*
+ * FIXME: PASS THIS A VM_SHARED FLAG SO THAT IT CAN SEARCH FOR AN EMPTY
+ * SEGMENT FOR SHM, instead of shmat() searching for one.
+ *
  * Search an empty space in the task's mmapable address region.
  */
 unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
@@ -470,9 +473,9 @@ int mmap_address_validate(struct tcb *task, unsigned long map_address,
  * The actual paging in/out of the file from/into memory pages is handled by
  * the file's pager upon page faults.
  */
-int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
-	    struct tcb *task, unsigned long map_address, unsigned int flags,
-	    unsigned int npages)
+void *do_mmap(struct vm_file *mapfile, unsigned long file_offset,
+	      struct tcb *task, unsigned long map_address, unsigned int flags,
+	      unsigned int npages)
 {
 	unsigned long map_pfn = __pfn(map_address);
 	struct vm_area *new, *mapped;
@@ -485,7 +488,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 		BUG_ON(!(mapfile = get_devzero()));
 		file_offset = 0;
 	} else
-		BUG();
+		return PTR_ERR(-EINVAL);
 	}
 
 	/* Get total file pages, check if mapping is within file size */
@@ -494,25 +497,24 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 		printf("%s: Trying to map %d pages from page %d, "
 		       "but file length is %d\n", __FUNCTION__,
 		       npages, file_offset, file_npages);
-		return -EINVAL;
+		return PTR_ERR(-EINVAL);
 	}
 
 	/* Check invalid page size */
 	if (npages == 0) {
 		printf("Trying to map %d pages.\n", npages);
-		return -EINVAL;
+		return PTR_ERR(-EINVAL);
 	}
 	if (npages > __pfn(task->stack_start - task->data_end)) {
 		printf("Trying to map too many pages: %d\n", npages);
-		return -ENOMEM;
+		return PTR_ERR(-ENOMEM);
 	}
 
 	/* Check invalid map address */
 	if (!mmap_address_validate(task, map_address, flags)) {
 		/* Get new map address for region of this size */
-		map_address = find_unmapped_area(npages, task);
-		if ((int)map_address < 0)
-			return (int)map_address;
+		if(!(map_address = find_unmapped_area(npages, task)))
+			return PTR_ERR(-ENOMEM);
 	} else {
 		/*
 		 * FIXME: Currently we don't allow overlapping vmas.
@@ -526,12 +528,12 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 
 	/* For valid regions that aren't allocated by us, create the vma. */
 	if (!(new = vma_new(__pfn(map_address), npages, flags, file_offset)))
-		return -ENOMEM;
+		return PTR_ERR(-ENOMEM);
 
 	/* Attach the file as the first vm object of this vma */
 	if (!(vmo_link = vm_objlink_create())) {
 		kfree(new);
-		return -ENOMEM;
+		return PTR_ERR(-ENOMEM);
 	}
 
 	/* Attach link to object */
@@ -557,7 +559,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 		if (!(vmo_link2 = vm_objlink_create())) {
 			kfree(new);
 			kfree(vmo_link);
-			return -ENOMEM;
+			return PTR_ERR(-ENOMEM);
 		}
 		vm_link_object(vmo_link2, &dzero->vm_obj);
 		list_add_tail(&vmo_link2->list, &new->vm_obj_list);
@@ -565,7 +567,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 
 	/* Finished initialising the vma, add it to task */
 	dprintf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
-		map_address, map_address + npages * PAGE_SIZE);
+		map_address, map_address + __pfn_to_addr(npages));
 	task_add_vma(task, new);
 
 	/*
@@ -573,9 +575,9 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
 	 * we return the *end* of the area as the start address.
 	 */
 	if (flags & VMA_GROWSDOWN)
-		map_address += npages;
+		map_address += __pfn_to_addr(npages);
 
-	return map_address;
+	return (void *)map_address;
 }
 
 /* mmap system call implementation */
@@ -635,9 +637,9 @@ int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
 	if (prot & PROT_EXEC)
 		vmflags |= VM_EXEC;
 
-	base = do_mmap(file, __pfn_to_addr(pfn), task, base, vmflags, npages);
+	start = do_mmap(file, __pfn_to_addr(pfn), task, base, vmflags, npages);
 
-	l4_ipc_return(base);
+	l4_ipc_return((int)start);
 	return 0;
 }

@@ -56,7 +56,7 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
 {
 	struct shm_descriptor *shm = shm_file_to_desc(shm_file);
 	unsigned int vmflags;
-	int err;
+	void *mapped;
 
 	if (!task) {
 		printf("%s:%s: Cannot find caller task with tid %d\n",
@@ -85,7 +85,7 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
 		if (mmap_address_validate(task, (unsigned long)shm_addr,
 					  vmflags))
 			shm->shm_addr = shm_addr;
-		else
+		else	/* FIXME: Do this in do_mmap/find_unmapped_area !!! */
 			shm->shm_addr = address_new(&shm_vaddr_pool,
 						    shm->npages);
 	} else	/* Address must be already assigned */
@@ -95,9 +95,11 @@ static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
 	 * mmap the area to the process as shared. Page fault handler would
 	 * handle allocating and paging-in the shared pages.
 	 */
-	if ((err = do_mmap(shm_file, 0, task, (unsigned long)shm->shm_addr,
-			   vmflags, shm->npages)) < 0) {
-		printf("do_mmap: Mapping shm area failed with %d.\n", err);
+	if (IS_ERR(mapped = do_mmap(shm_file, 0, task,
+				    (unsigned long)shm->shm_addr,
+				    vmflags, shm->npages))) {
+		printf("do_mmap: Mapping shm area failed with %d.\n",
+		       (int)mapped);
 		BUG();
 	}

@@ -225,36 +225,40 @@ struct tcb *task_create(struct tcb *orig, struct task_ids *ids,
 
 int task_mmap_regions(struct tcb *task, struct vm_file *file)
 {
-	int err;
+	void *mapped;
 	struct vm_file *shm;
 
 	/*
 	 * mmap each task's physical image to task's address space.
 	 * TODO: Map data and text separately when available from bootdesc.
 	 */
-	if ((err = do_mmap(file, 0, task, task->text_start,
-			   VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,
-			   __pfn(page_align_up(task->text_end) -
-			   task->text_start))) < 0) {
-		printf("do_mmap: failed with %d.\n", err);
-		return err;
+	if (IS_ERR(mapped = do_mmap(file, 0, task, task->text_start,
+				    VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,
+				    __pfn(page_align_up(task->text_end) -
+					  task->text_start)))) {
+		printf("do_mmap: failed with %d.\n", (int)mapped);
+		return (int)mapped;
 	}
 
 	/* mmap each task's environment as anonymous memory. */
-	if ((err = do_mmap(0, 0, task, task->env_start,
-			   VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
-			   __pfn(task->env_end - task->env_start))) < 0) {
+	if (IS_ERR(mapped = do_mmap(0, 0, task, task->env_start,
+				    VM_READ | VM_WRITE |
+				    VMA_PRIVATE | VMA_ANONYMOUS,
+				    __pfn(task->env_end - task->env_start)))) {
 		printf("do_mmap: Mapping environment failed with %d.\n",
-		       err);
-		return err;
+		       (int)mapped);
+		return (int)mapped;
 	}
 
 	/* mmap each task's stack as anonymous memory. */
-	if ((err = do_mmap(0, 0, task, task->stack_start,
-			   VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
-			   __pfn(task->stack_end - task->stack_start))) < 0) {
-		printf("do_mmap: Mapping stack failed with %d.\n", err);
-		return err;
+	if (IS_ERR(mapped = do_mmap(0, 0, task, task->stack_start,
+				    VM_READ | VM_WRITE |
+				    VMA_PRIVATE | VMA_ANONYMOUS,
+				    __pfn(task->stack_end -
+					  task->stack_start)))) {
+		printf("do_mmap: Mapping stack failed with %d.\n",
+		       (int)mapped);
+		return (int)mapped;
 	}
 
 	/* Task's utcb */
@@ -304,7 +308,7 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
 			 unsigned int sp, l4id_t pager)
 {
 	int err;
-	struct exregs_data regs;
+	struct exregs_data exregs;
 
 	/* Set up task's registers to default. */
 	if (!sp)
@@ -315,9 +319,11 @@ int task_setup_registers(struct tcb *task, unsigned int pc,
 		pager = self_tid();
 
 	/* Set up the task's thread details, (pc, sp, pager etc.) */
-	exregs_set_stack(&regs, sp);
-	exregs_set_pc(&regs, pc);
-	if ((err = l4_exchange_registers(&regs, pager, task->tid) < 0)) {
+	exregs_set_stack(&exregs, sp);
+	exregs_set_pc(&exregs, pc);
+	exregs_set_pager(&exregs, pager);
+
+	if ((err = l4_exchange_registers(&exregs, task->tid) < 0)) {
 		printf("l4_exchange_registers failed with %d.\n", err);
 		return err;
 	}
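As a rough follow-on to the commit note about using exregs() for fork + clone, below is a hypothetical sketch of how a pager might program a forked child's context with the helpers this diff introduces (exregs_set_stack/pc/pager and the two-argument l4_exchange_registers()). The setup_forked_child() wrapper, its parameters, and the exact include path are assumptions for illustration only.

#include <exregs.h>

/* Hypothetical helper: program a forked child's sp, pc and pager. */
static int setup_forked_child(l4id_t child_tid, unsigned long child_sp,
			      unsigned long child_pc, l4id_t pager_tid)
{
	struct exregs_data exregs = { 0 };	/* start with no EXREGS_* flags set */
	int err;

	exregs_set_stack(&exregs, child_sp);	/* child's stack pointer */
	exregs_set_pc(&exregs, child_pc);	/* address the child resumes at */
	exregs_set_pager(&exregs, pager_tid);	/* child's page faults go here */

	/* New two-argument form, as in task_setup_registers() above */
	if ((err = l4_exchange_registers(&exregs, child_tid)) < 0)
		return err;

	return 0;
}

One detail worth noting: the sketch parenthesizes the `< 0` comparison outside the assignment, whereas the call site in the hunk above writes `if ((err = l4_exchange_registers(&exregs, task->tid) < 0))`, which assigns the result of the comparison to err rather than the call's return value.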