Shared tcb structures are made independent

For clone, file descriptor and vm area structures need to be
separate from the tcb and reached via a pointer so that they
can be shared among multiple tcbs.
This commit is contained in:
Bahadir Balban
2008-09-09 22:17:42 +03:00
parent 002fe79a54
commit d7de9aa643
8 changed files with 156 additions and 176 deletions

View File

@@ -25,6 +25,12 @@
#define DEFAULT_UTCB_SIZE PAGE_SIZE
enum tcb_create_flags {
TCB_NO_SHARING = 0,
TCB_SHARED_VM = 1,
TCB_SHARED_FILES = 2,
};
struct vm_file;
struct file_descriptor {
@@ -33,6 +39,17 @@ struct file_descriptor {
struct vm_file *vmfile;
};
struct task_fd_head {
struct file_descriptor fd[TASK_FILES_MAX];
int tcb_refs;
};
struct task_vma_head {
struct list_head list;
int tcb_refs;
};
/* Stores all task information that can be kept in userspace. */
struct tcb {
/* Task list */
@@ -44,6 +61,7 @@ struct tcb {
/* Task ids */
int tid;
int spid;
int tgid;
/* Related task ids */
unsigned int pagerid; /* Task's pager */
@@ -76,10 +94,10 @@ struct tcb {
void *utcb;
/* Virtual memory areas */
struct list_head vm_area_list;
struct task_vma_head *vm_area_head;
/* File descriptors for this task */
struct file_descriptor fd[TASK_FILES_MAX];
struct task_fd_head *files;
};
/* Structures to use when sending new task information to vfs */
@@ -99,8 +117,11 @@ void task_add_global(struct tcb *t);
struct initdata;
void init_pm(struct initdata *initdata);
struct tcb *task_create(struct task_ids *ids, unsigned int flags);
struct tcb *task_create(struct task_ids *ids,
unsigned int ctrl_flags,
unsigned int alloc_flags);
int send_task_data(l4id_t requester);
void task_map_prefault_utcb(struct tcb *mapper, struct tcb *owner);
int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags);
#endif /* __TASK_H__ */

View File

@@ -243,6 +243,11 @@ int validate_task_range(struct tcb *t, unsigned long start,
/* Changes all shadows and their ptes to read-only */
int vm_freeze_shadows(struct tcb *task);
static inline void task_add_vma(struct tcb *task, struct vm_area *vma)
{
list_add(&vma->list, &task->vm_area_head->list);
}
/* Main page fault entry point */
int page_fault_handler(l4id_t tid, fault_kdata_t *fkdata);

View File

@@ -11,6 +11,7 @@
#include <l4lib/kip.h>
#include <l4lib/utcb.h>
#include <l4lib/ipcdefs.h>
#include <l4lib/types.h>
#include <l4/api/thread.h>
#include <l4/api/space.h>
#include <l4/api/ipc.h>
@@ -148,7 +149,6 @@ void handle_requests(void)
}
}
#if 0
int self_spawn(void)
{
struct task_ids ids;
@@ -156,14 +156,16 @@ int self_spawn(void)
BUG_ON(!(self = find_task(self_tid())));
ids.tid = THREAD_ID_INVALID;
ids.tid = TASK_ID_INVALID;
ids.spid = self->spid;
ids.tgid = self->tgid;
/* Create a new L4 thread in current thread's address space. */
self_child = task_create(&ids, THREAD_CREATE_SAMESPC);
self_child = task_create(&ids, THREAD_CREATE_SAMESPC,
TCB_SHARED_VM | TCB_SHARED_FILES);
/* Copy self tcb to child. TODO: ??? Not sure about this */
copy_tcb(self_child, self);
copy_tcb(self_child, self, TCB_SHARED_VM | TCB_SHARED_FILES);
/*
* Create a new utcb. Every pager thread will
@@ -171,7 +173,13 @@ int self_spawn(void)
*/
self_child->utcb = utcb_vaddr_new();
/* TODO: Create a new utcb shm for own thread ??? Does it need to shmat??? */
/* Map utcb to child */
task_map_prefault_utcb(self_child, self_child);
/*
* TODO: Set up a child stack by mmapping an anonymous
* region of mmap's choice. TODO: Time to add MAP_GROWSDOWN ???
*/
/* TODO: Notify vfs ??? */
@@ -180,8 +188,9 @@ int self_spawn(void)
task_add_global(self_child);
l4_thread_control(THREAD_RUN, &ids);
return 0;
}
#endif
void main(void)

View File

@@ -24,7 +24,7 @@ int copy_vmas(struct tcb *to, struct tcb *from)
struct vm_area *vma, *new_vma;
struct vm_obj_link *vmo_link, *new_link;
list_for_each_entry(vma, &from->vm_area_list, list) {
list_for_each_entry(vma, &from->vm_area_head->list, list) {
/* Create a new vma */
new_vma = vma_new(vma->pfn_start, vma->pfn_end - vma->pfn_start,
@@ -49,13 +49,13 @@ int copy_vmas(struct tcb *to, struct tcb *from)
&vma->vm_obj_list)));
/* All link copying is finished, now add the new vma to task */
list_add_tail(&new_vma->list, &to->vm_area_list);
task_add_vma(to, new_vma);
}
return 0;
}
int copy_tcb(struct tcb *to, struct tcb *from)
int copy_tcb(struct tcb *to, struct tcb *from, unsigned int flags)
{
/* Copy program segment boundary information */
to->start = from->start;
@@ -77,12 +77,23 @@ int copy_tcb(struct tcb *to, struct tcb *from)
to->map_start = from->map_start;
to->map_end = from->map_end;
/* Copy all vm areas */
copy_vmas(to, from);
/* Sharing the list of vmas */
if (flags & TCB_SHARED_VM) {
to->vm_area_head = from->vm_area_head;
to->vm_area_head->tcb_refs++;
} else {
/* Copy all vm areas */
copy_vmas(to, from);
}
/* Copy all file descriptors */
memcpy(to->fd, from->fd,
TASK_FILES_MAX * sizeof(struct file_descriptor));
if (flags & TCB_SHARED_FILES) {
to->files = from->files;
to->files->tcb_refs++;
} else {
/*
 * Copy only the descriptor table; copying the whole struct
 * would clobber to->files->tcb_refs (set to 1 by
 * tcb_alloc_init()) with the parent's refcount.
 */
memcpy(to->files->fd, from->files->fd, sizeof(to->files->fd));
}
return 0;
}
@@ -137,10 +148,11 @@ int do_fork(struct tcb *parent)
* Create a new L4 thread with parent's page tables
* kernel stack and kernel-side tcb copied
*/
child = task_create(&ids, THREAD_CREATE_COPYSPC);
child = task_create(&ids, THREAD_CREATE_COPYSPC,
TCB_NO_SHARING);
/* Copy parent tcb to child */
copy_tcb(child, parent);
copy_tcb(child, parent, TCB_NO_SHARING);
/* Create new utcb for child since it can't use its parent's */
child->utcb = utcb_vaddr_new();

View File

@@ -569,7 +569,7 @@ int vm_freeze_shadows(struct tcb *task)
struct vm_object *vmo;
struct page *p;
list_for_each_entry(vma, &task->vm_area_list, list) {
list_for_each_entry(vma, &task->vm_area_head->list, list) {
/* Shared vmas don't have shadows */
if (vma->flags & VMA_SHARED)
@@ -699,7 +699,7 @@ int page_fault_handler(l4id_t sender, fault_kdata_t *fkdata)
/* Get vma info */
if (!(fault.vma = find_vma(fault.address,
&fault.task->vm_area_list)))
&fault.task->vm_area_head->list)))
printf("Hmm. No vma for faulty region. "
"Bad things will happen.\n");
@@ -726,7 +726,7 @@ int validate_task_range(struct tcb *t, unsigned long start,
/* Find the vma that maps that virtual address */
for (unsigned long vaddr = start; vaddr < end; vaddr += PAGE_SIZE) {
if (!(vma = find_vma(vaddr, &t->vm_area_list))) {
if (!(vma = find_vma(vaddr, &t->vm_area_head->list))) {
printf("%s: No VMA found for 0x%x on task: %d\n",
__FUNCTION__, vaddr, t->tid);
return -EINVAL;
@@ -752,7 +752,7 @@ struct page *task_virt_to_page(struct tcb *t, unsigned long virtual)
struct page *page;
/* First find the vma that maps that virtual address */
if (!(vma = find_vma(virtual, &t->vm_area_list))) {
if (!(vma = find_vma(virtual, &t->vm_area_head->list))) {
printf("%s: No VMA found for 0x%x on task: %d\n",
__FUNCTION__, virtual, t->tid);
return PTR_ERR(-EINVAL);
@@ -813,7 +813,7 @@ int prefault_page(struct tcb *task, unsigned long address,
/* Find the vma */
if (!(fault.vma = find_vma(fault.address,
&fault.task->vm_area_list))) {
&fault.task->vm_area_head->list))) {
err = -EINVAL;
dprintf("%s: Invalid: No vma for given address. %d\n",
__FUNCTION__, err);

View File

@@ -111,8 +111,8 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
}
/* Assign vnum to given fd on the task */
t->fd[fd].vnum = vnum;
t->fd[fd].cursor = 0;
t->files->fd[fd].vnum = vnum;
t->files->fd[fd].cursor = 0;
/* Check if that vm_file is already in the list */
list_for_each_entry(vmfile, &vm_file_list, list) {
@@ -120,7 +120,7 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
if ((vmfile->type & VM_FILE_VFS) &&
vm_file_to_vnum(vmfile) == vnum) {
/* Add a reference to it from the task */
t->fd[fd].vmfile = vmfile;
t->files->fd[fd].vmfile = vmfile;
vmfile->openers++;
l4_ipc_return(0);
return 0;
@@ -137,7 +137,7 @@ int vfs_receive_sys_open(l4id_t sender, l4id_t opener, int fd,
vm_file_to_vnum(vmfile) = vnum;
vmfile->length = length;
vmfile->vm_obj.pager = &file_pager;
t->fd[fd].vmfile = vmfile;
t->files->fd[fd].vmfile = vmfile;
vmfile->openers++;
/* Add to global list */
@@ -340,11 +340,11 @@ int fsync_common(l4id_t sender, int fd)
BUG_ON(!(task = find_task(sender)));
/* Check fd validity */
if (fd < 0 || fd > TASK_FILES_MAX || !task->fd[fd].vmfile)
if (fd < 0 || fd >= TASK_FILES_MAX || !task->files->fd[fd].vmfile)
return -EBADF;
/* Finish I/O on file */
f = task->fd[fd].vmfile;
f = task->files->fd[fd].vmfile;
if ((err = flush_file_pages(f)) < 0)
return err;
@@ -366,11 +366,11 @@ int fd_close(l4id_t sender, int fd)
return err;
/* Reduce file's opener count */
task->fd[fd].vmfile->openers--;
task->files->fd[fd].vmfile->openers--;
task->fd[fd].vnum = 0;
task->fd[fd].cursor = 0;
task->fd[fd].vmfile = 0;
task->files->fd[fd].vnum = 0;
task->files->fd[fd].cursor = 0;
task->files->fd[fd].vmfile = 0;
return 0;
}
@@ -567,7 +567,7 @@ int sys_read(l4id_t sender, int fd, void *buf, int count)
BUG_ON(!(task = find_task(sender)));
/* Check fd validity */
if (fd < 0 || fd > TASK_FILES_MAX || !task->fd[fd].vmfile) {
if (fd < 0 || fd >= TASK_FILES_MAX || !task->files->fd[fd].vmfile) {
retval = -EBADF;
goto out;
}
@@ -589,8 +589,8 @@ int sys_read(l4id_t sender, int fd, void *buf, int count)
goto out;
}
vmfile = task->fd[fd].vmfile;
cursor = task->fd[fd].cursor;
vmfile = task->files->fd[fd].vmfile;
cursor = task->files->fd[fd].cursor;
/* If cursor is beyond file end, simply return 0 */
if (cursor >= vmfile->length) {
@@ -627,7 +627,7 @@ int sys_read(l4id_t sender, int fd, void *buf, int count)
}
/* Update cursor on success */
task->fd[fd].cursor += count;
task->files->fd[fd].cursor += count;
retval = count;
out:
@@ -655,7 +655,7 @@ int sys_write(l4id_t sender, int fd, void *buf, int count)
BUG_ON(!(task = find_task(sender)));
/* Check fd validity */
if (fd < 0 || fd > TASK_FILES_MAX || !task->fd[fd].vmfile) {
if (fd < 0 || fd >= TASK_FILES_MAX || !task->files->fd[fd].vmfile) {
retval = -EBADF;
goto out;
}
@@ -677,8 +677,8 @@ int sys_write(l4id_t sender, int fd, void *buf, int count)
goto out;
}
vmfile = task->fd[fd].vmfile;
cursor = task->fd[fd].cursor;
vmfile = task->files->fd[fd].vmfile;
cursor = task->files->fd[fd].cursor;
/* See what pages user wants to write */
pfn_wstart = __pfn(cursor);
@@ -751,10 +751,10 @@ int sys_write(l4id_t sender, int fd, void *buf, int count)
* of this change when the file is flushed (e.g. via fflush()
* or close())
*/
if (task->fd[fd].cursor + count > vmfile->length)
vmfile->length = task->fd[fd].cursor + count;
if (task->files->fd[fd].cursor + count > vmfile->length)
vmfile->length = task->files->fd[fd].cursor + count;
task->fd[fd].cursor += count;
task->files->fd[fd].cursor += count;
retval = count;
out:
@@ -772,7 +772,7 @@ int sys_lseek(l4id_t sender, int fd, off_t offset, int whence)
BUG_ON(!(task = find_task(sender)));
/* Check fd validity */
if (fd < 0 || fd > TASK_FILES_MAX || !task->fd[fd].vmfile) {
if (fd < 0 || fd >= TASK_FILES_MAX || !task->files->fd[fd].vmfile) {
retval = -EBADF;
goto out;
}
@@ -785,23 +785,23 @@ int sys_lseek(l4id_t sender, int fd, off_t offset, int whence)
switch (whence) {
case SEEK_SET:
retval = task->fd[fd].cursor = offset;
retval = task->files->fd[fd].cursor = offset;
break;
case SEEK_CUR:
cursor = (unsigned long long)task->fd[fd].cursor;
cursor = (unsigned long long)task->files->fd[fd].cursor;
if (cursor + offset > 0xFFFFFFFF)
retval = -EINVAL;
else
retval = task->fd[fd].cursor += offset;
retval = task->files->fd[fd].cursor += offset;
break;
case SEEK_END:
cursor = (unsigned long long)task->fd[fd].cursor;
total = (unsigned long long)task->fd[fd].vmfile->length;
cursor = (unsigned long long)task->files->fd[fd].cursor;
total = (unsigned long long)task->files->fd[fd].vmfile->length;
if (cursor + total > 0xFFFFFFFF)
retval = -EINVAL;
else {
retval = task->fd[fd].cursor =
task->fd[fd].vmfile->length + offset;
retval = task->files->fd[fd].cursor =
task->files->fd[fd].vmfile->length + offset;
}
/* Without this break SEEK_END falls through to default
 * and the computed offset is overwritten with -EINVAL */
break;
default:
retval = -EINVAL;

View File

@@ -292,7 +292,7 @@ int do_munmap(void *vaddr, unsigned long npages, struct tcb *task)
int err;
/* Check if any such vma exists */
if (!(vma = find_vma((unsigned long)vaddr, &task->vm_area_list)))
if (!(vma = find_vma((unsigned long)vaddr, &task->vm_area_head->list)))
return -EINVAL;
/*
@@ -392,11 +392,11 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
return 0;
/* If no vmas, first map slot is available. */
if (list_empty(&task->vm_area_list))
if (list_empty(&task->vm_area_head->list))
return task->start;
/* First vma to check our range against */
vma = list_entry(task->vm_area_list.next, struct vm_area, list);
vma = list_entry(task->vm_area_head->list.next, struct vm_area, list);
/* Start searching from task's end of data to start of stack */
while (pfn_end <= __pfn(task->end)) {
@@ -412,7 +412,7 @@ unsigned long find_unmapped_area(unsigned long npages, struct tcb *task)
* Decision point, no more vmas left to check.
* Are we out of task map area?
*/
if (vma->list.next == &task->vm_area_list) {
if (vma->list.next == &task->vm_area_head->list) {
if (pfn_end > __pfn(task->end))
break; /* Yes, fail */
else /* No, success */
@@ -519,7 +519,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
* To be fixed soon. We need to handle intersection,
* splitting, shrink/grow etc.
*/
list_for_each_entry(mapped, &task->vm_area_list, list)
list_for_each_entry(mapped, &task->vm_area_head->list, list)
BUG_ON(vma_intersect(map_pfn, map_pfn + npages,
mapped));
}
@@ -537,7 +537,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
/* Attach link to object */
vm_link_object(vmo_link, &mapfile->vm_obj);
/* ADd link to vma list */
/* Add link to vma list */
list_add_tail(&vmo_link->list, &new->vm_obj_list);
/*
@@ -566,7 +566,7 @@ int do_mmap(struct vm_file *mapfile, unsigned long file_offset,
/* Finished initialising the vma, add it to task */
dprintf("%s: Mapping 0x%x - 0x%x\n", __FUNCTION__,
map_address, map_address + npages * PAGE_SIZE);
list_add(&new->list, &task->vm_area_list);
task_add_vma(task, new);
return 0;
}
@@ -607,7 +607,7 @@ int sys_mmap(l4id_t sender, void *start, size_t length, int prot,
file = 0;
vmflags |= VMA_ANONYMOUS;
} else {
file = task->fd[fd].vmfile;
file = task->files->fd[fd].vmfile;
}
if (flags & MAP_FIXED)

View File

@@ -63,42 +63,76 @@ struct tcb *find_task(int tid)
return 0;
}
struct tcb *tcb_alloc_init(void)
/* Allocate structures that could be shared upon a clone() */
void tcb_alloc_shared(struct tcb *task)
{
BUG_ON(!(task->vm_area_head = kzalloc(sizeof(*task->vm_area_head))));
BUG_ON(!(task->files = kzalloc(sizeof(*task->files))));
/* Match tcb_alloc_init(): zeroed fd table and a valid list head */
INIT_LIST_HEAD(&task->vm_area_head->list);
task->files->tcb_refs = 1;
task->vm_area_head->tcb_refs = 1;
}
struct tcb *tcb_alloc_init(unsigned int flags)
{
struct tcb *task;
if (!(task = kzalloc(sizeof(struct tcb))))
return PTR_ERR(-ENOMEM);
/* Allocate new vma head if its not shared */
if (!(flags & TCB_SHARED_VM)) {
if (!(task->vm_area_head =
kzalloc(sizeof(*task->vm_area_head)))) {
kfree(task);
return PTR_ERR(-ENOMEM);
}
task->vm_area_head->tcb_refs = 1;
INIT_LIST_HEAD(&task->vm_area_head->list);
}
/* Allocate file structures if not shared */
if (!(flags & TCB_SHARED_FILES)) {
if (!(task->files =
kzalloc(sizeof(*task->files)))) {
kfree(task->vm_area_head);
kfree(task);
return PTR_ERR(-ENOMEM);
}
task->files->tcb_refs = 1;
}
/* Ids will be acquired from the kernel */
task->tid = TASK_ID_INVALID;
task->spid = TASK_ID_INVALID;
task->tgid = TASK_ID_INVALID;
/* Initialise its lists */
/* Initialise list structure */
INIT_LIST_HEAD(&task->list);
INIT_LIST_HEAD(&task->vm_area_list);
return task;
}
struct tcb *task_create(struct task_ids *ids, unsigned int flags)
struct tcb *task_create(struct task_ids *ids,
unsigned int ctrl_flags,
unsigned int alloc_flags)
{
struct tcb *task;
int err;
/* Create the thread structures and address space */
if ((err = l4_thread_control(THREAD_CREATE | flags, ids)) < 0) {
if ((err = l4_thread_control(THREAD_CREATE | ctrl_flags, ids)) < 0) {
printf("l4_thread_control failed with %d.\n", err);
return PTR_ERR(err);
}
/* Create a task and use given space and thread ids. */
if (IS_ERR(task = tcb_alloc_init()))
if (IS_ERR(task = tcb_alloc_init(alloc_flags)))
return PTR_ERR(task);
task->tid = ids->tid;
task->spid = ids->spid;
task->tgid = ids->tgid;
return task;
}
@@ -234,10 +268,11 @@ int mm0_task_init(struct vm_file *f, unsigned long task_start,
* The thread itself is already known by the kernel, so we just
* allocate a local task structure.
*/
BUG_ON(IS_ERR(task = tcb_alloc_init()));
BUG_ON(IS_ERR(task = tcb_alloc_init(TCB_NO_SHARING)));
task->tid = ids->tid;
task->spid = ids->spid;
task->tgid = ids->tgid;
if ((err = task_setup_regions(f, task, task_start, task_end)) < 0)
return err;
@@ -272,7 +307,7 @@ int task_prefault_regions(struct tcb *task, struct vm_file *f)
{
struct vm_area *vma;
list_for_each_entry(vma, &task->vm_area_list, list) {
list_for_each_entry(vma, &task->vm_area_head->list, list) {
for (int pfn = vma->pfn_start; pfn < vma->pfn_end; pfn++)
BUG_ON(prefault_page(task, __pfn_to_addr(pfn),
VM_READ | VM_WRITE) < 0);
@@ -290,7 +325,8 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
struct tcb *task;
int err;
if (IS_ERR(task = task_create(ids, THREAD_CREATE_NEWSPC)))
if (IS_ERR(task = task_create(ids, THREAD_CREATE_NEWSPC,
TCB_NO_SHARING)))
return (int)task;
if ((err = task_setup_regions(f, task, task_region_start,
@@ -322,114 +358,6 @@ int task_exec(struct vm_file *f, unsigned long task_region_start,
return 0;
}
#if 0
/*
* Creates a process environment, mmaps the given file along
* with any other necessary segment, and executes it as a task.
*/
int start_boot_task(struct vm_file *file, unsigned long task_start,
unsigned long task_end, struct task_ids *ids)
{
int err;
struct tcb *task;
unsigned int sp, pc;
/* Create the thread structures and address space */
printf("Creating new thread.\n");
if ((err = l4_thread_control(THREAD_CREATE, ids)) < 0) {
printf("l4_thread_control failed with %d.\n", err);
goto error;
}
/* Create a task and use given space and thread ids. */
printf("New task with id: %d, space id: %d\n", ids->tid, ids->spid);
task = tcb_alloc_init();
task->tid = ids->tid;
task->spid = ids->spid;
/* Prepare environment boundaries. */
task->env_end = task_end;
task->env_start = task->env_end - DEFAULT_ENV_SIZE;
task->args_end = task->env_start;
task->args_start = task->env_start;
/* Task stack starts right after the environment. */
task->stack_end = task->env_start;
task->stack_start = task->stack_end - DEFAULT_STACK_SIZE;
/* Currently RO text and RW data are one region. TODO: Fix this */
task->data_start = task_start;
task->data_end = task_start + page_align_up(file->length);
task->text_start = task->data_start;
task->text_end = task->data_end;
/* Task's region available for mmap */
task->map_start = task->data_end;
task->map_end = task->stack_start;
/* Task's utcb */
task->utcb = utcb_vaddr_new();
/* Create a shared memory segment available for shmat() */
shm_new((key_t)task->utcb, __pfn(DEFAULT_UTCB_SIZE));
/* Set up task's registers */
sp = align(task->stack_end - 1, 8);
pc = task->text_start;
/* Set up the task's thread details, (pc, sp, pager etc.) */
if ((err = l4_exchange_registers(pc, sp, self_tid(), task->tid) < 0)) {
printf("l4_exchange_registers failed with %d.\n", err);
goto error;
}
/*
* mmap each task's physical image to task's address space.
* TODO: Map data and text separately when available from bootdesc.
*/
if ((err = do_mmap(file, 0, task, task->text_start,
VM_READ | VM_WRITE | VM_EXEC | VMA_PRIVATE,
__pfn(page_align_up(task->text_end) -
task->text_start))) < 0) {
printf("do_mmap: failed with %d.\n", err);
goto error;
}
/* mmap each task's environment as anonymous memory. */
if ((err = do_mmap(0, 0, task, task->env_start,
VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
__pfn(task->env_end - task->env_start))) < 0) {
printf("do_mmap: Mapping environment failed with %d.\n",
err);
goto error;
}
/* mmap each task's stack as anonymous memory. */
if ((err = do_mmap(0, 0, task, task->stack_start,
VM_READ | VM_WRITE | VMA_PRIVATE | VMA_ANONYMOUS,
__pfn(task->stack_end - task->stack_start))) < 0) {
printf("do_mmap: Mapping stack failed with %d.\n", err);
goto error;
}
/* Add the task to the global task list */
list_add(&task->list, &tcb_head.list);
tcb_head.total++;
/* Start the thread */
printf("Starting task with id %d\n", task->tid);
if ((err = l4_thread_control(THREAD_RUN, ids)) < 0) {
printf("l4_thread_control failed with %d\n", err);
goto error;
}
return 0;
error:
BUG();
}
#endif
struct vm_file *initdata_next_bootfile(struct initdata *initdata)
{
struct vm_file *file, *n;
@@ -477,6 +405,8 @@ int start_boot_tasks(struct initdata *initdata)
printf("%s: Initialising mm0 tcb.\n", __TASKNAME__);
ids.tid = PAGER_TID;
ids.spid = PAGER_TID;
ids.tgid = PAGER_TID;
if (mm0_task_init(mm0, INITTASK_AREA_START, INITTASK_AREA_END, &ids) < 0)
BUG();
total++;
@@ -484,6 +414,8 @@ int start_boot_tasks(struct initdata *initdata)
/* Initialise vfs with its predefined id */
ids.tid = VFS_TID;
ids.spid = VFS_TID;
ids.tgid = VFS_TID;
printf("%s: Initialising fs0\n",__TASKNAME__);
if (task_exec(fs0, USER_AREA_START, USER_AREA_END, &ids) < 0)
BUG();
@@ -494,6 +426,7 @@ int start_boot_tasks(struct initdata *initdata)
printf("%s: Initialising new boot task.\n", __TASKNAME__);
ids.tid = TASK_ID_INVALID;
ids.spid = TASK_ID_INVALID;
ids.tgid = TASK_ID_INVALID;
if (task_exec(file, USER_AREA_START, USER_AREA_END, &ids) < 0)
BUG();
total++;