Made changes to map devices dynamically upon irq registration.

All of these changes will be reverted, since mapping devices statically is much simpler.
Bahadir Balban
2009-12-11 19:02:10 +02:00
parent 54301e8026
commit 59af5d3794
16 changed files with 248 additions and 64 deletions
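For orientation, the two approaches the message contrasts look roughly like the sketch below; it is illustrative only and simply mirrors what the irq_register() hunk further down does.

/*
 * Static mapping (what the tree reverts to): the device sits at a fixed
 * kernel virtual offset chosen at compile time, e.g. PLATFORM_TIMER1_VIRTUAL,
 * and handlers dereference that constant directly.
 *
 * Dynamic mapping (this commit): irq_register() stashes the device
 * capability, allocates a kernel virtual window for it, and the first
 * interrupt lazily maps the device into whatever address space is current.
 */
int irq_register_sketch(struct irq_desc *desc, struct capability *device)
{
	desc->devcap = device;

	/* Window size in pages; start/end are page frame numbers */
	desc->device_virtual =
		(unsigned long)kernel_new_address(device->end - device->start);
	if (!desc->device_virtual)
		return -ENOMEM;

	/* Later, in irq context, irq_generic_map_device() maps the
	 * capability's physical range at desc->device_virtual if the
	 * current page tables don't already cover it. */
	return 0;
}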

View File

@@ -141,5 +141,6 @@
#define ENOCAP 134 /* None or insufficient capability */
#define ENOUTCB 135 /* Task has no utcb set up */
#define ENOMAP 136 /* The memory area has unmapped regions */
#define ENOIRQ 137 /* Irq cannot be registered */
#endif /* __ERRNO_H__ */

View File

@@ -105,6 +105,6 @@ int cap_cap_check(struct ktcb *task, unsigned int req, unsigned int flags);
int cap_mutex_check(unsigned long mutex_address, int mutex_op);
int cap_irq_check(struct ktcb *registrant, unsigned int req,
unsigned int flags, l4id_t irq);
unsigned int flags, l4id_t irq, struct capability **cap);
#endif /* __GENERIC_CAPABILITY_H__ */

View File

@@ -40,11 +40,17 @@ struct irq_desc {
struct irq_chip *chip;
/* Thread registered for this irq */
struct ktcb *irq_thread;
struct ktcb *task;
/* Notification slot for this irq */
int task_notify_slot;
/* Device virtual address */
unsigned long device_virtual;
/* Device capability */
struct capability *devcap;
/* NOTE: This could be a list for multiple handlers for shared irqs */
irq_handler_t handler;
};
@@ -68,8 +74,8 @@ static inline void irq_disable(int irq_index)
this_chip->ops.ack_and_mask(irq_index - this_chip->start);
}
int irq_register(struct ktcb *task, int notify_slot,
l4id_t irq_index, irq_handler_t handler);
void irq_generic_map_device(struct irq_desc *desc);
int irq_register(struct ktcb *task, int notify_slot, l4id_t irq_index);
void do_irq(void);
void irq_controllers_init(void);

View File

@@ -58,7 +58,7 @@
* Complicated for you? Suggest a simpler design and it shall be implemented!
*/
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 2) /* -2 is for fields on utcb */
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 4) /* -4 is for fields on utcb */
#define MR_TOTAL 6
#define MR_TAG 0 /* Contains the purpose of message */
#define MR_SENDER 1 /* For anythread receivers to discover sender */
@@ -83,6 +83,7 @@ struct utcb {
u32 mr[MR_TOTAL]; /* MRs that are mapped to real registers */
u32 saved_tag; /* Saved tag field for stacked ipcs */
u32 saved_sender; /* Saved sender field for stacked ipcs */
u8 notify_slot[8]; /* Irq notification slots */
u32 mr_rest[MR_REST]; /* Complete the utcb for up to 64 words */
};
#endif
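The change from -2 to -4 accounts for the new 8-byte notify_slot array; a standalone sketch of the arithmetic, assuming UTCB_SIZE is 256 bytes (64 words, as the mr_rest comment suggests):

#include <stdint.h>

#define UTCB_SIZE 256	/* assumption: 64 words, per the "up to 64 words" comment */
#define MR_TOTAL 6
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 4)

struct utcb_sketch {
	uint32_t mr[MR_TOTAL];		/*  6 words */
	uint32_t saved_tag;		/*  1 word, part of the old -2 */
	uint32_t saved_sender;		/*  1 word, part of the old -2 */
	uint8_t notify_slot[8];		/*  2 words, the new field, hence -4 */
	uint32_t mr_rest[MR_REST];	/* 54 words */
};

/* The structure must still fill exactly UTCB_SIZE bytes */
_Static_assert(sizeof(struct utcb_sketch) == UTCB_SIZE, "utcb accounting");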

include/l4/lib/addr.h (new file, +24 lines)
View File

@@ -0,0 +1,24 @@
/*
* Address allocation pool
*
* Copyright (C) 2007 - 2009 Bahadir Balban
*/
#ifndef __KERNEL_ADDR_H__
#define __KERNEL_ADDR_H__
#include <lib/idpool.h>
/* Address pool to allocate from a range of addresses */
struct address_pool {
struct id_pool idpool;
unsigned long start;
unsigned long end;
};
void *kernel_new_address(int npages);
int kernel_delete_address(void *addr, int npages);
void *address_new(struct address_pool *pool, int npages);
int address_del(struct address_pool *, void *addr, int npages);
#endif /* __KERNEL_ADDR_H__ */
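A minimal usage sketch of this pool, mirroring how irq_register() below sizes its request; it assumes a capability's start/end fields are page frame numbers, so end - start is a page count.

/* Sketch: reserve a kernel-visible window for a device and release it
 * again. The capability fields used here are assumptions based on the
 * irq_register() hunk in this commit. */
int device_window_example(struct capability *device)
{
	int npages = device->end - device->start;
	void *virt = kernel_new_address(npages);

	if (!virt)
		return -ENOMEM;

	/* ... map the device at virt, service it, unmap ... */

	return kernel_delete_address(virt, npages);
}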

View File

@@ -40,9 +40,9 @@ static inline void spin_unlock(struct spinlock *s)
* on other cpus.
*/
static inline void spin_lock_irq(struct spinlock *s,
unsigned long state)
unsigned long *state)
{
irq_local_disable_save(&state);
irq_local_disable_save(state);
#if defined(CONFIG_SMP)
__spin_lock(&s->lock);
#endif
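With the state argument now taken by pointer, the expected calling pattern is roughly the following sketch; the saved flags live in a caller-local variable that is later handed back to spin_unlock_irqrestore() by value, as the wait.c hunk below does.

void locked_update(struct spinlock *lock)
{
	unsigned long irqstate;

	/* Disable local irqs, remember the previous state, then lock */
	spin_lock_irq(lock, &irqstate);

	/* ... critical section also reachable from irq context ... */

	/* Unlock and restore the saved irq state */
	spin_unlock_irqrestore(lock, irqstate);
}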

View File

@@ -13,8 +13,9 @@ struct waitqueue {
#define WAKEUP_ASYNC 0
enum wakeup_flags {
WAKEUP_INTERRUPT = (1 << 0),
WAKEUP_SYNC = (1 << 1)
WAKEUP_INTERRUPT = (1 << 0), /* Set interrupt flag for task */
WAKEUP_SYNC = (1 << 1), /* Wake it up synchronously */
WAKEUP_IRQ = (1 << 2) /* Disable irqs on spinlocks */
};
#define CREATE_WAITQUEUE_ON_STACK(wq, tsk) \

View File

@@ -41,8 +41,8 @@
#define PB926_UART0_VOFFSET 0x00001000
#define PB926_VIC_VOFFSET 0x00002000
#define PB926_SIC_VOFFSET 0x00003000
#define PB926_SYSREGS_VOFFSET 0x00005000
#define PB926_SYSCTRL_VOFFSET 0x00006000
#define PB926_SYSREGS_VOFFSET 0x00004000
#define PB926_SYSCTRL_VOFFSET 0x00005000
#define PLATFORM_CONSOLE_VIRTUAL (IO_AREA0_VADDR + PB926_UART0_VOFFSET)
#define PLATFORM_TIMER0_VIRTUAL (IO_AREA0_VADDR + PB926_TIMER01_VOFFSET)
@@ -50,6 +50,8 @@
#define PLATFORM_IRQCTRL0_VIRTUAL (IO_AREA0_VADDR + PB926_VIC_VOFFSET)
#define PLATFORM_IRQCTRL1_VIRTUAL (IO_AREA0_VADDR + PB926_SIC_VOFFSET)
/* Add userspace devices here as they become necessary for irqs */
#define PLATFORM_TIMER1_VIRTUAL (IO_AREA0_VADDR + PB926_TIMER23_VOFFSET)
#endif /* __PLATFORM_PB926_OFFSETS_H__ */

View File

@@ -367,6 +367,16 @@ int cap_destroy(struct capability *cap)
if (!(cap_generic_perms(orig) & CAP_CHANGEABLE))
return -ENOCAP;
/*
* Check that it is not a device.
*
* We don't allow destroying device capabilities
* for now. To do this correctly, we would need to
* check that the device's irq is not currently
* registered.
*/
if (cap_is_devmem(orig))
return -ENOCAP;
cap_list_remove(orig, clist);
free_capability(orig);
return 0;

View File

@@ -13,10 +13,9 @@
#include INC_GLUE(message.h)
#include <l4/lib/wait.h>
#if 0
/*
* Default function that handles userspace
* threaded irqs. Increases notification count and wakes
* threaded irqs. Increases irq count and wakes
* up any waiters.
*
* The increment is a standard read/update/write, and
@@ -36,36 +35,32 @@
*
* FIXME: Instead of UTCB, do it by incrementing a semaphore.
*/
int thread_notify_default(struct irq_desc *desc)
int irq_thread_notify(struct irq_desc *desc)
{
struct utcb *utcb;
int err;
/* Make sure irq thread's utcb is mapped */
if ((err = tcb_check_and_lazy_map_utcb(desc->irq_thread,
if ((err = tcb_check_and_lazy_map_utcb(desc->task,
0)) < 0) {
printk("%s: Irq occured but registered task's utcb "
"is inaccessible. task id=0x%x err=%d\n"
"is inaccessible without a page fault. "
"task id=0x%x err=%d\n"
"Destroying task.", __FUNCTION__,
desc->irq_thread->tid, err);
thread_destroy(desc->irq_thread);
desc->task->tid, err);
thread_destroy(desc->task);
/* FIXME: Deregister and disable irq as well */
}
/* Get thread's utcb */
utcb = (struct utcb *)desc->irq_thread->utcb_address;
utcb = (struct utcb *)desc->task->utcb_address;
/* Atomic increment (See above comments) with no wraparound */
if (utcb->notify_slot[desc->task_notify_slot] != TASK_NOTIFY_MAX)
utcb->notify_slot[desc->task_notify_slot]++;
/*
* Wake up any waiters
*
* NOTE: There's no contention on this queue, if there was,
* we would have to have spin_lock_irq()'s on the wakeup
*/
wake_up(&desc->irq_thread->wqh_notify, WAKEUP_ASYNC);
/* Async wake up any waiter irq threads */
wake_up(&desc->task->wqh_notify, WAKEUP_ASYNC | WAKEUP_IRQ);
return 0;
}
@@ -74,7 +69,9 @@ int thread_notify_default(struct irq_desc *desc)
* Register the given globally unique irq number with the
* current thread with given flags
*/
int irq_control_register(struct ktcb *task, int notify_slot, l4id_t irqnum)
int irq_control_register(struct ktcb *task, int slot, l4id_t irqnum,
unsigned long device_virtual,
struct capability *devcap)
{
int err;
@@ -89,9 +86,8 @@ int irq_control_register(struct ktcb *task, int notify_slot, l4id_t irqnum)
if ((err = tcb_check_and_lazy_map_utcb(current, 1)) < 0)
return err;
/* Register the irq and thread notification handler */
if ((err = irq_register(current, notify_slot, irqnum,
thread_notify_default)) < 0)
/* Register the irq for thread notification */
if ((err = irq_register(current, slot, irqnum, devcap)) < 0)
return err;
return 0;
@@ -101,31 +97,24 @@ int irq_control_register(struct ktcb *task, int notify_slot, l4id_t irqnum)
* Register/deregister device irqs. Optional synchronous and
* asynchronous irq handling.
*/
int sys_irq_control(unsigned int req, int slot, unsigned int flags, l4id_t irqno)
int sys_irq_control(unsigned int req, unsigned int flags, l4id_t irqno)
{
/* Currently a task is allowed to register only for itself */
struct ktcb *task = current;
struct capability *devcap;
int err;
if ((err = cap_irq_check(task, req, flags, irqno)) < 0)
if ((err = cap_irq_check(task, req, flags, irqno, &devcap)) < 0)
return err;
switch (req) {
case IRQ_CONTROL_REGISTER:
irq_control_register(task, flags, irqno);
if ((err = irq_control_register(task, slot, flags,
irqno, devcap)) < 0)
return err;
break;
default:
return -EINVAL;
}
return 0;
}
#endif
/*
* Register/deregister device irqs. Optional synchronous and
* asynchronous irq handling.
*/
int sys_irq_control(unsigned int req, int slot, unsigned int flags, l4id_t irqno)
{
return 0;
}
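For the notification scheme described in the comment above, the userspace side would presumably look something like the following; the wait and device-service calls named here are placeholders, not APIs defined in this tree.

/* Hypothetical consumer of a utcb notify slot: the kernel increments the
 * slot (capped at TASK_NOTIFY_MAX) and wakes the thread; the thread drains
 * one count per serviced interrupt. l4_wait_notify() is a placeholder. */
void irq_thread_loop(struct utcb *utcb, int slot)
{
	for (;;) {
		while (utcb->notify_slot[slot] == 0)
			l4_wait_notify();		/* placeholder wait call */

		utcb->notify_slot[slot]--;		/* consume one pending irq */
		service_device();			/* placeholder device work */
	}
}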

View File

@@ -877,7 +877,8 @@ int cap_thread_check(struct ktcb *task,
int cap_irq_check(struct ktcb *registrant, unsigned int req,
unsigned int flags, l4id_t irq)
unsigned int flags, l4id_t irq,
struct capability **device_cap)
{
struct sys_irqctrl_args args = {
.registrant = registrant,
@@ -895,8 +896,8 @@ int cap_irq_check(struct ktcb *registrant, unsigned int req,
* Find the device capability and
* check that it allows irq registration
*/
if (!(cap_find(current, cap_match_devmem,
&args, CAP_TYPE_MAP_PHYSMEM)))
if (!(*device_cap = cap_find(current, cap_match_devmem,
&args, CAP_TYPE_MAP_PHYSMEM)))
return -ENOCAP;
return 0;
@@ -939,7 +940,7 @@ int cap_thread_check(struct ktcb *task,
}
int cap_irq_check(struct ktcb *registrant, unsigned int req,
unsigned int flags, l4id_t irq)
unsigned int flags, l4id_t irq, struct capability **cap)
{
return 0;
}

View File

@@ -1,6 +1,5 @@
/*
* Kernel irq handling (core irqs like timer).
* Also thread-level irq handling.
* Generic kernel irq handling.
*
* Copyright (C) 2007 - 2009 Bahadir Balban
*/
@@ -12,31 +11,76 @@
#include <l4/generic/irq.h>
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/api/errno.h>
#include INC_PLAT(irq.h)
#include INC_ARCH(exception.h)
/*
* Checks that a device was validly registered for the irq,
* and lazily maps it into the currently interrupted process.
*/
void irq_generic_map_device(struct irq_desc *desc)
{
/*
* Check that irq is registered with a
* valid device capability and virtual address
*/
if (!desc->devcap || !KERN_ADDR(desc->device_virtual)) {
printk("Spurious irq. %s irq occurred but "
"no device capability or valid virtual device "
"address associated with the irq.\n",
desc->name);
BUG();
}
/* Check and lazy map device */
if (check_access(desc->device_virtual,
desc->devcap->end - desc->devcap->start,
MAP_SVC_RW_FLAGS, 0) < 0) {
add_mapping(__pfn_to_addr(desc->devcap->start),
desc->device_virtual, MAP_SVC_RW_FLAGS,
desc->devcap->end - desc->devcap->start);
}
}
/*
* Registers a userspace thread as an irq handler.
*
* A userspace irq thread should have a low-level, device-specific
* irq handler as an in-kernel counterpart. This and its irq chip
* must have been set up at compile-time. These handlers should
* also know how to notify their userspace threads.
*
* If the irq does not have these set up, we cannot allow
* the irq registration.
*/
int irq_register(struct ktcb *task, int notify_slot,
l4id_t irq_index, irq_handler_t handler)
l4id_t irq_index, struct capability *device)
{
struct irq_desc *this_desc = irq_desc_array + irq_index;
struct irq_chip *current_chip = irq_chip_array;
for (int i = 0; i < IRQ_CHIPS_MAX; i++, current_chip++) {
if (irq_index >= current_chip->start &&
irq_index < current_chip->end) {
this_desc->chip = current_chip;
break;
}
}
/* Setup the handler */
this_desc->handler = handler;
/* Kernel counterpart not set up, don't allow */
if (!this_desc->handler || !this_desc->chip)
return -ENOIRQ;
/* Setup the slot to notify the task */
/* Setup the task and notify slot */
this_desc->task = task;
this_desc->task_notify_slot = notify_slot;
/*
* Setup capability and allocate virtual kernel address.
*
* This is required so that the irq handler may reach
* the device from the kernel at any runnable process.
*/
this_desc->devcap = device;
if (!(this_desc->device_virtual =
kernel_new_address(device->end - device->start)))
return -ENOMEM;
/* Enable the irq */
irq_enable(irq_index);
return 0;
}

View File

@@ -416,6 +416,19 @@ int free_boot_memory(struct kernel_resources *kres)
return 0;
}
void kernel_address_pool_init(struct kernel_resources *kres)
{
/* Initialize id pool spinlock */
spin_lock_init(&kres->kernel_address_pool.idpool.lock);
/* Initialize id pool number of words */
kres->kernel_address_pool.idpool.nwords = SYSTEM_IDS_MAX;
/* Initialize address pool start and end ranges */
kres->kernel_address_pool.start = page_align_up(_end);
kres->kernel_address_pool.end = KERNEL_AREA_END;
}
/*
* Initializes kernel caplists, and sets up total of physical
* and virtual memory as single capabilities of the kernel.
@@ -435,6 +448,9 @@ void init_kernel_resources(struct kernel_resources *kres)
kres->mutex_ids.nwords = SYSTEM_IDS_MAX;
kres->capability_ids.nwords = SYSTEM_IDS_MAX;
/* Initialize kernel's virtual address pool */
kernel_address_pool_init(kres);
/* Initialize container head */
container_head_init(&kres->containers);

src/lib/addr.c (new file, +48 lines)
View File

@@ -0,0 +1,48 @@
/*
* This module allocates an unused address range from
* a given memory region defined as the pool range.
*
* Copyright (C) 2007 - 2009 Bahadir Balban
*/
#include <lib/bit.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include <lib/addr.h>
#include <stdio.h>
extern struct kernel_resources kres;
void *address_new(struct address_pool *pool, int npages)
{
unsigned int pfn;
if ((int)(pfn = ids_new_contiguous(&pool->idpool, npages)) < 0)
return 0;
return (void *)(__pfn_to_addr(pfn) + pool->start);
}
int address_del(struct address_pool *pool, void *addr, int npages)
{
unsigned long pfn = __pfn(page_align(addr) - pool->start);
if (ids_del_contiguous(&pool->idpool, pfn, npages) < 0) {
printf("%s: Invalid address range returned to "
"virtual address pool.\n", __FUNCTION__);
return -1;
}
return 0;
}
void *kernel_new_address(int npages)
{
return address_new(&kres.kernel_address_pool, npages);
}
int kernel_delete_address(void *addr, int npages)
{
return address_del(&kres.kernel_address_pool, addr, npages);
}
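A standalone walk-through of the pool arithmetic with assumed values (4 KiB pages, an illustrative pool base of 0xf0000000): allocation adds the pool base to the page-frame offset handed out by the id pool, and deletion reverses it.

#include <stdio.h>

#define PAGE_BITS 12	/* assumption: 4 KiB pages */
#define __pfn_to_addr(pfn) ((unsigned long)(pfn) << PAGE_BITS)
#define __pfn(addr) ((unsigned long)(addr) >> PAGE_BITS)

int main(void)
{
	unsigned long pool_start = 0xf0000000UL;	/* illustrative base */
	unsigned long pfn = 3;				/* pretend the id pool returned pfn 3 */

	unsigned long vaddr = __pfn_to_addr(pfn) + pool_start;	/* 0xf0003000 */
	unsigned long back = __pfn(vaddr - pool_start);		/* 3 again */

	printf("vaddr=%#lx pfn=%lu\n", vaddr, back);
	return 0;
}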

View File

@@ -139,8 +139,15 @@ void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
/* Wake up single waiter */
void wake_up(struct waitqueue_head *wqh, unsigned int flags)
{
unsigned long irqflags;
BUG_ON(wqh->sleepers < 0);
spin_lock(&wqh->slock);
/* Irq version */
if (flags & WAKEUP_IRQ)
spin_lock_irq(&wqh->slock, &irqflags);
else
spin_lock(&wqh->slock);
if (wqh->sleepers > 0) {
struct waitqueue *wq = link_to_struct(wqh->task_list.next,
struct waitqueue,
@@ -153,7 +160,10 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
if (flags & WAKEUP_INTERRUPT)
sleeper->flags |= TASK_INTERRUPTED;
//printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
spin_unlock(&wqh->slock);
if (flags & WAKEUP_IRQ)
spin_unlock_irqrestore(&wqh->slock, irqflags);
else
spin_unlock(&wqh->slock);
if (flags & WAKEUP_SYNC)
sched_resume_sync(sleeper);
@@ -161,7 +171,10 @@ void wake_up(struct waitqueue_head *wqh, unsigned int flags)
sched_resume_async(sleeper);
return;
}
spin_unlock(&wqh->slock);
if (flags & WAKEUP_IRQ)
spin_unlock_irqrestore(&wqh->slock, irqflags);
else
spin_unlock(&wqh->slock);
}
/*
@@ -174,6 +187,9 @@ int wake_up_task(struct ktcb *task, unsigned int flags)
struct waitqueue_head *wqh;
struct waitqueue *wq;
/* Not yet handled; would need spin_lock_irq here */
BUG_ON(flags & WAKEUP_IRQ);
spin_lock(&task->waitlock);
if (!task->waiting_on) {
spin_unlock(&task->waitlock);
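The new flag is intended for callers that run in interrupt context, as the irq notification path in this commit does; the calling pattern, mirroring the irq.c hunk above, is roughly:

/* Sketch: waking a sleeper from an irq handler. WAKEUP_IRQ makes wake_up()
 * take the waitqueue spinlock with local irqs disabled and saved, the
 * variant an interrupt-context caller needs. */
static int some_irq_handler(struct irq_desc *desc)
{
	/* ... acknowledge the device here ... */
	wake_up(&desc->task->wqh_notify, WAKEUP_ASYNC | WAKEUP_IRQ);
	return 0;
}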

View File

@@ -51,6 +51,24 @@ static int platform_timer_handler(struct irq_desc *desc)
return do_timer_irq();
}
/*
* Timer handler for userspace
*/
static int platform_user_timer_irq_handler(struct irq_desc *desc)
{
/* Lazily map the device to process kernel tables */
irq_generic_map_device(desc);
/* Ack the device irq */
sp804_irq_handler(desc->device_virtual);
/* Notify the userspace */
irq_thread_notify(desc);
return 0;
}
/*
* Built-in irq handlers initialised at compile time.
* Otherwise register at runtime with irq_register()
@@ -61,5 +79,12 @@ struct irq_desc irq_desc_array[IRQS_MAX] = {
.chip = &irq_chip_array[0],
.handler = platform_timer_handler,
},
[IRQ_TIMER1] = {
.name = "Timer1",
.chip = &irq_chip_array[0],
.handler = platform_user_timer_irq_handler,
},
};
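Once sys_irq_control() is fleshed out, a task would presumably register for this irq along the following lines; the userspace wrapper name is an assumption, and the syscall stub above currently returns 0 unconditionally.

/* Hypothetical userspace sketch: register for IRQ_TIMER1 on notify slot 0.
 * l4_irq_control() is assumed to mirror sys_irq_control(req, slot, flags,
 * irqno); it is not part of this diff. */
int register_timer1_irq(void)
{
	int slot = 0;	/* one of the 8 utcb notify slots */
	int err;

	if ((err = l4_irq_control(IRQ_CONTROL_REGISTER, slot, 0, IRQ_TIMER1)) < 0)
		return err;

	/* From here on, each timer tick bumps utcb->notify_slot[slot]
	 * and wakes this thread asynchronously. */
	return 0;
}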