Mirror of https://github.com/drasko/codezero.git
Changes between 16 March 2010 - 6 April 2010

Mutex system call fixed for multiple contenders. Userspace irq support extended to keyboard/mouse. Scheduler modified for real-time irq tasks.
@@ -91,6 +91,9 @@ int irq_control_register(struct ktcb *task, int slot, l4id_t irqnum)
	if ((err = irq_register(current, slot, irqnum)) < 0)
		return err;

+	/* Make thread a real-time task */
+	current->flags |= TASK_REALTIME;
+
	return 0;
}

@@ -111,16 +114,6 @@ int irq_wait(l4id_t irq_index)
	if ((ret = tcb_check_and_lazy_map_utcb(current, 1)) < 0)
		return ret;

-	/*
-	 * In case the user has asked for unmasking the irq only after
-	 * the user handler is done, unmask the irq
-	 *
-	 * FIXME: This is not the correct place for this call,
-	 * fix this.
-	 */
-	if (desc->user_ack)
-		irq_enable(irq_index);
-
	/* Wait until the irq changes slot value */
	WAIT_EVENT(&desc->wqh_irq,
		   utcb->notify[desc->task_notify_slot] != 0,
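For context, a minimal sketch of how a userspace driver thread might use this call pair. The wrapper names and the notify-slot constant are assumptions for illustration, not codezero's actual user API:

/* Hypothetical userspace irq thread for the keyboard. */
void keyboard_irq_thread(void)
{
	int slot = 0;	/* assumed utcb notify slot */

	/* Registering also flags this thread TASK_REALTIME (see above) */
	l4_irq_control(IRQ_CONTROL_REGISTER, slot, IRQ_KEYBOARD0);

	for (;;) {
		/* Blocks in WAIT_EVENT until the kernel handler notifies */
		l4_irq_wait(slot);

		/* Drain the device FIFO, then re-enable the device-level
		 * rx interrupt that the kernel handler masked (see the
		 * PL050 handlers later in this diff). */
		handle_keyboard_event();
	}
}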
src/api/mutex.c
@@ -102,16 +102,56 @@ void mutex_control_delete(struct mutex_queue *mq)
}

/*
 * A contended thread is expected to show up with the
 * contended mutex address here.
 * Here's how this whole mutex implementation works:
 *
 * (1) The mutex is converted into its physical form and
 * searched for in the existing mutex list. If it does not
 * appear there, it gets added.
 * (2) The thread is put to sleep in the mutex wait queue
 * until a wake up event occurs. If there is already a sleeping
 * lock holder (i.e. an unlocker), it is woken up and we return.
 * A thread that locked a user mutex learns how many
 * contentions were on it as it unlocks it. It is obliged to
 * go to the kernel to wake that many threads up.
 *
 * Each contender sleeps in the kernel, but the times
 * of arrival in the kernel of the unlocker and the
 * contenders are asynchronous.
 *
 * Mutex queue scenarios at any one time:
 *
 * 1) There may be multiple contenders waiting for
 * an earlier lock holder:
 *
 * Lock holders waitqueue: Empty
 * Contenders waitqueue: C - C - C - C
 * Contenders to wake up: 0
 *
 * The lock holder would wake up as many contenders as it counted
 * earlier in userspace as it released the lock.
 *
 * 2) There may be one lock holder waiting for contenders to arrive:
 *
 * Lock holders waitqueue: LH
 * Contenders waitqueue: Empty
 * Contenders to wake up: 5
 *
 * As each contender comes in, the contenders value is reduced, and
 * when it becomes zero, the lock holder is woken up and the mutex
 * deleted.
 *
 * 3) Occasionally multiple lock holders who just released the lock
 * make it to the kernel before any contenders:
 *
 * Contenders: Empty
 * Lock holders: LH
 * Contenders to wake up: 5
 *
 * -> New Lock holder arrives.
 *
 * As soon as the above occurs, the new LH wakes up the waiting one,
 * increments the contenders by its own contender count and starts
 * waiting. The scenario transitions to Scenario (2) in this case.
 *
 * The asynchronous nature of contender and lock holder arrivals makes
 * for many possibilities, but what matters is that the number of
 * wake-ups equals the number of contended waits.
 */
int mutex_control_lock(struct mutex_queue_head *mqhead,
		       unsigned long mutex_address)
{
@@ -128,24 +168,27 @@ int mutex_control_lock(struct mutex_queue_head *mqhead,
		}
		/* Add the queue to mutex queue list */
		mutex_control_add(mqhead, mutex_queue);
	} else {
		/* See if there is a lock holder */
		if (mutex_queue->wqh_holders.sleepers) {
			/*
			 * If yes, wake it up async and we can *hope*
			 * to acquire the lock before the lock holder
			 */
		} else if (mutex_queue->wqh_holders.sleepers) {
			/*
			 * There's a lock holder, so we can consume from
			 * the number of contenders since we are one of them.
			 */
			mutex_queue->contenders--;

			/* No contenders left as far as current holder is concerned */
			if (mutex_queue->contenders == 0) {
				/* Wake up current holder */
				wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);

-				/* Since noone is left, delete the mutex queue */
+				/* There must not be any contenders, delete the mutex */
				mutex_control_remove(mqhead, mutex_queue);
				mutex_control_delete(mutex_queue);

				/* Release lock and return */
				mutex_queue_head_unlock(mqhead);

				return 0;
			}

			/* Release lock and return */
			mutex_queue_head_unlock(mqhead);
			return 0;
		}

		/* Prepare to wait on the contenders queue */
@@ -160,22 +203,8 @@ int mutex_control_lock(struct mutex_queue_head *mqhead,
	return wait_on_prepared_wait();
}

/*
 * A thread that has detected a contention on a mutex that
 * it had locked but has just released is expected to show up with
 * that mutex here.
 *
 * (1) The mutex is converted into its physical form and
 * searched for in the existing mutex list. If not found,
 * a new one is created and the thread sleeps there as a lock
 * holder.
 * (2) All the threads waiting on this mutex are woken up. This may
 * cause a thundering herd, but user threads cannot be trusted
 * to acquire the mutex; waking up all of them increases the
 * chances that some thread may acquire it.
 */
int mutex_control_unlock(struct mutex_queue_head *mqhead,
-			 unsigned long mutex_address)
+			 unsigned long mutex_address, int contenders)
{
	struct mutex_queue *mutex_queue;

@@ -190,6 +219,9 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
		return -ENOMEM;
	}

+	/* Set new or increment the contenders value */
+	mutex_queue->contenders = contenders;
+
	/* Add the queue to mutex queue list */
	mutex_control_add(mqhead, mutex_queue);

@@ -206,51 +238,67 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
	return wait_on_prepared_wait();
}

+	/* Set new or increment the contenders value */
+	mutex_queue->contenders += contenders;
+
+	/* Wake up holders if any, and take wake up responsibility */
+	if (mutex_queue->wqh_holders.sleepers)
+		wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);

	/*
	 * Note, the mutex in userspace was left free before the
	 * syscall was entered.
	 *
	 * It is possible that a thread has acquired it, another
	 * contended on it, and the holder made it to the kernel
	 * quicker than us. We detect this situation here.
	 * Now wake up as many contenders as possible, otherwise
	 * go to sleep on the holders queue.
	 */
-	if (mutex_queue->wqh_holders.sleepers) {
-		/*
-		 * Let the first holder do all the waking up
-		 */
-		mutex_queue_head_unlock(mqhead);
-		return 0;
+	while (mutex_queue->contenders &&
+	       mutex_queue->wqh_contenders.sleepers) {
+		/* Reduce total contenders to be woken up */
+		mutex_queue->contenders--;
+
+		/* Wake up a contender who made it to the kernel */
+		wake_up(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
	}

	/*
-	 * Found it, if it exists, there are contenders,
-	 * now wake all of them up in FIFO order.
-	 * FIXME: Make sure this is FIFO order. It doesn't seem so.
+	 * Are we done with all? Leave.
	 *
+	 * Not enough contenders? Go to sleep and wait for a new
+	 * contender rendezvous.
	 */
-	wake_up_all(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
+	if (mutex_queue->contenders == 0) {
+		/* Delete only if no more contenders */
+		if (mutex_queue->wqh_contenders.sleepers == 0) {
+			/* Since noone is left, delete the mutex queue */
+			mutex_control_remove(mqhead, mutex_queue);
+			mutex_control_delete(mutex_queue);
+		}

-	/* Since noone is left, delete the mutex queue */
-	mutex_control_remove(mqhead, mutex_queue);
-	mutex_control_delete(mutex_queue);
		/* Release lock and return */
		mutex_queue_head_unlock(mqhead);
+	} else {
+		/* Prepare to wait on the lock holders queue */
+		CREATE_WAITQUEUE_ON_STACK(wq, current);
+
+		/* Prepare to wait */
+		wait_on_prepare(&mutex_queue->wqh_holders, &wq);
+
+		/* Release lock first */
+		mutex_queue_head_unlock(mqhead);
+
+		/* Initiate prepared wait */
+		return wait_on_prepared_wait();
+	}

	/* Release lock and return */
	mutex_queue_head_unlock(mqhead);
	return 0;
}

-int sys_mutex_control(unsigned long mutex_address, int mutex_op)
+int sys_mutex_control(unsigned long mutex_address, int mutex_flags)
{
	unsigned long mutex_physical;
-	int ret = 0;
+	int mutex_op = mutex_operation(mutex_flags);
+	int contenders = mutex_contenders(mutex_flags);
+	int ret;

-	// printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);
-
-	/* Check valid operation */
-	if (mutex_op != MUTEX_CONTROL_LOCK &&
-	    mutex_op != MUTEX_CONTROL_UNLOCK) {
-		printk("Invalid args to %s.\n", __FUNCTION__);
-		return -EINVAL;
-	}
+	//printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);

	/* Check valid user virtual address */
	if (KERN_ADDR(mutex_address)) {
@@ -258,6 +306,10 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
		return -EINVAL;
	}

+	if (mutex_op != MUTEX_CONTROL_LOCK &&
+	    mutex_op != MUTEX_CONTROL_UNLOCK)
+		return -EPERM;
+
	if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0)
		return ret;

@@ -278,11 +330,8 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
		break;
	case MUTEX_CONTROL_UNLOCK:
		ret = mutex_control_unlock(&curcont->mutex_queue_head,
-					   mutex_physical);
+					   mutex_physical, contenders);
		break;
-	default:
-		printk("%s: Invalid operands\n", __FUNCTION__);
-		ret = -EINVAL;
	}

	return ret;
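To make the contender-counting protocol concrete, here is a minimal sketch of the userspace side it implies. The atomic helpers, wrapper names, and flag encoding are assumptions for illustration (simplified; a real protocol must avoid double-counting across retries), not codezero's actual user library:

/* Hypothetical user-side mutex built on sys_mutex_control().
 * lock word: 0 = free, 1 = held, >1 = held with (val - 1) contenders. */
void user_mutex_lock(unsigned long *lock)
{
	/* atomic_add_return() returning the new value is assumed */
	while (atomic_add_return(lock, 1) != 1)
		/* Contended: sleep in the kernel on this mutex address */
		l4_mutex_control(lock, MUTEX_CONTROL_LOCK);
}

void user_mutex_unlock(unsigned long *lock)
{
	/* Atomic read-and-clear learns how many contentions piled up */
	unsigned long val = atomic_swap(lock, 0);

	if (val > 1)
		/* Obliged to wake that many contenders; the count is
		 * assumed packed into the flags word, mirroring the
		 * kernel's mutex_contenders(mutex_flags) accessor. */
		l4_mutex_control(lock, MUTEX_CONTROL_UNLOCK |
				 mutex_flags_contenders(val - 1));
}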
@@ -494,10 +494,22 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
				  MAP_USR_RW, 1)) < 0)
		return err;

-	if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE)
+	if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) {
		if (!(task = tcb_find(ids->tid)))
			return -ESRCH;
+
+		/*
+		 * Tasks may only operate on their children. They may
+		 * also destroy themselves or any children.
+		 */
+		if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
+		    !task_is_child(task) && task != current)
+			return -EPERM;
+		if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY &&
+		    !task_is_child(task))
+			return -EPERM;
+	}

	if ((err = cap_thread_check(task, flags, ids)) < 0)
		return err;
@@ -325,7 +325,7 @@ extern int current_irq_nest_count;
 */
void irq_overnest_error(void)
{
-	dprintk("Irqs nested beyond limit. Current count: ",
+	printk("Irqs nested beyond limit. Current count: %d",
	       current_irq_nest_count);
	print_early("System halted...\n");
	while(1)
@@ -4,7 +4,7 @@
Import('env')

# The set of source files associated with this SConscript file.
-src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c']
+src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c', 'atomic.S']

obj = env.Object(src_local)
Return('obj')
@@ -58,59 +58,59 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)

	/* Aborts that can't be handled by a pager yet: */
	case DABT_TERMINAL:
-		dprintk("Terminal fault dabt %x", far);
+		dprintk("Terminal fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_VECTOR:
-		dprintk("Vector abort (obsolete!) %x", far);
+		dprintk("Vector abort (obsolete!) ", far);
		ret = -EABORT;
		break;
	case DABT_ALIGN:
-		dprintk("Alignment fault dabt %x", far);
+		dprintk("Alignment fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL1:
-		dprintk("External LVL1 translation fault %x", far);
+		dprintk("External LVL1 translation fault ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_XLATE_LEVEL2:
-		dprintk("External LVL2 translation fault %x", far);
+		dprintk("External LVL2 translation fault ", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_SECT:
-		dprintk("Section domain fault dabt %x", far);
+		dprintk("Section domain fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_DOMAIN_PAGE:
-		dprintk("Page domain fault dabt %x", far);
+		dprintk("Page domain fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_PERM_SECT:
-		dprintk("Section permission fault dabt %x", far);
+		dprintk("Section permission fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_SECT:
		dprintk("External section linefetch "
-			"fault dabt %x", far);
+			"fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_LFETCH_PAGE:
-		dprintk("Page perm fault dabt %x", far);
+		dprintk("Page perm fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_SECT:
		dprintk("External section non-linefetch "
-			"fault dabt %x ", far);
+			"fault dabt ", far);
		ret = -EABORT;
		break;
	case DABT_EXT_NON_LFETCH_PAGE:
		dprintk("External page non-linefetch "
-			"fault dabt %x ", far);
+			"fault dabt ", far);
		ret = -EABORT;
		break;
	default:
		dprintk("FATAL: Unrecognised/Unknown "
-			"data abort %x ", far);
+			"data abort ", far);
		dprintk("FATAL: FSR code: ", fsr);
		ret = -EABORT;
	}
@@ -122,7 +122,7 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
 */
	if (is_kernel_address(faulted_pc)) {
		dprintk("Unhandled kernel data "
-			"abort at address %x",
+			"abort at address ",
			faulted_pc);
		ret = -EABORT;
	}
@@ -30,33 +30,6 @@ void irq_local_restore(unsigned long state)
	);
}

-u8 l4_atomic_dest_readb(unsigned long *location)
-{
-#if 0
-	unsigned int tmp;
-	__asm__ __volatile__ (
-		"swpb r0, r2, [r1] \n"
-		: "=r"(tmp)
-		: "r"(location), "r"(0)
-		: "memory"
-	);
-
-	return (u8)tmp;
-#endif
-
-	unsigned int tmp;
-	unsigned long state;
-	irq_local_disable_save(&state);
-
-	tmp = *location;
-	*location = 0;
-
-	irq_local_restore(state);
-
-	return (u8)tmp;
-}
-
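The removed helper (presumably reimplemented in the newly added atomic.S) provides destructive-read semantics: return the current value and zero the location in one indivisible step, so a pending-event flag can be consumed exactly once. A hedged sketch of the intended usage pattern, with hypothetical names:

/* Sketch of the destructive-read pattern. Without atomicity, an irq
 * landing between the read and the clear would be silently lost. */
static unsigned long pending_events;

void consume_events(void)
{
	u8 count = l4_atomic_dest_readb(&pending_events);

	while (count--)
		handle_one_event();	/* assumed per-event handler */
}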
int irqs_enabled(void)
{
	int tmp;
@@ -372,9 +372,9 @@ void arch_space_switch(struct ktcb *to)
void idle_task(void)
{
	while(1) {
		/* Do maintenance */
		tcb_delete_zombies();

		// printk("Idle task.\n");
		schedule();
	}
}
@@ -7,7 +7,6 @@
#include INC_ARCH(asm.h)
#include INC_ARCH(asm-macros.S)

.balign 4096
.section .data.vectors
__vector_vaddr:

@@ -896,5 +895,4 @@ __irq_stack: .space 128
__fiq_stack: .space 128
__und_stack: .space 128

.balign 4096
@@ -1,25 +1,30 @@
/*
- * PLXXX Generic Interrupt Controller support.
+ * Generic Interrupt Controller support.
 *
- * This is more ARM Realview EB/PB
- * Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
+ * Copyright (C) 2009-2010 B Labs Ltd.
+ *
+ * Authors: Prem Mallappa, Bahadir Balban
 */

#include <l4/lib/bit.h>
#include <l4/lib/printk.h>
#include <l4/generic/irq.h>
#include INC_PLAT(irq.h)
-#include INC_SUBARCH(mmu_ops.h) /* for dmb/dsb() */
+#include INC_SUBARCH(mmu_ops.h)
#include <l4/drivers/irq/gic/gic.h>
+#include <l4/generic/smp.h>
+
+#define GIC_ACK_IRQ_MASK	0x1FF
+#define GIC_ACK_CPU_MASK	0xE00
+#define GIC_IRQ_SPURIOUS	0x3FF

volatile struct gic_data gic_data[IRQ_CHIPS_MAX];

static inline struct gic_data *get_gic_data(l4id_t irq)
{
-	struct irq_chip *chip = irq_desc_array[irq].chip;
+	volatile struct irq_chip *chip = irq_desc_array[irq].chip;

	if (chip)
-		return (struct gic_data *)irq_desc_array[irq].chip->data;
+		return (struct gic_data *)chip->data;
	else
		return 0;
}
@@ -27,115 +32,129 @@ static inline struct gic_data *get_gic_data(l4id_t irq)
/* Returns the irq number on this chip converting the irq bitvector */
l4id_t gic_read_irq(void *data)
{
-	int irq;
	volatile struct gic_data *gic = (struct gic_data *)data;
-	irq = gic->cpu->ack & 0x1ff;
+	l4id_t irq = gic->cpu->ack;

-	if (irq == 1023)
-		return -1023; /* Spurious */
+	/* This is an IPI - EOI it here, since it requires cpu field */
+	if ((irq & GIC_ACK_IRQ_MASK) < 16) {
+		gic_eoi_irq(irq);
+		/* Get the actual irq number */
+		irq &= GIC_ACK_IRQ_MASK;
+	}
+
+	/* Detect GIC spurious magic value and return generic one */
+	if (irq == GIC_IRQ_SPURIOUS)
+		return IRQ_SPURIOUS;
	return irq;
}

void gic_mask_irq(l4id_t irq)
{
-	u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
	volatile struct gic_data *gic = get_gic_data(irq);
+	u32 offset = irq >> 5; /* irq / 32 */

	gic->dist->clr_en[offset] = 1 << (irq % 32);
}

void gic_unmask_irq(l4id_t irq)
{
	volatile struct gic_data *gic = get_gic_data(irq);
-	u32 offset = irq >> 5; /* offset = irq / 32 */
+	u32 offset = irq >> 5; /* irq / 32 */

	gic->dist->set_en[offset] = 1 << (irq % 32);
}
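The enable/disable registers pack one bit per interrupt, 32 to a word, so the word index is irq >> 5 and the bit position is irq % 32. A quick standalone worked example of that arithmetic (illustrative, not kernel code):

#include <stdio.h>

/* Worked example of the bit/word arithmetic in gic_mask_irq():
 * e.g. irq 45 lives in word 1 (45 >> 5), bit 13 (45 % 32). */
int main(void)
{
	unsigned int irq = 45;
	unsigned int word = irq >> 5;	/* irq / 32, no division */
	unsigned int bit  = irq % 32;

	printf("irq %u -> clr_en[%u] |= 1 << %u\n", irq, word, bit);
	return 0;
}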
-void gic_ack_irq(l4id_t irq)
+void gic_eoi_irq(l4id_t irq)
{
-	u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
-	volatile struct gic_data *gic = get_gic_data(irq);
-	gic->dist->clr_en[offset] = 1 << (irq % 32);
+	/* Careful, irq may have cpu field encoded */
+	volatile struct gic_data *gic =
+		get_gic_data(irq & GIC_ACK_IRQ_MASK);
+
+	gic->cpu->eoi = irq;
}

void gic_ack_and_mask(l4id_t irq)
{
-	gic_ack_irq(irq);
+	//printk("disable/eoi irq %d\n", irq);
	gic_mask_irq(irq);
+	gic_eoi_irq(irq);
}
void gic_set_pending(l4id_t irq)
{
-	u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
	volatile struct gic_data *gic = get_gic_data(irq);
+	u32 offset = irq >> 5; /* irq / 32 */
	gic->dist->set_pending[offset] = 1 << (irq % 32);
}

void gic_clear_pending(l4id_t irq)
{
-	u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
	volatile struct gic_data *gic = get_gic_data(irq);
+	u32 offset = irq >> 5; /* irq / 32 */

	gic->dist->clr_pending[offset] = 1 << (irq % 32);
}

void gic_cpu_init(int idx, unsigned long base)
{
-	struct gic_cpu *cpu;
-	cpu = gic_data[idx].cpu = (struct gic_cpu *)base;
+	volatile struct gic_cpu *cpu;
+
+	gic_data[idx].cpu = (struct gic_cpu *)base;
+
+	cpu = gic_data[idx].cpu;

	/* Disable */
	cpu->control = 0;

	/* Set */
	cpu->prio_mask = 0xf0;
	cpu->bin_point = 3;

	/* Enable */
	cpu->control = 1;
}

void gic_dist_init(int idx, unsigned long base)
{
-	int i, irqs_per_word; /* Interrupts per word */
-	struct gic_dist *dist;
-	dist = gic_data[idx].dist = (struct gic_dist *)(base);
+	volatile struct gic_dist *dist;
+	int irqs_per_word;
+	int nirqs;

-	/* Surely disable GIC */
+	gic_data[idx].dist = (struct gic_dist *)(base);
+
+	dist = gic_data[idx].dist;
+
+	/* Disable gic */
	dist->control = 0;

	/* 32*(N+1) interrupts supported */
-	int nirqs = 32 * ((dist->type & 0x1f) + 1);
+	nirqs = 32 * ((dist->type & 0x1f) + 1);
	if (nirqs > IRQS_MAX)
		nirqs = IRQS_MAX;

-	/* Clear all interrupts */
+	/* Disable all interrupts */
	irqs_per_word = 32;
-	for(i = 0; i < nirqs ; i+=irqs_per_word) {
+	for (int i = 0; i < nirqs; i += irqs_per_word)
		dist->clr_en[i/irqs_per_word] = 0xffffffff;
-	}

	/* Clear all pending interrupts */
-	for(i = 0; i < nirqs ; i+=irqs_per_word) {
+	for (int i = 0; i < nirqs; i += irqs_per_word)
		dist->clr_pending[i/irqs_per_word] = 0xffffffff;
-	}

	/* Set all irqs as normal priority, 8 bits per interrupt */
	irqs_per_word = 4;
-	for(i = 32; i < nirqs ; i+=irqs_per_word) {
+	for (int i = 32; i < nirqs; i += irqs_per_word)
		dist->priority[i/irqs_per_word] = 0xa0a0a0a0;
-	}

	/* Set all target to cpu0, 8 bits per interrupt */
-	for(i = 32; i < nirqs ; i+=irqs_per_word) {
+	for (int i = 32; i < nirqs; i += irqs_per_word)
		dist->target[i/irqs_per_word] = 0x01010101;
-	}

	/* Configure all to be level-sensitive, 2 bits per interrupt */
	irqs_per_word = 16;
-	for(i = 32; i < nirqs ; i+=irqs_per_word) {
+	for (int i = 32; i < nirqs; i += irqs_per_word)
		dist->config[i/irqs_per_word] = 0x00000000;
-	}

	/* Enable GIC Distributor */
	dist->control = 1;
@@ -143,24 +162,28 @@ void gic_dist_init(int idx, unsigned long base)

/* Some functions, may be helpful */
-void gic_set_target(u32 irq, u32 cpu)
+void gic_set_target(l4id_t irq, u32 cpu)
{
	/* cpu is a mask, not cpu number */
	cpu &= 0xF;
	irq &= 0xFF;
	volatile struct gic_data *gic = get_gic_data(irq);
-	u32 offset = irq >> 2; /* offset = irq / 4 */
+	u32 offset = irq >> 2; /* irq / 4 */

+	if (cpu > 1) {
+		printk("Setting irqs to reach multiple cpu targets requires a"
+		       "lock on the irq controller\n"
+		       "GIC is a racy hardware in this respect\n");
+		BUG();
+	}
+
	gic->dist->target[offset] |= (cpu << ((irq % 4) * 8));
}

u32 gic_get_target(u32 irq)
{
	/* cpu is a mask, not cpu number */
-	unsigned int target;
	irq &= 0xFF;
-	u32 offset = irq >> 2; /* offset = irq / 4 */
	volatile struct gic_data *gic = get_gic_data(irq);
-	target = gic->dist->target[offset];
+	u32 offset = irq >> 2; /* irq / 4 */
+	unsigned int target = gic->dist->target[offset];

	BUG_ON(irq > 0xFF);
	target >>= ((irq % 4) * 8);

	return target & 0xFF;
@@ -168,54 +191,44 @@ u32 gic_get_target(u32 irq)

void gic_set_priority(u32 irq, u32 prio)
{
	/* cpu is a mask, not cpu number */
	prio &= 0xF;
	irq &= 0xFF;
-	u32 offset = irq >> 3; /* offset = irq / 8 */
	volatile struct gic_data *gic = get_gic_data(irq);
+	u32 offset = irq >> 3; /* irq / 8 */

	BUG_ON(prio > 0xF);
	BUG_ON(irq > 0xFF);

	/* target = cpu << ((irq % 4) * 4) */
	gic->dist->target[offset] |= (prio << (irq & 0x1C));
}

u32 gic_get_priority(u32 irq)
{
	/* cpu is a mask, not cpu number */
	irq &= 0xFF;
	volatile struct gic_data *gic = get_gic_data(irq);
-	u32 offset = irq >> 3; /* offset = irq / 8 */
-	return gic->dist->target[offset] & (irq & 0xFC);
+	u32 offset = irq >> 3; /* offset = irq / 8 */
+	u32 prio = gic->dist->target[offset] & (irq & 0xFC);
+
+	return prio;
}

#define TO_MANY		0 /* to all specified in a CPU mask */
#define TO_OTHERS	1 /* all but me */
#define TO_SELF		2 /* just to the requesting CPU */
+#define IPI_CPU_SHIFT	16

#define CPU_MASK_BIT	16
#define TYPE_MASK_BIT	24

-void gic_send_ipi(int cpu, int ipi_cmd)
+void gic_send_ipi(int cpumask, int ipi_cmd)
{
-	/* if cpu is 0, then ipi is sent to self
-	 * if cpu has exactly 1 bit set, the ipi to just that core
-	 * if cpu has a mask, sent to all but current
-	 */
-	struct gic_dist *dist = gic_data[0].dist;
-
-	ipi_cmd &= 0xf;
-	cpu &= 0xff;

	dsb();

-	if (cpu == 0) /* Self */
-		dist->soft_int = (TO_SELF << 24) | ipi_cmd;
-	else if ((cpu & (cpu-1)) == 0) /* Exactly to one CPU */
-		dist->soft_int = (TO_MANY << 24) | (cpu << 16) | ipi_cmd;
-	else /* All but me */
-		dist->soft_int = (TO_OTHERS << 24) | (cpu << 16) | ipi_cmd;
+	volatile struct gic_dist *dist = gic_data[0].dist;
+	unsigned int ipi_word = (cpumask << IPI_CPU_SHIFT) | ipi_cmd;
+
+	dist->soft_int = ipi_word;
}
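The rewritten gic_send_ipi() pushes the targeting decision to the caller: the software-interrupt register word encodes the target-list type in bits 24-25, the CPU target mask in bits 16-23, and the interrupt ID in the low bits. A hedged sketch of composing such a word (layout per the ARM GIC architecture; the helper name is illustrative):

/* Illustrative encoding for the GIC software-interrupt register.
 * type 0 targets the listed cpus; 1 = all but self; 2 = self only. */
static inline unsigned int sgi_word(unsigned int type,
				    unsigned int cpumask,
				    unsigned int ipi_cmd)
{
	return (type << 24) | ((cpumask & 0xff) << 16) | (ipi_cmd & 0xf);
}

/* e.g. IPI 1 to cpu0 only: sgi_word(0, 1 << 0, 1) == 0x00010001 */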
/* Make the generic code happy :) */
void gic_print_cpu()
{
	volatile struct gic_cpu *cpu = gic_data[0].cpu;

	printk("GIC CPU%d highest pending: %d\n", smp_get_cpuid(), cpu->high_pending);
	printk("GIC CPU%d running: %d\n", smp_get_cpuid(), cpu->running);
}

/* Make the generic code happy */
void gic_dummy_init()
{
@@ -121,6 +121,7 @@ int init_pager(struct pager *pager, struct container *cont)
	/* Add the address space to container space list */
	address_space_add(task->space);

+#if 0
	printk("%s: Mapping 0x%lx bytes (%lu pages) "
	       "from 0x%lx to 0x%lx for %s\n",
	       __KERNELNAME__, pager->memsize,
@@ -131,6 +132,58 @@ int init_pager(struct pager *pager, struct container *cont)
	add_mapping_pgd(pager->start_lma, pager->start_vma,
			page_align_up(pager->memsize),
			MAP_USR_RWX, TASK_PGD(task));
+#else
+	/*
+	 * Map the pager with appropriate section flags.
+	 * We page-align (downwards) the various kinds of sections;
+	 * this automatically takes care of the case where different
+	 * kinds of data (e.g. RX, RO) lie on the same page.
+	 * One assumption made here is that the start of the first
+	 * RW section is already page aligned; if that is not true,
+	 * it needs special care.
+	 */
+	if (pager->rx_sections_end >= pager->rw_sections_start) {
+		pager->rx_sections_end = page_align(pager->rx_sections_end);
+		pager->rw_sections_start = page_align(pager->rw_sections_start);
+	}
+
+	unsigned long size = 0;
+	if ((size = page_align_up(pager->rx_sections_end) -
+		    page_align_up(pager->rx_sections_start))) {
+		add_mapping_pgd(page_align_up(pager->rx_sections_start -
+					      pager->start_vma +
+					      pager->start_lma),
+				page_align_up(pager->rx_sections_start),
+				size, MAP_USR_RX, TASK_PGD(task));
+
+		printk("%s: Mapping 0x%lx bytes as RX "
+		       "from 0x%lx to 0x%lx for %s\n",
+		       __KERNELNAME__, size,
+		       page_align_up(pager->rx_sections_start -
+				     pager->start_vma + pager->start_lma),
+		       page_align_up(pager->rx_sections_start),
+		       cont->name);
+	}
+
+	if ((size = page_align_up(pager->rw_sections_end) -
+		    page_align_up(pager->rw_sections_start))) {
+		add_mapping_pgd(page_align_up(pager->rw_sections_start -
+					      pager->start_vma +
+					      pager->start_lma),
+				page_align_up(pager->rw_sections_start),
+				size, MAP_USR_RW, TASK_PGD(task));
+
+		printk("%s: Mapping 0x%lx bytes as RW "
+		       "from 0x%lx to 0x%lx for %s\n",
+		       __KERNELNAME__, size,
+		       page_align_up(pager->rw_sections_start -
+				     pager->start_vma + pager->start_lma),
+		       page_align_up(pager->rw_sections_start),
+		       cont->name);
+	}
+
+#endif

	/* Move capability list from dummy to task's space cap list */
	cap_list_move(&task->space->cap_list, &current->cap_list);
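A worked example of the alignment fix-up, with hypothetical addresses and 4 KB pages: if the RX data ends where the RW data starts, both are rounded down so the shared page is mapped once, as RW.

/* Hypothetical numbers; page_align() rounds down, page_align_up() up */
unsigned long rx_start = 0x8000, rx_end = 0x10234;
unsigned long rw_start = 0x10234;

/* rx_end >= rw_start, so both get rounded down to the page boundary */
rx_end   = 0x10000;	/* page_align(0x10234) */
rw_start = 0x10000;	/* page_align(0x10234) */

/* RX size = page_align_up(rx_end) - page_align_up(rx_start)
 *         = 0x10000 - 0x8000 = 0x8000,
 * so RX covers [0x8000, 0x10000) and the shared page at 0x10000
 * is mapped only once, with RW permissions. */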
@@ -1,7 +1,7 @@
/*
 * Generic kernel irq handling.
 *
- * Copyright (C) 2007 - 2009 Bahadir Balban
+ * Copyright (C) 2007 - 2010 Bahadir Balban
 */
#include <l4/config.h>
#include <l4/macros.h>
@@ -127,10 +127,21 @@ l4id_t global_irq_index(void)
	return IRQ_NIL;
}

+#include <l4/drivers/irq/gic/gic.h>
+
void do_irq(void)
{
	l4id_t irq_index = global_irq_index();
-	struct irq_desc *this_irq = irq_desc_array + irq_index;
+	struct irq_desc *this_irq;
+
+	if (irq_index == IRQ_SPURIOUS) {
+		printk("CPU%d: FATAL: Spurious irq\n", smp_get_cpuid());
+		BUG();
+	}
+
+	// printk("CPU%d: Received irq %d\n", smp_get_cpuid(), irq_index);
+
+	this_irq = irq_desc_array + irq_index;

	system_account_irq();

@@ -148,16 +159,10 @@ void do_irq(void)
	/* Handle the irq */
	BUG_ON(!this_irq->handler);
	if (this_irq->handler(this_irq) != IRQ_HANDLED) {
-		printk("Spurious or broken irq\n");
+		printk("CPU%d: FATAL: Spurious or broken irq\n",
+		       smp_get_cpuid());
		BUG();
	}

-	/*
-	 * Do not enable irq if user wants to do it explicitly
-	 */
-	if (!this_irq->user_ack)
-		irq_enable(irq_index);
+	irq_enable(irq_index);
}
@@ -503,6 +503,10 @@ int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
	pager->start_lma = __pfn_to_addr(pinfo->pager_lma);
	pager->start_vma = __pfn_to_addr(pinfo->pager_vma);
	pager->memsize = __pfn_to_addr(pinfo->pager_size);
+	pager->rw_sections_start = pinfo->rw_sections_start;
+	pager->rw_sections_end = pinfo->rw_sections_end;
+	pager->rx_sections_start = pinfo->rx_sections_start;
+	pager->rx_sections_end = pinfo->rx_sections_end;

	/* Copy all cinfo structures into real capabilities */
	for (int i = 0; i < pinfo->ncaps; i++) {
@@ -119,12 +119,14 @@ void sched_init()

	sched->rq_runnable = &sched->sched_rq[0];
	sched->rq_expired = &sched->sched_rq[1];
+	sched->rq_rt_runnable = &sched->sched_rq[2];
+	sched->rq_rt_expired = &sched->sched_rq[3];
	sched->prio_total = TASK_PRIO_TOTAL;
	sched->idle_task = current;
}

/* Swap runnable and expired runqueues. */
-static void sched_rq_swap_runqueues(void)
+static void sched_rq_swap_queues(void)
{
	struct runqueue *temp;

@@ -136,6 +138,18 @@ static void sched_rq_swap_runqueues(void)
	per_cpu(scheduler).rq_expired = temp;
}

+static void sched_rq_swap_rtqueues(void)
+{
+	struct runqueue *temp;
+
+	BUG_ON(list_empty(&per_cpu(scheduler).rq_rt_expired->task_list));
+
+	/* Queues are swapped and expired list becomes runnable */
+	temp = per_cpu(scheduler).rq_rt_runnable;
+	per_cpu(scheduler).rq_rt_runnable = per_cpu(scheduler).rq_rt_expired;
+	per_cpu(scheduler).rq_rt_expired = temp;
+}
+
/* Set policy on where to add tasks in the runqueue */
#define RQ_ADD_BEHIND	0
#define RQ_ADD_FRONT	1

@@ -185,6 +199,28 @@ static inline void sched_rq_remove_task(struct ktcb *task)
	sched_unlock_runqueues(sched, irqflags);
}

+static inline void
+sched_run_task(struct ktcb *task, struct scheduler *sched)
+{
+	if (task->flags & TASK_REALTIME)
+		sched_rq_add_task(task, sched->rq_rt_runnable,
+				  RQ_ADD_BEHIND);
+	else
+		sched_rq_add_task(task, sched->rq_runnable,
+				  RQ_ADD_BEHIND);
+}
+
+static inline void
+sched_expire_task(struct ktcb *task, struct scheduler *sched)
+{
+	if (task->flags & TASK_REALTIME)
+		sched_rq_add_task(current, sched->rq_rt_expired,
+				  RQ_ADD_BEHIND);
+	else
+		sched_rq_add_task(current, sched->rq_expired,
+				  RQ_ADD_BEHIND);
+}
+
void sched_init_task(struct ktcb *task, int prio)
{
@@ -196,6 +232,27 @@ void sched_init_task(struct ktcb *task, int prio)
	task->flags |= TASK_RESUMING;
}

+/* Synchronously resumes a task */
+void sched_resume_sync(struct ktcb *task)
+{
+	BUG_ON(task == current);
+	task->state = TASK_RUNNABLE;
+	sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
+	schedule();
+}
+
+/*
+ * Asynchronously resumes a task.
+ * The task will run in the future, but at
+ * the scheduler's discretion. It is possible that the current
+ * task wakes itself up via this function in the scheduler().
+ */
+void sched_resume_async(struct ktcb *task)
+{
+	task->state = TASK_RUNNABLE;
+	sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
+}
+
/*
 * Takes all the action that will make a task sleep
 * in the scheduler. If the task is woken up before
@@ -210,37 +267,10 @@ void sched_prepare_sleep()
	preempt_enable();
}

-/* Synchronously resumes a task */
-void sched_resume_sync(struct ktcb *task)
-{
-	BUG_ON(task == current);
-	task->state = TASK_RUNNABLE;
-	sched_rq_add_task(task,
-			  per_cpu_byid(scheduler,
-				       task->affinity).rq_runnable,
-			  RQ_ADD_FRONT);
-	schedule();
-}
-
-/*
- * Asynchronously resumes a task.
- * The task will run in the future, but at
- * the scheduler's discretion. It is possible that the current
- * task wakes itself up via this function in the scheduler().
- */
-void sched_resume_async(struct ktcb *task)
-{
-	task->state = TASK_RUNNABLE;
-	sched_rq_add_task(task,
-			  per_cpu_byid(scheduler,
-				       task->affinity).rq_runnable,
-			  RQ_ADD_FRONT);
-	// printk("CPU%d: Resuming task %d with affinity %d\n", smp_get_cpuid(), task->tid, task->affinity);
-}
-
/*
 * NOTE: Could do these as sched_prepare_suspend()
 * + schedule() or need_resched = 1
 * preempt_enable/disable()'s are for avoiding the
 * entry to scheduler during this period - but this
 * is only true for current cpu.
 */
void sched_suspend_sync(void)
{
@@ -282,6 +312,11 @@ static inline void context_switch(struct ktcb *next)
	system_account_context_switch();

	/* Flush caches and everything */
+	BUG_ON(!current);
+	BUG_ON(!current->space);
+	BUG_ON(!next);
+	BUG_ON(!next->space);
+	BUG_ON(!next->space);
	if (current->space->spid != next->space->spid)
		arch_space_switch(next);

@@ -306,6 +341,107 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total)
		CONFIG_SCHED_TICKS * task->priority / prio_total;
}

+/*
+ * Select a real-time task 1/8th of any one selection
+ */
+static inline int sched_select_rt(struct scheduler *sched)
+{
+	int ctr = sched->task_select_ctr++ & 0xF;
+
+	if (ctr == 0 || ctr == 8 || ctr == 15)
+		return 0;
+	else
+		return 1;
+}
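A quick standalone check of what this counter pattern actually yields: the counter is masked to a 16-value window and three of the sixteen values fall through to the regular queues, so a real-time task is preferred on 13/16 (about 81%) of selections — close to, though not exactly, the 87.5% (14/16) quoted in the comment below. The demo names are illustrative:

#include <stdio.h>

/* Hypothetical standalone demo of the RT-selection pattern above. */
static int select_rt(unsigned int ctr)
{
	ctr &= 0xF;
	return !(ctr == 0 || ctr == 8 || ctr == 15);
}

int main(void)
{
	int rt = 0;

	for (unsigned int i = 0; i < 16; i++)
		rt += select_rt(i);

	/* Prints 13: real-time picked 13/16 (~81%) of the time */
	printf("rt picks per 16 selections: %d\n", rt);
	return 0;
}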
/*
 * Selection happens as follows:
 *
 * A real-time task is chosen 87.5% of the time. This is evenly
 * distributed over a given interval.
 *
 * The idle task is run once when it is explicitly suggested (e.g.
 * for cleanup after a task exited), but only when no real-time
 * tasks are in the queues.
 *
 * The idle task is otherwise run only when no other tasks are
 * runnable.
 */
struct ktcb *sched_select_next(void)
{
	struct scheduler *sched = &per_cpu(scheduler);
	int realtime = sched_select_rt(sched);
	struct ktcb *next = 0;

	for (;;) {
		/* Decision to run an RT task? */
		if (realtime && sched->rq_rt_runnable->total > 0) {
			/* Get a real-time task, if available */
			next = link_to_struct(sched->rq_rt_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		} else if (realtime && sched->rq_rt_expired->total > 0) {
			/* Swap real-time queues */
			sched_rq_swap_rtqueues();
			/* Get a real-time task */
			next = link_to_struct(sched->rq_rt_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		/* Idle flagged for run? */
		} else if (sched->flags & SCHED_RUN_IDLE) {
			/* Clear idle flag */
			sched->flags &= ~SCHED_RUN_IDLE;
			next = sched->idle_task;
			break;
		} else if (sched->rq_runnable->total > 0) {
			/* Get a regular runnable task, if available */
			next = link_to_struct(sched->rq_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		} else if (sched->rq_expired->total > 0) {
			/* Swap queues and pick from the fresh runnable queue */
			sched_rq_swap_queues();
			next = link_to_struct(sched->rq_runnable->task_list.next,
					      struct ktcb, rq_list);
			break;
		} else if (in_process_context()) {
			/* No runnable task. Do idle if in process context */
			next = sched->idle_task;
			break;
		} else {
			/*
			 * Nobody is runnable. Irq calls must return
			 * to the interrupted current process to run
			 * the idle task.
			 */
			next = current;
			break;
		}
	}
	return next;
}

/* Prepare next runnable task right before switching to it */
void sched_prepare_next(struct ktcb *next)
{
	/* New tasks affect runqueue total priority. */
	if (next->flags & TASK_RESUMING)
		next->flags &= ~TASK_RESUMING;

	/* Zero ticks indicates task hasn't run since last rq swap */
	if (next->ticks_left == 0) {
		/*
		 * Redistribute timeslice. We do this as each task
		 * becomes runnable rather than all at once. It is done
		 * every runqueue swap.
		 */
		sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
		next->ticks_left = next->ticks_assigned;
	}

	/* Reinitialise task's schedule granularity boundary */
	next->sched_granule = SCHED_GRANULARITY;
}

/*
 * Tasks come here, either by setting need_resched (via next irq),
@@ -359,13 +495,9 @@ void schedule()

	if (current->state == TASK_RUNNABLE) {
		sched_rq_remove_task(current);
		if (current->ticks_left)
-			sched_rq_add_task(current,
-					  per_cpu(scheduler).rq_runnable,
-					  RQ_ADD_BEHIND);
+			sched_run_task(current, &per_cpu(scheduler));
		else
-			sched_rq_add_task(current,
-					  per_cpu(scheduler).rq_expired,
-					  RQ_ADD_BEHIND);
+			sched_expire_task(current, &per_cpu(scheduler));
	}

	/*
@@ -391,52 +523,17 @@ void schedule()
		sched_suspend_async();
	}

-	/* Simpler task pick up loop. May put in sched_pick_next() */
-	for (;;) {
-		struct scheduler *sched = &per_cpu(scheduler);
-
-		/* If we or a child has just exited, run idle task once for clean up */
-		if (current->flags & TASK_EXITED) {
-			current->flags &= ~TASK_EXITED;
-			next = sched->idle_task;
-			break;
-		} else if (sched->rq_runnable->total > 0) {
-			/* Get a runnable task, if available */
-			next = link_to_struct(sched->rq_runnable->task_list.next,
-					      struct ktcb, rq_list);
-			break;
-		} else if (sched->rq_expired->total > 0) {
-			/* Swap queues and retry */
-			sched_rq_swap_runqueues();
-			continue;
-		} else if (in_process_context()) {
-			/* Do idle task if no runnable tasks and in process */
-			next = sched->idle_task;
-			break;
-		} else {
-			/* Irq calls must return to interrupted current process */
-			next = current;
-			break;
-		}
+	/* Hint scheduler to run idle asap to free task */
+	if (current->flags & TASK_EXITED) {
+		current->flags &= ~TASK_EXITED;
+		per_cpu(scheduler).flags |= SCHED_RUN_IDLE;
	}

-	/* New tasks affect runqueue total priority. */
-	if (next->flags & TASK_RESUMING)
-		next->flags &= ~TASK_RESUMING;
+	/* Decide on next runnable task */
+	next = sched_select_next();

-	/* Zero ticks indicates task hasn't run since last rq swap */
-	if (next->ticks_left == 0) {
-		/*
-		 * Redistribute timeslice. We do this as each task
-		 * becomes runnable rather than all at once. It is done
-		 * every runqueue swap.
-		 */
-		sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
-		next->ticks_left = next->ticks_assigned;
-	}
-
-	/* Reinitialise task's schedule granularity boundary */
-	next->sched_granule = SCHED_GRANULARITY;
+	/* Prepare next task for running */
+	sched_prepare_next(next);

	/* Finish */
	disable_irqs();
@@ -34,7 +34,6 @@ void tcb_init(struct ktcb *new)

	spin_lock_init(&new->thread_lock);

	init_ktcb_list(&new->child_exit_list);
	cap_list_init(&new->cap_list);

	/* Initialise task's scheduling state and parameters. */
@@ -15,6 +15,7 @@
#include INC_ARCH(exception.h)
#include <l4/api/syscall.h>
#include <l4/api/errno.h>
+#include INC_GLUE(ipi.h)	/* FIXME: Remove this */

/* TODO:
 * 1) Add RTC support.
@@ -141,13 +142,23 @@ void update_process_times(void)
		need_resched = 1;
}

int do_timer_irq(void)
{
	increase_jiffies();
	update_process_times();
	update_system_time();

+#if defined (CONFIG_SMP)
+	smp_send_ipi(cpu_mask_others(), IPI_TIMER_EVENT);
+#endif
+
	return IRQ_HANDLED;
}

+/* Secondary cpus call this */
+int secondary_timer_irq(void)
+{
+	update_process_times();
+	return IRQ_HANDLED;
+}
@@ -16,7 +16,7 @@ src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c']

for name, val in symbols:
    if 'CONFIG_SMP' == name:
-        src_local += ['smp.c', 'ipi.c', 'smp_test.c']
+        src_local += ['smp.c', 'ipi.c']

obj = env.Object(src_local)
Return('obj')
@@ -143,23 +143,13 @@ void setup_idle_task()
	/* Initialize space caps list */
	cap_list_init(&current->space->cap_list);

-#if 0
-	/*
-	 * Unneeded stuff
-	 */
-	/*
-	 * Set up idle context.
-	 */
-	current->context.spsr = ARM_MODE_SVC;
-	current->context.pc = (u32)idle_task;
-	current->context.sp = (u32)align((unsigned long)current + PAGE_SIZE,
-					 STACK_ALIGNMENT);
-#endif
-
	/*
	 * FIXME: This must go to kernel resources init.
	 */

	/* Init scheduler structs */
	sched_init_task(current, TASK_PRIO_NORMAL);

	/*
	 * If using split page tables, kernel
	 * resources must point at the global pgd
@@ -236,18 +226,18 @@ void start_kernel(void)

	sched_init();

-	/* Try to initialize secondary cores if there are any */
-	smp_start_cores();
-
-	/* Remove one-to-one kernel mapping */
-	remove_initial_mapping();
-
	/*
	 * Map and enable high vector page.
	 * Faults can be handled after here.
	 */
	vectors_init();

+	/* Try to initialize secondary cores if there are any */
+	smp_start_cores();
+
+	/* Remove one-to-one kernel mapping */
+	remove_initial_mapping();
+
	/* Remap 1MB kernel sections as 4Kb pages. */
	remap_as_pages((void *)page_align(_start_kernel),
		       (void *)page_align_up(_end_kernel));
@@ -10,9 +10,32 @@
#include INC_GLUE(smp.h)
#include INC_SUBARCH(cpu.h)
#include <l4/lib/printk.h>
+#include <l4/drivers/irq/gic/gic.h>
+#include <l4/generic/time.h>

/* This should be in a file something like exception.S */
+int ipi_handler(struct irq_desc *desc)
+{
+	int ipi_event = (desc - irq_desc_array) / sizeof(struct irq_desc);
+
+	// printk("CPU%d: entered IPI%d\n", smp_get_cpuid(),
+	//        (desc - irq_desc_array) / sizeof(struct irq_desc));
+
+	switch (ipi_event) {
+	case IPI_TIMER_EVENT:
+		// printk("CPU%d: Handling timer ipi\n", smp_get_cpuid());
+		secondary_timer_irq();
+		break;
+	default:
+		printk("CPU%d: IPI with no meaning: %d\n",
+		       smp_get_cpuid(), ipi_event);
+		break;
+	}
+	return 0;
+}
+
void smp_send_ipi(unsigned int cpumask, int ipi_num)
{
	gic_send_ipi(cpumask, ipi_num);
}
@@ -18,7 +18,7 @@
#include <l4/drivers/irq/gic/gic.h>

unsigned long secondary_run_signal;

unsigned long secondary_ready_signal;

void __smp_start(void);

@@ -35,14 +35,17 @@ void smp_start_cores(void)
	arm_smp_inval_icache_entirely();

	/* Start other cpus */
-	for (int i = 1; i < CONFIG_NCPU; i++) {
-		printk("%s: Bringing up CPU%d\n", __KERNELNAME__, i);
-		if ((platform_smp_start(i, smp_start_func)) < 0) {
+	for (int cpu = 1; cpu < CONFIG_NCPU; cpu++) {
+		printk("%s: Bringing up CPU%d\n", __KERNELNAME__, cpu);
+		if ((platform_smp_start(cpu, smp_start_func)) < 0) {
			printk("FATAL: Could not start secondary cpu. "
-			       "cpu=%d\n", i);
+			       "cpu=%d\n", cpu);
			BUG();
		}
-		wfi(); /* wait for other cpu send IPI to core0 */
+
+		/* Wait for this particular secondary to become ready */
+		while (!(secondary_ready_signal & CPUID_TO_MASK(cpu)))
+			dmb();
	}

	scu_print_state();
@@ -50,12 +53,11 @@ void smp_start_cores(void)

void init_smp(void)
{
-	/* Start_secondary_cpus */
-	if (CONFIG_NCPU > 1) {
-
-		/* This sets IPI function pointer at bare minimum */
-		platform_smp_init(CONFIG_NCPU);
-	}
+	/* Start_secondary_cpus */
+	if (CONFIG_NCPU > 1) {
+		/* This sets IPI function pointer at bare minimum */
+		platform_smp_init(CONFIG_NCPU);
+	}
}

void secondary_setup_idle_task(void)
@@ -122,9 +124,9 @@ void smp_secondary_init(void)

	sched_init();

-	dsb();
-
-	gic_send_ipi(CPUID_TO_MASK(0), 0);
+	/* Signal primary that we are ready */
+	dmb();
+	secondary_ready_signal |= cpu_mask_self();

	/*
	 * Wait for the first runnable task to become available
@@ -449,6 +449,6 @@ int printk(char *format, ...)

	va_end(args);
	return i;
-};
+}
@@ -5,6 +5,7 @@
 */
#include <l4/drivers/irq/gic/gic.h>
#include INC_PLAT(irq.h)
+#include <l4/platform/realview/irq.h>
#include <l4/generic/irq.h>

extern struct gic_data gic_data[IRQ_CHIPS_MAX];
@@ -61,3 +62,26 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
};
#endif

+struct irq_desc irq_desc_array[IRQS_MAX] = {
+	[IRQ_TIMER0] = {
+		.name = "Timer0",
+		.chip = &irq_chip_array[0],
+		.handler = platform_timer_handler,
+	},
+	[IRQ_TIMER1] = {
+		.name = "Timer1",
+		.chip = &irq_chip_array[0],
+		.handler = platform_timer_user_handler,
+	},
+	[IRQ_KEYBOARD0] = {
+		.name = "Keyboard",
+		.chip = &irq_chip_array[0],
+		.handler = platform_keyboard_user_handler,
+	},
+	[IRQ_MOUSE0] = {
+		.name = "Mouse",
+		.chip = &irq_chip_array[0],
+		.handler = platform_mouse_user_handler,
+	},
+};
@@ -17,6 +17,25 @@
#include INC_GLUE(mapping.h)
#include INC_GLUE(smp.h)

+/*
+ * FIXME: This is not a platform specific
+ * call, we will move this out later
+ */
+void device_cap_init(struct kernel_resources *kres, int devtype,
+		     int devnum, unsigned long base)
+{
+	struct capability *cap;
+
+	cap = alloc_bootmem(sizeof(*cap), 0);
+	cap_set_devtype(cap, devtype);
+	cap_set_devnum(cap, devnum);
+	cap->start = __pfn(base);
+	cap->end = cap->start + 1;
+	cap->size = cap->end - cap->start;
+	link_init(&cap->list);
+	cap_list_insert(cap, &kres->devmem_free);
+}
+
/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
@@ -24,45 +43,12 @@
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
-	struct capability *uart[4], *timer[4];
-
-	/* Setup capabilities for userspace uarts and timers */
-	uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
-	uart[1]->start = __pfn(PLATFORM_UART1_BASE);
-	uart[1]->end = uart[1]->start + 1;
-	uart[1]->size = uart[1]->end - uart[1]->start;
-	cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
-	cap_set_devnum(uart[1], 1);
-	link_init(&uart[1]->list);
-	cap_list_insert(uart[1], &kres->devmem_free);
-
-	uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
-	uart[2]->start = __pfn(PLATFORM_UART2_BASE);
-	uart[2]->end = uart[2]->start + 1;
-	uart[2]->size = uart[2]->end - uart[2]->start;
-	cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
-	cap_set_devnum(uart[2], 2);
-	link_init(&uart[2]->list);
-	cap_list_insert(uart[2], &kres->devmem_free);
-
-	uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
-	uart[3]->start = __pfn(PLATFORM_UART3_BASE);
-	uart[3]->end = uart[3]->start + 1;
-	uart[3]->size = uart[3]->end - uart[3]->start;
-	cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
-	cap_set_devnum(uart[3], 3);
-	link_init(&uart[3]->list);
-	cap_list_insert(uart[3], &kres->devmem_free);
-
-	/* Setup timer1 capability as free */
-	timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
-	timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
-	timer[1]->end = timer[1]->start + 1;
-	timer[1]->size = timer[1]->end - timer[1]->start;
-	cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
-	cap_set_devnum(timer[1], 1);
-	link_init(&timer[1]->list);
-	cap_list_insert(timer[1], &kres->devmem_free);
+	device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
+	device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
+	device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
+	device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
+	device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
+	device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);

	return 0;
}
@@ -98,5 +84,16 @@ void init_platform_irq_controller()

void init_platform_devices()
{
	/* TIMER23 */
	add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

+	/* KEYBOARD - KMI0 */
+	add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
+			 PAGE_SIZE, MAP_IO_DEFAULT);
+
+	/* MOUSE - KMI1 */
+	add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
+			 PAGE_SIZE, MAP_IO_DEFAULT);
}
@@ -10,6 +10,7 @@
#include INC_PLAT(platform.h)
#include INC_PLAT(timer.h)
#include INC_ARCH(exception.h)
+#include <l4/lib/bit.h>
#include <l4/drivers/irq/pl190/pl190_vic.h>

struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
@@ -68,8 +69,18 @@ static int platform_timer_user_handler(struct irq_desc *desc)
/*
 * Keyboard handler for userspace
 */
+#define PL050_KMICR		0x00
+#define PL050_KMI_RXINTR	(1 << 0x4)
+
static int platform_keyboard_user_handler(struct irq_desc *desc)
{
+	/*
+	 * Disable rx keyboard interrupt.
+	 * User will enable this
+	 */
+	clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR,
+	       PL050_KMI_RXINTR);
+
	irq_thread_notify(desc);
	return 0;
}
@@ -79,6 +90,13 @@ static int platform_keyboard_user_handler(struct irq_desc *desc)
 */
static int platform_mouse_user_handler(struct irq_desc *desc)
{
+	/*
+	 * Disable rx mouse interrupt.
+	 * User will enable this
+	 */
+	clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR,
+	       PL050_KMI_RXINTR);
+
	irq_thread_notify(desc);
	return 0;
}
@@ -92,25 +110,21 @@ struct irq_desc irq_desc_array[IRQS_MAX] = {
		.name = "Timer0",
		.chip = &irq_chip_array[0],
		.handler = platform_timer_handler,
-		.user_ack = 0,
	},
	[IRQ_TIMER1] = {
		.name = "Timer1",
		.chip = &irq_chip_array[0],
		.handler = platform_timer_user_handler,
-		.user_ack = 0,
	},
	[IRQ_KEYBOARD0] = {
		.name = "Keyboard",
		.chip = &irq_chip_array[1],
		.handler = platform_keyboard_user_handler,
-		.user_ack = 1,
	},
	[IRQ_MOUSE0] = {
		.name = "Mouse",
		.chip = &irq_chip_array[1],
		.handler = platform_mouse_user_handler,
-		.user_ack = 1,
	},
};
@@ -103,18 +103,24 @@ void init_platform_irq_controller()
	irq_controllers_init();
}

-/*
- * Add userspace devices here as you develop
- * their irq handlers,
- * Only the devices to which kernel has to do
- * anything needs to be mapped, rest will be
- * mapped in userspace by user
- */
+/* Add userspace devices here as you develop their irq handlers */
void init_platform_devices()
{
	/* TIMER23 */
	add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

	/* KEYBOARD - KMI0 */
	add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

	/* MOUSE - KMI1 */
	add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
			 PAGE_SIZE, MAP_IO_DEFAULT);

+	/* CLCD */
+	add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE,
+			 PAGE_SIZE, MAP_IO_DEFAULT);
}

/* If these bits are off, 32Khz OSC source is used */
@@ -9,7 +9,9 @@
|
||||
#include INC_PLAT(irq.h)
|
||||
#include INC_PLAT(platform.h)
|
||||
#include INC_ARCH(exception.h)
|
||||
#include <l4/lib/bit.h>
|
||||
#include <l4/drivers/irq/gic/gic.h>
|
||||
#include <l4/platform/realview/irq.h>
|
||||
|
||||
extern struct gic_data gic_data[IRQ_CHIPS_MAX];
|
||||
|
||||

@@ -26,7 +28,35 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
                .read_irq = gic_read_irq,
                .ack_and_mask = gic_ack_and_mask,
                .unmask = gic_unmask_irq,
                .set_cpu = gic_set_target,
                },
        },
};

/*
 * Built-in irq handlers initialised at compile time.
 * Else register with register_irq()
 */
struct irq_desc irq_desc_array[IRQS_MAX] = {
        [IRQ_TIMER0] = {
                .name = "Timer0",
                .chip = &irq_chip_array[0],
                .handler = platform_timer_handler,
        },
        [IRQ_TIMER1] = {
                .name = "Timer1",
                .chip = &irq_chip_array[0],
                .handler = platform_timer_user_handler,
        },
        [IRQ_KEYBOARD0] = {
                .name = "Keyboard",
                .chip = &irq_chip_array[0],
                .handler = platform_keyboard_user_handler,
        },
        [IRQ_MOUSE0] = {
                .name = "Mouse",
                .chip = &irq_chip_array[0],
                .handler = platform_mouse_user_handler,
        },
};

@@ -19,6 +19,25 @@
#include <l4/generic/cap-types.h>
#include <l4/drivers/irq/gic/gic.h>

/*
 * FIXME: This is not a platform specific
 * call, we will move this out later
 */
void device_cap_init(struct kernel_resources *kres, int devtype,
                     int devnum, unsigned long base)
{
        struct capability *cap;

        cap = alloc_bootmem(sizeof(*cap), 0);
        cap_set_devtype(cap, devtype);
        cap_set_devnum(cap, devnum);
        cap->start = __pfn(base);
        cap->end = cap->start + 1;
        cap->size = cap->end - cap->start;
        link_init(&cap->list);
        cap_list_insert(cap, &kres->devmem_free);
}
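For concreteness, the capability arithmetic above reduces to page-frame numbers. A standalone sketch (not kernel code), assuming 4K pages so that __pfn() is a right shift by 12; the base address is illustrative:

#include <stdio.h>

#define PAGE_BITS	12
#define __pfn(x)	((unsigned long)(x) >> PAGE_BITS)

int main(void)
{
        unsigned long base = 0x10006000UL;	/* illustrative KMI0 base */
        unsigned long start = __pfn(base);	/* 0x10006 */
        unsigned long end = start + 1;

        /* Each device capability spans exactly one page frame */
        printf("cap: start=0x%lx end=0x%lx size=%lu page\n",
               start, end, end - start);
        return 0;
}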

/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
@@ -26,17 +45,13 @@
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
        struct capability *timer[2];

        /* Setup timer1 capability as free */
        timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
        timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
        timer[1]->end = timer[1]->start + 1;
        timer[1]->size = timer[1]->end - timer[1]->start;
        cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
        cap_set_devnum(timer[1], 1);
        link_init(&timer[1]->list);
        cap_list_insert(timer[1], &kres->devmem_free);
        device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
        device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
        device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
        device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);
        device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE);

        return 0;
}

@@ -54,5 +69,21 @@ void init_platform_irq_controller()

void init_platform_devices()
{
        /* TIMER23 */
        add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);

        /* KEYBOARD - KMI0 */
        add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);

        /* MOUSE - KMI1 */
        add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);

        /* CLCD */
        add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);
}

@@ -7,24 +7,61 @@
#include <l4/generic/time.h>
#include INC_PLAT(offsets.h)
#include INC_PLAT(irq.h)
#include <l4/platform/realview/timer.h>
#include <l4/lib/bit.h>
#include <l4/platform/realview/irq.h>

static int platform_timer_handler(struct irq_desc *desc)
/*
 * Timer handler for userspace
 */
int platform_timer_user_handler(struct irq_desc *desc)
{
        /* Ack the device irq */
        timer_irq_clear(PLATFORM_TIMER1_VBASE);

        /* Notify the userspace */
        irq_thread_notify(desc);

        return 0;
}

/*
 * Keyboard handler for userspace
 */
#define PL050_KMICR		0x00
#define PL050_KMI_RXINTR	(1 << 0x4)
int platform_keyboard_user_handler(struct irq_desc *desc)
{
        /*
         * Disable rx keyboard interrupt.
         * User will enable this
         */
        clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR,
               PL050_KMI_RXINTR);

        irq_thread_notify(desc);
        return 0;
}

/*
 * Mouse handler for userspace
 */
int platform_mouse_user_handler(struct irq_desc *desc)
{
        /*
         * Disable rx mouse interrupt.
         * User will enable this
         */
        clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR,
               PL050_KMI_RXINTR);

        irq_thread_notify(desc);
        return 0;
}

int platform_timer_handler(struct irq_desc *desc)
{
        timer_irq_clear(PLATFORM_TIMER0_VBASE);

        return do_timer_irq();
}

/*
 * Built-in irq handlers initialised at compile time.
 * Else register with register_irq()
 */
struct irq_desc irq_desc_array[IRQS_MAX] = {
        [IRQ_TIMER0] = {
                .name = "Timer0",
                .chip = &irq_chip_array[0],
                .handler = platform_timer_handler,
        },
};

@@ -5,7 +5,7 @@
 *
 * Author: Bahadir Balban
 */
#include <l4/platform/realview/timer.h>
#include <l4/platform/realview/irq.h>
#include <l4/lib/printk.h>
#include INC_PLAT(offsets.h)
#include INC_SUBARCH(perfmon.h)

@@ -4,13 +4,14 @@
 * Copyright (C) 2009 B Labs Ltd.
 */
#include <l4/platform/realview/uart.h>
#include <l4/platform/realview/timer.h>
#include <l4/platform/realview/irq.h>
#include INC_PLAT(offsets.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(smp.h)
#include <l4/generic/irq.h>
#include <l4/generic/space.h>
#include <l4/generic/platform.h>
#include <l4/generic/smp.h>
#include INC_PLAT(platform.h)
#include INC_ARCH(io.h)

@@ -36,6 +37,9 @@ void platform_timer_start(void)
        /* Enable irq line for TIMER0 */
        irq_enable(IRQ_TIMER0);

        /* Set cpu to all cpus for timer0 */
        // irq_set_cpu(IRQ_TIMER0, cpu_all_mask());

        /* Enable timer */
        timer_start(PLATFORM_TIMER0_VBASE);
}

@@ -19,7 +19,6 @@
#include <l4/lib/string.h>
#include <l4/generic/space.h>

extern struct irq_desc irq_desc_array[IRQS_MAX];

/* Print some SCU information */
@@ -49,16 +48,14 @@ void scu_init(void)

void platform_smp_init(int ncpus)
{
        unsigned int i;
        /* Add GIC SoftIRQ (aka IPI) */
        for (int i = 0; i < 16; i++) {
                strncpy(irq_desc_array[i].name, "SoftInt", 8);
                irq_desc_array[i].chip = &irq_chip_array[0];
                irq_desc_array[i].handler = &ipi_handler;
        }

        /* Add GIC SoftIRQ (aka IPI) */
        for (i = 0; i <= 15; i++) {
                strncpy(irq_desc_array[i].name, "SoftInt", 8);
                irq_desc_array[i].chip = &irq_chip_array[0];
                irq_desc_array[i].handler = &ipi_handler;
        }

        add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE,
                         PAGE_SIZE, MAP_IO_DEFAULT);
}
@@ -74,7 +71,7 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int))
        dsb(); /* Make sure the write occurs */

        /* Wake up other core who is waiting on a WFI. */
        gic_send_ipi(CPUID_TO_MASK(cpu), 1);
        gic_send_ipi(CPUID_TO_MASK(cpu), 0);

        return 0;
}
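The sequence in this hunk is: publish the start address, dsb() so the write is visible, then IPI the core parked in WFI. A hypothetical sketch of the parked core's side of that handshake; the symbol names are illustrative and not from this commit:

void secondary_park(void (*volatile *start_addr)(int))
{
        /* Sleep until an interrupt (the IPI) arrives, then re-check */
        while (!*start_addr)
                __asm__ __volatile__("wfi");

        /* Run what platform_smp_start() published before its dsb() */
        (*start_addr)(smp_get_cpuid());
}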

@@ -82,13 +79,4 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int))
void secondary_init_platform(void)
{
        gic_cpu_init(0, GIC0_CPU_VBASE);
        gic_ack_irq(1);

        gic_set_target(IRQ_TIMER0, 1 << smp_get_cpuid());
}

void arch_send_ipi(u32 cpu, int cmd)
{
        gic_send_ipi(cpu, cmd);
}