mirror of
https://github.com/drasko/codezero.git
synced 2026-01-11 18:33:16 +01:00
Changes between 16 March 2010 and 6 April 2010

Mutex system call fixed for multiple contenders. Userspace irq support extended to keyboard/mouse. Scheduler modified for real-time irq tasks.
@@ -17,6 +17,7 @@ gcc_arch_flag = config.gcc_arch_flag
+all_syms = config.all
 builddir = 'build/codezero/'

 # Generate kernel linker script at runtime using template file.
 def generate_kernel_linker_script(target, source, env):
     linker_in = source[0]
@@ -43,13 +44,13 @@ def generate_kernel_phys_linker_script(target, source, env):
         "-I%s -imacros l4/macros.h -imacros %s -imacros %s -C -P %s -o %s" % \
         ('include', 'l4/platform/' + platform + '/offsets.h', \
          'l4/glue/' + arch + '/memlayout.h', phys_linker_in, phys_linker_out)
     print cmd
     os.system(cmd)

 create_kernel_phys_linker = Command(join(builddir, 'include/physlink.lds'), \
-                                    join(PROJROOT, 'include/physlink.lds.in'), \
+                                    join(PROJROOT, 'include/l4/arch/arm/linker.lds.in'), \
                                     generate_kernel_phys_linker_script)
 '''

 env = Environment(CC = config.toolchain_kernel + 'gcc',
                   AR = config.toolchain_kernel + 'ar',
                   RANLIB = config.toolchain_kernel + 'ranlib',
@@ -58,7 +59,7 @@ env = Environment(CC = config.toolchain_kernel + 'gcc',
                   CCFLAGS = ['-g', '-nostdlib', '-ffreestanding', '-std=gnu99', '-Wall', \
                              '-Werror', '-march=' + gcc_arch_flag],
                   LINKFLAGS = ['-nostdlib', '-T' + join(builddir, 'include/l4/arch/arm/linker.lds')],
-                  ASFLAGS = ['-D__ASSEMBLY__'],
+                  ASFLAGS = ['-D__ASSEMBLY__', '-march=' + gcc_arch_flag],
                   PROGSUFFIX = '.elf', # The suffix to use for final executable
                   ENV = {'PATH' : os.environ['PATH']}, # Inherit shell path
                   LIBS = 'gcc', # libgcc.a - This is required for division routines.
@@ -24,7 +24,7 @@ env = Environment(CC = config.toolchain_userspace + 'gcc',
                   CCFLAGS = ['-g', '-nostdlib', '-ffreestanding', '-std=gnu99', '-Wall',
                              '-Werror', '-march=' + gcc_arch_flag],
                   LINKFLAGS = ['-nostdlib'],
-                  ASFLAGS = ['-D__ASSEMBLY__'],
+                  ASFLAGS = ['-D__ASSEMBLY__', '-march=' + gcc_arch_flag],
                   ENV = {'PATH' : os.environ['PATH']}, # Inherit shell path
                   LIBS = 'gcc', # libgcc.a - This is required for division routines.
                   CPPPATH = "#include",
@@ -15,6 +15,10 @@ class Container:
         self.pager_lma = 0
         self.pager_vma = 0
         self.pager_size = 0
+        self.pager_rw_section_start = 0
+        self.pager_rw_section_end = 0
+        self.pager_rx_section_start = 0
+        self.pager_rx_section_end = 0
         self.pager_task_region_start = 0
         self.pager_task_region_end = 0
         self.pager_shm_region_start = 0
@@ -72,6 +76,8 @@ class configuration:
         self.toolchain_userspace = None
         self.toolchain_kernel = None
         self.all = []
+        self.smp = False
+        self.ncpu = 0
         self.containers = []
         self.ncontainers = 0
@@ -87,6 +93,13 @@ class configuration:
                 return parts[1], parts[2]
         return None

+    # Check if SMP is enabled, and get NCPU if it is
+    def get_ncpu(self, name, value):
+        if name[:len("CONFIG_SMP")] == "CONFIG_SMP":
+            self.smp = bool(value)
+        if name[:len("CONFIG_NCPU")] == "CONFIG_NCPU":
+            self.ncpu = int(value)
+
     # Extract architecture from a name value pair
     def get_arch(self, name, val):
         if name[:len("CONFIG_ARCH_")] == "CONFIG_ARCH_":
@@ -40,6 +40,7 @@ def cml2_header_to_symbols(cml2_header, config):
             config.get_arch(name, value)
             config.get_subarch(name, value)
             config.get_platform(name, value)
+            config.get_ncpu(name, value)
             config.get_ncontainers(name, value)
             config.get_container_parameters(name, value)
             config.get_toolchain(name, value)
@@ -140,6 +140,12 @@ int keyboard_irq_handler(void *arg)
		while (data--)
			if ((c = kmi_keyboard_read(keyboard->base, &keyboard->state)))
				printf("%c", c);
+
+		/*
+		 * Kernel has disabled the irq for the keyboard;
+		 * we need to enable it again.
+		 */
+		kmi_rx_irq_enable(keyboard->base);
	}
 }
@@ -174,6 +180,12 @@ int mouse_irq_handler(void *arg)
		while (data--)
			if ((c = kmi_data_read(mouse->base)))
				printf("mouse data: %d\n", c);
+
+		/*
+		 * Kernel has disabled the irq for the mouse;
+		 * we need to enable it again.
+		 */
+		kmi_rx_irq_enable(mouse->base);
	}
 }
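The two handlers above follow the userspace irq pattern this commit extends: the thread registers for a notification slot, blocks until the kernel masks and signals the irq, drains the device FIFO, then unmasks the rx irq at the device itself. A minimal sketch of that loop, assuming the third argument of l4_irq_control() is the irq number and that the library exposes an l4_irq_wait() style blocking call (both assumptions here; struct kmi_dev is a hypothetical descriptor, while kmi_data_read() and kmi_rx_irq_enable() are the PL050 helpers from this commit):

int kmi_irq_thread(void *arg)
{
	struct kmi_dev *dev = arg;	/* hypothetical device descriptor */
	const int slot = 0;		/* notification slot, as in the timer handler */
	int c, err;

	/* Register self for the device irq on the chosen notify slot */
	if ((err = l4_irq_control(IRQ_CONTROL_REGISTER, slot, dev->irq)) < 0)
		return err;

	while (1) {
		/* Block until the kernel masks the irq and notifies us */
		l4_irq_wait(dev->irq);		/* assumed wrapper over irq_wait */

		/* Drain all pending rx data */
		while ((c = kmi_data_read(dev->base)))
			printf("kmi data: %d\n", c);

		/* Kernel left the irq disabled; unmask it at the device */
		kmi_rx_irq_enable(dev->base);
	}
}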
@@ -19,7 +19,7 @@ struct sleeper_task {
 struct wake_task_list {
	struct link head;
	struct link *end;			/* optimization */
-	struct l4_mutex lock;			/* lock for sanity of head */
+	struct l4_mutex wake_list_lock;		/* lock for sanity of head */
 };

 #define BUCKET_BASE_LEVEL_BITS	8
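BUCKET_BASE_LEVEL_BITS sizes the array of sleeper lists, so presumably a task due to wake at a given absolute second is filed under the low bits of that second. A sketch of that indexing, under the assumption that find_bucket_list() masks with the bucket count (the exact scheme is not visible in this hunk):

#define BUCKET_BASE_LEVEL_SIZE	(1 << BUCKET_BASE_LEVEL_BITS)	/* 256 buckets */

/* Hypothetical index calculation: one list per (seconds mod 256) */
static inline int bucket_index(unsigned long seconds)
{
	return seconds & (BUCKET_BASE_LEVEL_SIZE - 1);
}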
@@ -77,7 +77,7 @@ struct timer {
	unsigned long base;			/* Virtual base address */
	unsigned int count;			/* Counter/jiffies */
	struct sleeper_task_bucket task_list;	/* List of sleeping tasks */
-	struct l4_mutex lock;			/* Lock for sleeper_task_bucket */
+	struct l4_mutex task_list_lock;		/* Lock for sleeper_task_bucket */
	struct capability cap;			/* Capability describing timer */
 };

@@ -22,7 +22,7 @@ static int total_caps = 0;

 /* Total number of timer chips being handled by us */
 #define TIMERS_TOTAL	1
-static struct timer timer[TIMERS_TOTAL];
+static struct timer global_timer[TIMERS_TOTAL];

 /* Default timer to be used for sleep/wake etc. purposes */
 #define SLEEP_WAKE_TIMER	0
@@ -85,7 +85,7 @@ void timer_struct_init(struct timer *timer, unsigned long base)
	timer->base = base;
	timer->count = 0;
	timer->slot = 0;
-	l4_mutex_init(&timer->lock);
+	l4_mutex_init(&timer->task_list_lock);

	for (int i = 0; i < BUCKET_BASE_LEVEL_SIZE; ++i) {
		link_init(&timer->task_list.bucket_level0[i]);
@@ -106,7 +106,7 @@ void wake_task_list_init(void)
 {
	link_init(&wake_tasks.head);
	wake_tasks.end = &wake_tasks.head;
-	l4_mutex_init(&wake_tasks.lock);
+	l4_mutex_init(&wake_tasks.wake_list_lock);
 }

 /*
@@ -140,7 +140,7 @@ struct link *find_bucket_list(unsigned long seconds)
	struct link *vector;
	struct sleeper_task_bucket *bucket;

-	bucket = &timer[SLEEP_WAKE_TIMER].task_list;
+	bucket = &global_timer[SLEEP_WAKE_TIMER].task_list;

	/*
	 * TODO: Check if we have already surpassed seconds
@@ -172,8 +172,8 @@ int timer_probe_devices(void)
		/* Match device type */
		if (cap_devtype(&caparray[i]) == CAP_DEVTYPE_TIMER) {
			/* Copy to correct device index */
-			memcpy(&timer[cap_devnum(&caparray[i]) - 1].cap,
-			       &caparray[i], sizeof(timer[0].cap));
+			memcpy(&global_timer[cap_devnum(&caparray[i]) - 1].cap,
+			       &caparray[i], sizeof(global_timer[0].cap));
			timers++;
		}
	}
@@ -196,8 +196,11 @@ int timer_irq_handler(void *arg)
	struct link *vector;
	const int slot = 0;

-	/* Initialise timer */
-	timer_init(timer->base);
+	/*
+	 * Initialise timer:
+	 * 1 interrupt per second
+	 */
+	timer_init(timer->base, 1000000);

	/* Register self for timer irq, using notify slot 0 */
	if ((err = l4_irq_control(IRQ_CONTROL_REGISTER, slot,
@@ -221,34 +224,32 @@ int timer_irq_handler(void *arg)
			BUG();
		}

-		//printf("Got irq(count 0x%x)\n", timer->count);
+		/*
+		 * Update timer count.
+		 * TODO: Overflow check. We have 1 interrupt/sec from the timer;
+		 * with a 32-bit count it will take ~9 years to overflow.
+		 */
		timer->count += count;
+		printf("Got timer irq, current count = 0x%x\n", timer->count);

		/* Find bucket list of tasks to be woken for current count */
		vector = find_bucket_list(timer->count);

		if (!list_empty(vector)) {
			/* Removing tasks from sleeper list */
-			l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock);
+			l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
			task_list = list_detach(vector);
-			l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock);
+			l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);

			/* Add tasks to wake_task_list */
-			l4_mutex_lock(&wake_tasks.lock);
-			list_attach(task_list,
-				    &wake_tasks.head, wake_tasks.end);
-			l4_mutex_unlock(&wake_tasks.lock);
+			l4_mutex_lock(&wake_tasks.wake_list_lock);
+			list_attach(task_list, &wake_tasks.head, wake_tasks.end);
+			l4_mutex_unlock(&wake_tasks.wake_list_lock);

			/*
			 * Send ipc to the handle_requests
			 * thread to send wake signals
			 */
+			printf("sending ipc %d to thread %d\n",
+			       L4_IPC_TAG_TIMER_WAKE_THREADS, tid_ipc_handler);
			l4_send(tid_ipc_handler, L4_IPC_TAG_TIMER_WAKE_THREADS);
		}
	}
@@ -266,17 +267,16 @@ void task_wake(void)
	list_foreach_removable_struct(struct_ptr, temp_ptr,
				      &wake_tasks.head, list) {
		/* Remove task from wake list */
-		l4_mutex_lock(&wake_tasks.lock);
+		l4_mutex_lock(&wake_tasks.wake_list_lock);
		list_remove(&struct_ptr->list);
-		l4_mutex_unlock(&wake_tasks.lock);
+		l4_mutex_unlock(&wake_tasks.wake_list_lock);

		/* Set sender correctly */
		l4_set_sender(struct_ptr->tid);

-#if 0
-		printf("waking thread at time %x\n",
-		       (unsigned int)timer[SLEEP_WAKE_TIMER].count);
-#endif
+		printf("%s: Waking thread 0x%x at time 0x%x\n", __CONTAINER_NAME__,
+		       struct_ptr->tid, global_timer[SLEEP_WAKE_TIMER].count);

		/* Send wake ipc */
		if ((ret = l4_ipc_return(struct_ptr->retval)) < 0) {
			printf("%s: IPC return error: %d.\n",
@@ -302,17 +302,17 @@ int timer_setup_devices(void)

	for (int i = 0; i < TIMERS_TOTAL; i++) {
		/* Initialize timer */
-		timer_struct_init(&timer[i], (unsigned long)l4_new_virtual(1));
+		timer_struct_init(&global_timer[i], (unsigned long)l4_new_virtual(1));

		/* Map timer to a virtual address region */
-		if (IS_ERR(l4_map((void *)__pfn_to_addr(timer[i].cap.start),
-				  (void *)timer[i].base, timer[i].cap.size,
+		if (IS_ERR(l4_map((void *)__pfn_to_addr(global_timer[i].cap.start),
+				  (void *)global_timer[i].base, global_timer[i].cap.size,
				  MAP_USR_IO,
				  self_tid()))) {
			printf("%s: FATAL: Failed to map TIMER device "
			       "%d to a virtual address\n",
			       __CONTAINER_NAME__,
-			       cap_devnum(&timer[i].cap));
+			       cap_devnum(&global_timer[i].cap));
			BUG();
		}
@@ -323,7 +323,7 @@ int timer_setup_devices(void)
		 * itself as its irq handler, initiate the timer and
		 * wait on irqs.
		 */
-		if ((err = thread_create(timer_irq_handler, &timer[i],
+		if ((err = thread_create(timer_irq_handler, &global_timer[i],
					 TC_SHARE_SPACE,
					 &tptr)) < 0) {
			printf("FATAL: Creation of irq handler "
@@ -404,13 +404,23 @@ void task_sleep(l4id_t tid, unsigned long seconds, int ret)
	struct link *vector;

	/* Can overflow happen here? The timer is in 32-bit mode. */
-	seconds += timer[SLEEP_WAKE_TIMER].count;
+	seconds += global_timer[SLEEP_WAKE_TIMER].count;
+
+	printf("sleep wake timer lock is present at address %lx\n",
+	       ((unsigned long)&global_timer[SLEEP_WAKE_TIMER].task_list_lock.lock));

	vector = find_bucket_list(seconds);

-	l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock);
+	printf("Acquiring lock for sleep wake timer\n");
+	l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
+	printf("got lock for sleep wake timer\n");

	list_insert(&task->list, vector);
-	l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock);
+
+	printf("Releasing lock for sleep wake timer\n");
+	l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock);
+	printf("released lock for sleep wake timer\n");
 }

 void handle_requests(void)
@@ -448,7 +458,11 @@ void handle_requests(void)
	switch (tag) {
	/* Return time in seconds, since the timer was started */
	case L4_IPC_TAG_TIMER_GETTIME:
-		mr[0] = timer[SLEEP_WAKE_TIMER].count;
+		printf("%s: Got get time request from thread 0x%x "
+		       "at time = 0x%x\n", __CONTAINER_NAME__,
+		       senderid, global_timer[SLEEP_WAKE_TIMER].count);
+
+		write_mr(2, global_timer[SLEEP_WAKE_TIMER].count);

		/* Reply */
		if ((ret = l4_ipc_return(ret)) < 0) {
@@ -458,8 +472,11 @@ void handle_requests(void)
		break;

	case L4_IPC_TAG_TIMER_SLEEP:
-		printf("%s: Got sleep request from thread 0x%x, duration %d\n",
-		       __CONTAINER_NAME__, senderid, mr[0]);
+		printf("%s: Got sleep request from thread 0x%x "
+		       "for 0x%x seconds at 0x%x seconds\n",
+		       __CONTAINER_NAME__, senderid, mr[0],
+		       global_timer[SLEEP_WAKE_TIMER].count);

		if (mr[0] > 0) {
			task_sleep(senderid, mr[0], ret);
		}
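The GETTIME and SLEEP cases above define a small IPC protocol: the client puts the sleep duration in mr[0], sends L4_IPC_TAG_TIMER_SLEEP, and is eventually replied to by task_wake() via l4_ipc_return(). A hedged client-side sketch; l4_sendrecv() is assumed here as the synchronous send-then-receive wrapper (only l4_send() and write_mr() appear in this commit):

/* Sketch: ask the timer container to put us to sleep for some seconds */
int timer_sleep(l4id_t timer_tid, unsigned long seconds)
{
	int err;

	write_mr(0, seconds);	/* the handler reads the duration from mr[0] */

	/* Send the request and block until task_wake() replies */
	if ((err = l4_sendrecv(timer_tid, timer_tid,
			       L4_IPC_TAG_TIMER_SLEEP)) < 0)
		return err;

	return 0;
}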
@@ -12,7 +12,7 @@ struct keyboard_state {
 };

 /* Common functions */
-void kmi_irq_handler(unsigned long base);
+void kmi_rx_irq_enable(unsigned long base);
 int kmi_data_read(unsigned long base);

 /* Keyboard specific calls */
@@ -17,7 +17,7 @@ void timer_load(u32 val, unsigned long timer_base);
 u32 timer_read(unsigned long timer_base);
 void timer_stop(unsigned long timer_base);
 void timer_init_oneshot(unsigned long timer_base);
-void timer_init_periodic(unsigned long timer_base);
-void timer_init(unsigned long timer_base);
+void timer_init_periodic(unsigned long timer_base, u32 load_value);
+void timer_init(unsigned long timer_base, u32 load_value);

 #endif /* __LIBDEV_TIMER_H__ */
@@ -135,7 +135,7 @@ struct keyboard_key keymap_uk2[256] = {
	/* 40 */ {';',':',0,0},
	/* 41 */ {'\'','@',0,0},
	/* 42 */ {0,0,0,0},
-	/* 43 */ {KEYCODE_RETURN,0,KEYCODE_ENTER,0},
+	/* 43 */ {'\n','\n',KEYCODE_ENTER,0},
	/* 44 */ {KEYCODE_LSHIFT,0,0,0},
	/* 45 */ {'\\','|',0,0},
	/* 46 */ {'z','Z',0,0},
@@ -9,11 +9,10 @@
 #include "kmi.h"
 #include "keymap.h"

-/*
- * Reading Rx data automatically clears the RXITR
- */
-void kmi_irq_handler(unsigned long base)
+/* Enable Rx irq */
+void kmi_rx_irq_enable(unsigned long base)
 {
	*(volatile unsigned long *)(base + PL050_KMICR) = KMI_RXINTR;
 }

 int kmi_data_read(unsigned long base)
@@ -45,7 +45,7 @@
 #define MOUSE_DATA_ENABLE	0xF4	// Mouse enable

 /* Common functions */
-void kmi_irq_handler(unsigned long base);
+void kmi_rx_irq_enable(unsigned long base);
 int kmi_data_read(unsigned long base);

 /* Keyboard specific calls */
@@ -36,7 +36,7 @@ void timer_stop(unsigned long timer_base)
	write(0, timer_base + SP804_CTRL);
 }

-void timer_init_periodic(unsigned long timer_base)
+void timer_init_periodic(unsigned long timer_base, u32 load_value)
 {
	volatile u32 reg = read(timer_base + SP804_CTRL);

@@ -44,8 +44,11 @@ void timer_init_periodic(unsigned long timer_base)

	write(reg, timer_base + SP804_CTRL);

-	/* 1 tick per usec, 1 irq per msec */
-	timer_load(1000, timer_base);
+	if (load_value)
+		timer_load(load_value, timer_base);
+	else
+		/* 1 tick per usec, 1 irq per msec */
+		timer_load(1000, timer_base);
 }

 void timer_init_oneshot(unsigned long timer_base)
@@ -58,8 +61,8 @@ void timer_init_oneshot(unsigned long timer_base)
	write(reg, timer_base + SP804_CTRL);
 }

-void timer_init(unsigned long timer_base)
+void timer_init(unsigned long timer_base, u32 load_value)
 {
	timer_stop(timer_base);
-	timer_init_periodic(timer_base);
+	timer_init_periodic(timer_base, load_value);
 }
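Since the SP804 here ticks at 1 MHz (the "1 tick per usec" comment above), the load value is simply the desired period in microseconds: the old hardwired 1000 gave one interrupt per millisecond, while the timer task's new timer_init(timer->base, 1000000) call gives the one-interrupt-per-second rate that its jiffy count assumes. In short:

/* 1 MHz clock: load_value = period in microseconds */
timer_init(timer_base, 1000);		/* 1 irq per msec (the old default) */
timer_init(timer_base, 1000000);	/* 1 irq per sec, as the sleep/wake timer uses */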
@@ -56,8 +56,8 @@ void timer_start(unsigned long timer_base);
 void timer_load(u32 loadval, unsigned long timer_base);
 u32 timer_read(unsigned long timer_base);
 void timer_stop(unsigned long timer_base);
-void timer_init_periodic(unsigned long timer_base);
+void timer_init_periodic(unsigned long timer_base, u32 load_value);
 void timer_init_oneshot(unsigned long timer_base);
-void timer_init(unsigned long timer_base);
+void timer_init(unsigned long timer_base, u32 load_value);

 #endif /* __SP804_TIMER_H__ */
@@ -14,7 +14,7 @@
 #include <l4/api/mutex.h>

 struct l4_mutex {
-	unsigned int lock;
+	int lock;
 } __attribute__((aligned(sizeof(int))));

@@ -28,11 +28,15 @@ int l4_mutex_unlock(struct l4_mutex *m);
 #define L4_MUTEX_CONTENDED	-1
 #define L4_MUTEX_SUCCESS	0

-/* Mutex states - Any valid tid value is a locked state */
+/*
+ * Mutex states:
+ * Unlocked = -1, locked = 0, anything above 0 gives the
+ * number of contended threads.
+ */
 #define L4_MUTEX_LOCKED		0
 #define L4_MUTEX_UNLOCKED	-1
 #define L4_MUTEX(m) \
	struct l4_mutex m = { L4_MUTEX_UNLOCKED }

 #endif /* __L4_MUTEX_H__ */
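Under this encoding the lock word doubles as a contention counter: it starts at -1 (unlocked), the atomic increment in __l4_mutex_lock takes it to 0 (locked, uncontended), and every further locker pushes it above 0, recording one contender each. A plain-C illustration of the state transitions (ignoring atomicity, for exposition only):

/* word == -1: free; word == 0: held; word == n > 0: held with n contenders */
int mutex_word = L4_MUTEX_UNLOCKED;	/* -1 */

mutex_word++;	/* thread A locks: -1 -> 0 (L4_MUTEX_LOCKED) */
mutex_word++;	/* thread B contends: 0 -> 1, B must sleep in the kernel */
mutex_word++;	/* thread C contends: 1 -> 2 */

/* A unlocks: __l4_mutex_unlock() reads 2 contenders, resets the word to -1,
 * then A asks the kernel to wake two sleepers with
 * l4_mutex_control(&m->lock, 2 | L4_MUTEX_UNLOCK). */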
@@ -15,9 +15,10 @@ int __l4_mutex_lock(void *m, l4id_t tid)
 loop:
	__asm__ __volatile__(
		"ldrex %0, [%1]\n"
-		: "=r"(tmp)
+		: "=&r"(tmp)
		: "r"(m)
-	);
+		: "memory"
+	);

	if (tmp != L4_MUTEX_UNLOCKED)
		ret = L4_MUTEX_CONTENDED;
@@ -79,19 +80,19 @@ int __l4_mutex_unlock(void *m, l4id_t tid)
	return ret;
 }

-u8 l4_atomic_dest_readb(u8 *location)
+u8 l4_atomic_dest_readb(unsigned long *location)
 {
	unsigned int tmp, res;
	__asm__ __volatile__ (
-		"1:			\n"
-		"ldrex %0, [%2]		\n"
-		"strex %1, %3, [%2]	\n"
-		"teq %1, #0		\n"
-		"bne 1b			\n"
+		"1:			\n"
+		"	ldrex %0, [%2]	\n"
+		"	strex %1, %3, [%2]\n"
+		"	teq %1, #0	\n"
+		"	bne 1b		\n"
		: "=&r"(tmp), "=&r"(res)
		: "r"(location), "r"(0)
-	);
+		: "cc", "memory"
+	);

	return (u8)tmp;
 }

@@ -7,42 +7,35 @@

 /*
  * @r0 = address of mutex word
  * @r1 = unique tid of current thread
  */
 BEGIN_PROC(__l4_mutex_lock)
 1:
-	ldrex	r2, [r0]		@ Load value
-	cmp	r2, #L4_MUTEX_UNLOCKED	@ Decide what state lock will be if we succeed in a store
-	movne	r2, #L4_MUTEX_CONTENDED
-	moveq	r2, #L4_MUTEX_SUCCESS
+	ldrex	r1, [r0]		@ Load value
+	add	r1, r1, #1		@ Add 1
	strex	r3, r1, [r0]		@ Store prospective lock state
	cmp	r3, #0			@ If not successful
-	@ No WFE. What if this were between 2 threads running on the same cpu?
-	bne	1b			@ Retry and decide again on the prospective lock state.
+	bne	1b			@ Retry. No WFE, as that would be a problem on a single cpu.
	dsb

+	cmp	r1, #L4_MUTEX_LOCKED	@ We succeeded in the store, but are we a locker or a contender?
+	movne	r2, #L4_MUTEX_CONTENDED
+	moveq	r2, #L4_MUTEX_SUCCESS
	mov	r0, r2
	mov	pc, lr
 END_PROC(__l4_mutex_lock)

 /*
  * @r0 = address of mutex word
  * @r1 = unique tid of current thread
  */
 BEGIN_PROC(__l4_mutex_unlock)
	dsb
-	push	{r4}
-	mov	r4, #L4_MUTEX_UNLOCKED
+	mov	r3, #L4_MUTEX_UNLOCKED
 1:
-	ldrex	r2, [r0]
-	cmp	r2, r1
-	moveq	r3, #L4_MUTEX_SUCCESS
-	movne	r3, #L4_MUTEX_CONTENDED
-	strex	r2, r4, [r0]
+	ldrex	r1, [r0]
+	strex	r2, r3, [r0]
	cmp	r2, #0
	bne	1b
-	mov	r0, r3
-	pop	{r4}
+	mov	r0, r1
	mov	pc, lr
 END_PROC(__l4_mutex_unlock)

@@ -51,8 +51,8 @@
  * - Whether this is the best design - time will tell.
  */

-extern int __l4_mutex_lock(void *word, l4id_t tid);
-extern int __l4_mutex_unlock(void *word, l4id_t tid);
+extern int __l4_mutex_lock(void *word);
+extern int __l4_mutex_unlock(void *word);

 void l4_mutex_init(struct l4_mutex *m)
 {
@@ -61,10 +61,9 @@ void l4_mutex_init(struct l4_mutex *m)

 int l4_mutex_lock(struct l4_mutex *m)
 {
-	l4id_t tid = self_tid();
	int err;

-	while (__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED) {
+	while (__l4_mutex_lock(&m->lock) != L4_MUTEX_SUCCESS) {
		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
@@ -75,15 +74,14 @@ int l4_mutex_lock(struct l4_mutex *m)

 int l4_mutex_unlock(struct l4_mutex *m)
 {
-	l4id_t tid = self_tid();
-	int err;
+	int err, contended;

-	if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED) {
-		if ((err = l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK)) < 0) {
+	if ((contended = __l4_mutex_unlock(m))) {
+		if ((err = l4_mutex_control(&m->lock,
					    contended | L4_MUTEX_UNLOCK)) < 0) {
			printf("%s: Error: %d\n", __FUNCTION__, err);
			return err;
		}
	}
	return 0;
 }
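With this fast path, callers enter the kernel only when there is contention: the ldrex/strex increment resolves uncontended locks entirely in userspace, and the unlocker carries the contender count into the kernel. A usage sketch of the API as exported above (enqueue() and struct item are hypothetical critical-section work):

/* Shared between threads in one address space */
static L4_MUTEX(queue_lock);	/* expands to an l4_mutex initialised unlocked */

void producer(struct item *it)
{
	l4_mutex_lock(&queue_lock);	/* syscall only if already held */
	enqueue(it);			/* hypothetical critical-section work */
	l4_mutex_unlock(&queue_lock);	/* wakes contenders via l4_mutex_control() if any */
}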
@@ -7,11 +7,22 @@
 #define MUTEX_CONTROL_LOCK	L4_MUTEX_LOCK
 #define MUTEX_CONTROL_UNLOCK	L4_MUTEX_UNLOCK

+#define MUTEX_CONTROL_OPMASK	L4_MUTEX_OPMASK
+
+#define mutex_operation(x)	((x) & MUTEX_CONTROL_OPMASK)
+#define mutex_contenders(x)	((x) & ~MUTEX_CONTROL_OPMASK)
+
 #include <l4/lib/wait.h>
 #include <l4/lib/list.h>
 #include <l4/lib/mutex.h>

+/*
+ * Contender threshold is the total number of contenders
+ * who are expected to sleep on the mutex, and will be waited
+ * for a wakeup.
+ */
 struct mutex_queue {
+	int contenders;
	unsigned long physical;
	struct link list;
	struct waitqueue_head wqh_contenders;
@@ -39,7 +50,8 @@ void init_mutex_queue_head(struct mutex_queue_head *mqhead);

 #endif

-#define L4_MUTEX_LOCK	0
-#define L4_MUTEX_UNLOCK	1
+#define L4_MUTEX_OPMASK		0xF0000000
+#define L4_MUTEX_LOCK		0x10000000
+#define L4_MUTEX_UNLOCK		0x20000000

 #endif /* __MUTEX_CONTROL_H__*/
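Packing the operation code into the top nibble leaves the low 28 bits of the syscall argument free to carry the contender count, which is how l4_mutex_unlock() reports how many waiters the kernel must wake. A quick illustration with the macros above:

int contenders = 3;
int flags = contenders | L4_MUTEX_UNLOCK;	/* 0x20000003 */

mutex_operation(flags);		/* == L4_MUTEX_UNLOCK (0x20000000) */
mutex_contenders(flags);	/* == 3 */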
@@ -12,7 +12,7 @@
 #include INC_ARCH(asm.h)

 /* Abort debugging conditions */
-//#define DEBUG_ABORTS
+// #define DEBUG_ABORTS
 #if defined (DEBUG_ABORTS)
 #define dbg_abort(...)	printk(__VA_ARGS__)
 #else
@@ -8,7 +8,11 @@
 #endif

 phys_ram_start = PLATFORM_PHYS_MEM_START;
+
+#if !defined(kernel_offset)
 kernel_offset = KERNEL_AREA_START - phys_ram_start;
+#endif
+
 kernel_physical = 0x8000 + phys_ram_start;
 kernel_virtual = kernel_physical + kernel_offset;
@@ -47,8 +51,8 @@ SECTIONS
	. = ALIGN(16K);
	_start_vectors = .;
	*(.data.vectors)
-	_end_vectors = .;
	. = ALIGN(4K);
+	_end_vectors = .;
	_start_kip = .;
	*(.data.kip)
	. = ALIGN(4K);
@@ -71,9 +75,6 @@ SECTIONS
		*(.bss)
	}
	. = ALIGN(4K);
-	. += PAGE_SIZE * 2; /* This is required as the link counter does not seem
-			     * to increment for the bss section
-			     * TODO: Change this with PAGE_SIZE */

	/* Below part is to be discarded after boot */
	_start_init = .;
@@ -88,4 +88,8 @@ u32 gic_get_priority(u32 irq);

 void gic_dummy_init(void);

+void gic_eoi_irq(l4id_t irq);
+
+void gic_print_cpu(void);
+
 #endif /* __GIC_H__ */
@@ -32,6 +32,16 @@ struct pager {
	unsigned long stack_address;
	unsigned long memsize;
	struct cap_list cap_list;
+
+	/*
+	 * Section markings.
+	 * We don't care for other types of sections;
+	 * RO will be included inside RX.
+	 */
+	unsigned long rw_sections_start;
+	unsigned long rw_sections_end;
+	unsigned long rx_sections_start;
+	unsigned long rx_sections_end;
 };
@@ -72,6 +82,16 @@ struct pager_info {
	unsigned long start_address;
	unsigned long stack_address;

+	/*
+	 * Section markings.
+	 * We don't care for other types of sections;
+	 * RO will be included inside RX.
+	 */
+	unsigned long rw_sections_start;
+	unsigned long rw_sections_end;
+	unsigned long rx_sections_start;
+	unsigned long rx_sections_end;
+
	/* Number of capabilities defined */
	int ncaps;
@@ -1,18 +1,20 @@
 /*
  * Generic irq handling definitions.
  *
  * Copyright (C) 2007 Bahadir Balban
+ * Copyright (C) 2010 B Labs Ltd.
  */
 #ifndef __GENERIC_IRQ_H__
 #define __GENERIC_IRQ_H__

 #include <l4/lib/string.h>
 #include <l4/lib/wait.h>
 #include <l4/lib/printk.h>
 #include INC_PLAT(irq.h)
 #include INC_ARCH(types.h)

 /* Represents none or spurious irq */
-#define IRQ_NIL		0xFFFFFFFF
+#define IRQ_NIL		0xFFFFFFFF	/* -1 */
+#define IRQ_SPURIOUS	0xFFFFFFFE	/* -2 */

 /* Successful irq handling state */
 #define IRQ_HANDLED	0
@@ -23,6 +25,7 @@ struct irq_chip_ops {
	l4id_t (*read_irq)(void *data);
	irq_op_t ack_and_mask;
	irq_op_t unmask;
+	void (*set_cpu)(l4id_t irq, unsigned int cpumask);
 };

 struct irq_chip {
@@ -47,9 +50,6 @@ struct irq_desc {
	/* Notification slot for this irq */
	int task_notify_slot;

-	/* If user will ack this irq */
-	int user_ack;
-
	/* Waitqueue head for this irq */
	struct waitqueue_head wqh_irq;
@@ -72,10 +72,17 @@ static inline void irq_disable(int irq_index)
 {
	struct irq_desc *this_irq = irq_desc_array + irq_index;
	struct irq_chip *this_chip = this_irq->chip;

	this_chip->ops.ack_and_mask(irq_index - this_chip->start);
 }

+static inline void irq_set_cpu(int irq_index, unsigned int cpumask)
+{
+	struct irq_desc *this_irq = irq_desc_array + irq_index;
+	struct irq_chip *this_chip = this_irq->chip;
+
+	this_chip->ops.set_cpu(irq_index - this_chip->start, cpumask);
+}
+
 int irq_register(struct ktcb *task, int notify_slot, l4id_t irq_index);
 int irq_thread_notify(struct irq_desc *desc);
@@ -42,7 +42,7 @@ static inline struct ktcb *current_task(void)
 #define current		current_task()
 #define need_resched	(current->ts_need_resched)

-#define SCHED_RQ_TOTAL	2
+#define SCHED_RQ_TOTAL	4

 /* A basic runqueue */
 struct runqueue {
@@ -52,11 +52,28 @@ struct runqueue {
	unsigned int total;		/* Total tasks */
 };

 /*
  * Hints and flags to scheduler
  */
 enum sched_flags {
	/* Schedule idle at a convenient time */
	SCHED_RUN_IDLE = (1 << 0),
 };

 /* Contains per-container scheduling structures */
 struct scheduler {
	unsigned int flags;
	unsigned int task_select_ctr;
	struct runqueue sched_rq[SCHED_RQ_TOTAL];

+	/* Regular runqueues */
+	struct runqueue *rq_runnable;
+	struct runqueue *rq_expired;
+
+	/* Real-time runqueues */
+	struct runqueue *rq_rt_runnable;
+	struct runqueue *rq_rt_expired;
+
	struct ktcb *idle_task;

	/* Total priority of all tasks in container */
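The four runqueues now split into a real-time pair and a regular pair; irq handler threads flagged TASK_REALTIME (see the irq_control_register() change later in this commit) presumably live on the rt queues so they are picked before ordinary tasks. A hedged sketch of the selection order that implies — the actual schedule() changes are not part of this excerpt, and rq_first_task() is a hypothetical helper:

/* Sketch only: prefer real-time tasks, then regular ones, then idle */
static struct ktcb *sched_select_next(struct scheduler *s)
{
	if (s->rq_rt_runnable->total)		/* any runnable real-time task? */
		return rq_first_task(s->rq_rt_runnable);

	if (s->rq_runnable->total)
		return rq_first_task(s->rq_runnable);

	return s->idle_task;			/* nothing runnable */
}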
@@ -20,4 +20,31 @@
 #define smp_get_cpuid()	0
 #endif

+/* All cpus in the SMP system */
+static inline unsigned int cpu_mask_all(void)
+{
+	unsigned int mask = 0;
+
+	for (int i = 0; i < CONFIG_NCPU; i++)
+		mask |= (1 << i);
+	return mask;
+}
+
+/* All but not self */
+static inline unsigned int cpu_mask_others(void)
+{
+	unsigned int mask = 0;
+
+	for (int i = 0; i < CONFIG_NCPU; i++)
+		if (i != smp_get_cpuid())
+			mask |= (1 << i);
+	return mask;
+}
+
+/* Only self */
+static inline unsigned int cpu_mask_self(void)
+{
+	return 1 << smp_get_cpuid();
+}
+
 #endif /* __GENERIC_SMP_H__ */
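These masks pair naturally with the cpumask-based IPI interface declared later in this commit. For instance, broadcasting the timer event to every other core might look like the line below; smp_send_ipi() and IPI_TIMER_EVENT are both declared in the headers that follow, though this particular pairing is an assumption:

/* Kick all other cpus with the timer IPI */
smp_send_ipi(cpu_mask_others(), IPI_TIMER_EVENT);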
@@ -29,6 +29,7 @@
 #define TASK_SUSPENDING		(1 << 1)
 #define TASK_RESUMING		(1 << 2)
 #define TASK_PENDING_SIGNAL	(TASK_SUSPENDING)
+#define TASK_REALTIME		(1 << 5)

 /*
  * This is to indicate a task (either current or one of
@@ -109,7 +110,6 @@ struct ktcb {
	enum task_state state;

	struct link task_list;		/* Global task list. */
-	struct ktcb_list child_exit_list;

	/* UTCB related, see utcb.txt in docs */
	unsigned long utcb_address;	/* Virtual ref to task's utcb area */
@@ -16,5 +16,6 @@ struct timeval {
 extern volatile u32 jiffies;

 int do_timer_irq(void);
+int secondary_timer_irq(void);

 #endif /* __GENERIC_TIME_H__ */
@@ -1,18 +1,16 @@
-/*
- * Copyright (C) 2010 B Labs Ltd.
- *
- * By Bahadir Balban
- */
 #ifndef __IPI_H__
 #define __IPI_H__

 /*
  * Copyright 2010 B Labs.Ltd.
  *
  * Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
  *
  * Description:
  */

 #include <l4/generic/irq.h>

 int ipi_handler(struct irq_desc *desc);

+#define IPI_TIMER_EVENT	0
+
 #endif /* __IPI_H__ */
@@ -34,7 +34,7 @@ static inline void smp_start_cores(void) {}

 void init_smp(void);
 void arch_smp_spin(void);
-void arch_send_ipi(u32 cpu, int ipi);
+void smp_send_ipi(unsigned int cpumask, int ipi_num);
 void platform_smp_init(int ncpus);
 int platform_smp_start(int cpu, void (*start)(int));
 void secondary_init_platform(void);
@@ -69,10 +69,13 @@
 #if defined (CONFIG_CPU_ARM11MPCORE) || defined (CONFIG_CPU_CORTEXA9)
 #define IRQ_TIMER0	MPCORE_GIC_IRQ_TIMER01
 #define IRQ_TIMER1	MPCORE_GIC_IRQ_TIMER23
+#define IRQ_KEYBOARD0	MPCORE_GIC_IRQ_KMI0
+#define IRQ_MOUSE0	MPCORE_GIC_IRQ_KMI1
 #else
 #define IRQ_TIMER0	EB_IRQ_TIMER01
 #define IRQ_TIMER1	EB_IRQ_TIMER23
+#define IRQ_KEYBOARD0	EB_IRQ_KMI0
+#define IRQ_MOUSE0	EB_IRQ_KMI1
 #endif

 #endif /* __PLATFORM_IRQ_H__ */
@@ -19,7 +19,11 @@
 #define PLATFORM_GIC3_BASE	0x10060000	/* GIC 3 */
 #define PLATFORM_GIC4_BASE	0x10070000	/* GIC 4 */

-#define MPCORE_PRIVATE_VBASE	(IO_AREA0_VADDR + (13 * DEVICE_PAGE))
+/*
+ * Virtual device offsets for EB platform - starting from
+ * the last common realview virtual device offset
+ */
+#define MPCORE_PRIVATE_VBASE	(IO_AREA0_VADDR + (14 * DEVICE_PAGE))

 #if defined (CONFIG_CPU_CORTEXA9)
 #define MPCORE_PRIVATE_BASE	0x1F000000
@@ -16,6 +16,7 @@
 #define PLATFORM_TIMER2_BASE	0x10018000	/* TIMER 4-5 */
 #define PLATFORM_TIMER3_BASE	0x10019000	/* TIMER 6-7 */
 #define PLATFORM_SYSCTRL1_BASE	0x1001A000	/* System controller 1 */
+#define PLATFORM_CLCD0_BASE	0x10020000	/* CLCD */
 #define PLATFORM_GIC0_BASE	0x1E000000	/* GIC 0 */
 #define PLATFORM_GIC1_BASE	0x1E010000	/* GIC 1 */
 #define PLATFORM_GIC2_BASE	0x1E020000	/* GIC 2 */
@@ -1,5 +1,3 @@
-#ifndef __PB926_PLATFORM_H__
-#define __PB926_PLATFORM_H__
 /*
  * Platform specific ties between drivers and generic APIs used by the kernel.
  * E.g. system timer and console.
@@ -7,6 +5,9 @@
  * Copyright (C) Bahadir Balban 2007
  */

+#ifndef __PB926_PLATFORM_H__
+#define __PB926_PLATFORM_H__
+
 void platform_timer_start(void);

 #endif /* __PB926_PLATFORM_H__ */
@@ -24,5 +24,8 @@
 #define IRQ_TIMER2	73
 #define IRQ_TIMER3	74

+#define IRQ_KEYBOARD0	52
+#define IRQ_MOUSE0	53
+
 #endif /* __PLATFORM_IRQ_H__ */
@@ -24,6 +24,7 @@
 #define PLATFORM_TIMER2_BASE	0x10018000	/* Timers 4 and 5 */
 #define PLATFORM_TIMER3_BASE	0x10019000	/* Timers 6 and 7 */
 #define PLATFORM_SYSCTRL1_BASE	0x1001A000	/* System controller1 */
+#define PLATFORM_CLCD0_BASE	0x10020000	/* CLCD */
 #define PLATFORM_GIC1_BASE	0x1E000000	/* GIC 1 */
 #define PLATFORM_GIC2_BASE	0x1E010000	/* GIC 2 */
 #define PLATFORM_GIC3_BASE	0x1E020000	/* GIC 3 */
@@ -20,14 +20,16 @@
 #define IRQ_UART1	38
 #define IRQ_UART2	39
 #define IRQ_UART3	40

+#define IRQ_KEYBOARD0	44
+#define IRQ_MOUSE0	45
+#define IRQ_CLCD0	46
+
 /*
- * Interrupt Distribution:
- * 0-31: SI, provided by distributed interrupt controller
- * 32-63: External peripheral interrupts
- * 64-71: Tile site interrupt
- * 72-95: External peripheral interrupts
+ * Versatile Express A9 Interrupt Distribution:
+ * 0 - 31: SI, provided by distributed interrupt controller
+ * 32 - 74: Irqs from Motherboard (0 - 42)
+ * 75 - 81: Test chip interrupts
  */

 #endif /* __PLATFORM_IRQ_H__ */
@@ -22,14 +22,24 @@
 #define PLATFORM_TIMER3_BASE	0x10019000	/* Timers 2 and 3 */
 #define PLATFORM_SYSCTRL1_BASE	0x1001A000	/* System controller1 */

+#define PLATFORM_CLCD0_BASE	0x1001F000	/* CLCD */
+
 #define PLATFORM_GIC0_BASE	0x1E000000	/* GIC 0 */

 #define MPCORE_PRIVATE_BASE	0x1E000000
-#define MPCORE_PRIVATE_VBASE	(IO_AREA0_VADDR + (13 * DEVICE_PAGE))

 #define SCU_BASE	MPCORE_PRIVATE_BASE
 #define SCU_VBASE	MPCORE_PRIVATE_VBASE
 #define GIC0_CPU_VBASE	(MPCORE_PRIVATE_VBASE + 0x100)
 #define GIC0_DIST_VBASE	(MPCORE_PRIVATE_VBASE + 0x1000)

+/*
+ * Virtual device offsets for Versatile Express A9.
+ * Offsets start from the last common realview virtual
+ * device offset.
+ */
+#define MPCORE_PRIVATE_VBASE	(IO_AREA0_VADDR + (14 * DEVICE_PAGE))
+
 /* Add userspace devices here as they become necessary for irqs */

 #endif /* __PLATFORM_PBA9_OFFSETS_H__ */
@@ -22,6 +22,8 @@
  */
 #define PLATFORM_SYSTEM_REGISTERS	0x10000000	/* System registers */
 #define PLATFORM_SYSCTRL_BASE		0x10001000	/* System controller0 */
+#define PLATFORM_KEYBOARD0_BASE		0x10006000	/* Keyboard */
+#define PLATFORM_MOUSE0_BASE		0x10007000	/* Mouse */
 #define PLATFORM_UART0_BASE		0x10009000	/* Console port (UART0) */
 #define PLATFORM_UART1_BASE		0x1000A000	/* Console port (UART1) */
 #define PLATFORM_UART2_BASE		0x1000B000	/* Console port (UART2) */
@@ -43,12 +45,15 @@
 #define PLATFORM_TIMER0_VBASE	(IO_AREA0_VADDR + (4 * DEVICE_PAGE))
 #define PLATFORM_GIC0_VBASE	(IO_AREA0_VADDR + (5 * DEVICE_PAGE))
 #define PLATFORM_GIC1_VBASE	(IO_AREA0_VADDR + (7 * DEVICE_PAGE))
-#define PLATFORM_GIC2_VBASE	(IO_AREA0_VADDR + (9 * DEVICE_PAGE))
-#define PLATFORM_GIC3_VBASE	(IO_AREA0_VADDR + (11 * DEVICE_PAGE))
+#define PLATFORM_GIC2_VBASE	(IO_AREA0_VADDR + (8 * DEVICE_PAGE))
+#define PLATFORM_GIC3_VBASE	(IO_AREA0_VADDR + (9 * DEVICE_PAGE))

 /* Add userspace devices here as they become necessary for irqs */
 /* Add size of various user space devices, to be used in capability generation */
+#define PLATFORM_TIMER1_VBASE		(IO_AREA0_VADDR + (10 * DEVICE_PAGE))
+#define PLATFORM_KEYBOARD0_VBASE	(IO_AREA0_VADDR + (11 * DEVICE_PAGE))
+#define PLATFORM_MOUSE0_VBASE		(IO_AREA0_VADDR + (12 * DEVICE_PAGE))
+#define PLATFORM_CLCD0_VBASE		(IO_AREA0_VADDR + (13 * DEVICE_PAGE))

 /* The SP810 system controller offsets */
 #define SP810_BASE	PLATFORM_SYSCTRL_VBASE
@@ -59,6 +64,9 @@
 #define PLATFORM_UART2_SIZE	DEVICE_PAGE
 #define PLATFORM_UART3_SIZE	DEVICE_PAGE
 #define PLATFORM_TIMER1_SIZE	DEVICE_PAGE
+#define PLATFORM_KEYBOARD0_SIZE	DEVICE_PAGE
+#define PLATFORM_MOUSE0_SIZE	DEVICE_PAGE
+#define PLATFORM_CLCD0_SIZE	DEVICE_PAGE

 #endif /* __PLATFORM_REALVIEW_OFFSETS_H__ */
@@ -8,6 +8,7 @@
 import os, sys, shelve, glob
 from os.path import join
 from tools.pyelf.elfsize import *
+from tools.pyelf.elf_section_info import *

 PROJRELROOT = '../../'

@@ -22,6 +23,11 @@ from scripts.linux.build_atags import *
 from pack import *
 from packall import *

+def fill_pager_section_markers(cont, pager_binary):
+    cont.pager_rw_section_start, cont.pager_rw_section_end, \
+        cont.pager_rx_section_start, cont.pager_rx_section_end = \
+        elf_loadable_section_info(join(PROJROOT, pager_binary))
+
 def build_linux_container(config, projpaths, container):
     linux_builder = LinuxBuilder(projpaths, container)
     linux_builder.build_linux(config)
@@ -33,9 +39,12 @@ def build_linux_container(config, projpaths, container):

     # Calculate and store size of pager
     pager_binary = \
-        "cont" + str(container.id) + "/linux/linux-2.6.33/linux.elf"
+        join(BUILDDIR, "cont" + str(container.id) +
+             "/linux/linux-2.6.33/linux.elf")
     config.containers[container.id].pager_size = \
-        conv_hex(elf_binary_size(join(BUILDDIR, pager_binary)))
+        conv_hex(elf_binary_size(pager_binary))
+
+    fill_pager_section_markers(config.containers[container.id], pager_binary)

     linux_container_packer = \
         LinuxContainerPacker(container, linux_builder, \
@@ -70,9 +79,13 @@ def build_posix_container(config, projpaths, container):
     os.path.walk(builddir, glob_by_walk, ['*.elf', images])

     # Calculate and store size of pager
-    pager_binary = "cont" + str(container.id) + "/posix/mm0/mm0.elf"
+    pager_binary = join(BUILDDIR,
+                        "cont" + str(container.id) + "/posix/mm0/mm0.elf")
     config.containers[container.id].pager_size = \
-        conv_hex(elf_binary_size(join(BUILDDIR, pager_binary)))
+        conv_hex(elf_binary_size(pager_binary))
+
+    print 'Find markers for ' + pager_binary
+    fill_pager_section_markers(config.containers[container.id], pager_binary)

     container_packer = DefaultContainerPacker(container, images)
     return container_packer.pack_container(config)
@@ -89,9 +102,11 @@ def build_default_container(config, projpaths, container):
     os.path.walk(projdir, glob_by_walk, ['*.elf', images])

     # Calculate and store size of pager
-    pager_binary = "conts/" + container.name + "/main.elf"
+    pager_binary = join(PROJROOT, "conts/" + container.name + "/main.elf")
     config.containers[container.id].pager_size = \
-        conv_hex(elf_binary_size(join(PROJROOT, pager_binary)))
+        conv_hex(elf_binary_size(pager_binary))
+
+    fill_pager_section_markers(config.containers[container.id], pager_binary)

     container_packer = DefaultContainerPacker(container, images)
     return container_packer.pack_container(config)
@@ -67,6 +67,10 @@ pager_start = \
 \t\t\t.pager_lma = __pfn(CONFIG_CONT%(cn)d_PAGER_LOAD_ADDR),
 \t\t\t.pager_vma = __pfn(CONFIG_CONT%(cn)d_PAGER_VIRT_ADDR),
 \t\t\t.pager_size = __pfn(page_align_up(CONT%(cn)d_PAGER_MAPSIZE)),
+\t\t\t.rw_sections_start = %(rw_sec_start)s,
+\t\t\t.rw_sections_end = %(rw_sec_end)s,
+\t\t\t.rx_sections_start = %(rx_sec_start)s,
+\t\t\t.rx_sections_end = %(rx_sec_end)s,
 \t\t\t.ncaps = %(caps)d,
 \t\t\t.caps = {
 '''
@@ -160,7 +164,12 @@ def generate_kernel_cinfo(config, cinfo_path):
         # Currently only these are considered as capabilities
         total_caps = c.virt_regions + c.phys_regions + len(c.caps)
         fbody += cinfo_start % (c.id, c.name)
-        fbody += pager_start % { 'cn' : c.id, 'caps' : total_caps}
+        fbody += pager_start % { 'cn' : c.id, 'caps' : total_caps,
+                                 'rw_sec_start' : hex(c.pager_rw_section_start),
+                                 'rw_sec_end' : hex(c.pager_rw_section_end),
+                                 'rx_sec_start' : hex(c.pager_rx_section_start),
+                                 'rx_sec_end' : hex(c.pager_rx_section_end),
+                                 }
         cap_index = 0
         for mem_index in range(c.virt_regions):
             fbody += cap_virtmem % { 'capidx' : cap_index, 'cn' : c.id, 'vn' : mem_index }
@@ -23,12 +23,13 @@ from config.configuration import *
 map_list = (['EB', 'ARM1136', 'realview-eb', 'arm1136'],
             ['EB', 'ARM11MPCORE', 'realview-eb-mpcore', 'arm11mpcore'],
             ['EB', 'CORTEXA8', 'realview-eb', 'cortex-a8'],
+            ['EB', 'CORTEXA9', 'realview-pbx-a9', 'cortex-a9'],
             ['PB926', 'ARM926', 'versatilepb', 'arm926'],
             ['BEAGLE', 'CORTEXA8', 'beagle', 'cortex-a8'],
             ['PBA9', 'CORTEXA9', 'realview-pbx-a9', 'cortex-a9'],
             ['PBA8', 'CORTEXA8', 'realview-pb-a8', 'cortex-a8'])

-data = \
+data_up = \
 '''
 cd build
 qemu-system-arm -s -S -kernel final.elf -nographic -M %s -cpu %s &
@@ -36,6 +37,14 @@ arm-none-insight ; pkill qemu-system-arm
 cd ..
 '''

+data_smp = \
+'''
+cd build
+qemu-system-arm -s -S -kernel final.elf -smp %d -nographic -M %s -cpu %s &
+arm-none-insight ; pkill qemu-system-arm
+cd ..
+'''
+
 def build_qemu_cmdline_script():
     build_tools_folder = 'tools'
     qemu_cmd_file = join(build_tools_folder, 'run-qemu-insight')
@@ -44,10 +53,14 @@ def build_qemu_cmdline_script():
     config = configuration_retrieve()
     cpu = config.cpu.upper()
     platform = config.platform.upper()
+    smp = config.smp
+    ncpu = config.ncpu

     # Find appropriate flags
-    for platform_type, cpu_type, mflag, cpuflag in map_list:
+    for platform_type, cpu_type, m_flag, cpu_flag in map_list:
         if platform_type == platform and cpu_type == cpu:
+            mflag = m_flag
+            cpuflag = cpu_flag
             break

     if not mflag or not cpuflag:
@@ -57,9 +70,16 @@ def build_qemu_cmdline_script():
     if os.path.exists(build_tools_folder) is False:
         os.system("mkdir " + build_tools_folder)

+    # Special case for EB+A9 (non-smp)
+    if platform == 'EB' and cpu == 'CORTEXA9' and smp == False:
+        mflag = 'realview-eb'
+
     # Write run-qemu-insight file
     with open(qemu_cmd_file, 'w+') as f:
-        f.write(data % (mflag, cpuflag))
+        if smp == False:
+            f.write(data_up % (mflag, cpuflag))
+        else:
+            f.write(data_smp % (ncpu, mflag, cpuflag))

     os.system("chmod +x " + qemu_cmd_file)
@@ -91,6 +91,9 @@ int irq_control_register(struct ktcb *task, int slot, l4id_t irqnum)
	if ((err = irq_register(current, slot, irqnum)) < 0)
		return err;

+	/* Make thread a real-time task */
+	current->flags |= TASK_REALTIME;
+
	return 0;
 }
@@ -111,16 +114,6 @@ int irq_wait(l4id_t irq_index)
	if ((ret = tcb_check_and_lazy_map_utcb(current, 1)) < 0)
		return ret;

-	/*
-	 * In case the user has asked for unmasking the irq only after
-	 * the user handler is done, unmask the irq.
-	 *
-	 * FIXME: This is not the correct place for this call,
-	 * fix this.
-	 */
-	if (desc->user_ack)
-		irq_enable(irq_index);
-
	/* Wait until the irq changes slot value */
	WAIT_EVENT(&desc->wqh_irq,
		   utcb->notify[desc->task_notify_slot] != 0,

src/api/mutex.c
@@ -102,16 +102,56 @@ void mutex_control_delete(struct mutex_queue *mq)
 }

 /*
- * A contended thread is expected to show up with the
- * contended mutex address here.
+ * Here's how this whole mutex implementation works:
  *
- * (1) The mutex is converted into its physical form and
- * searched for in the existing mutex list. If it does not
- * appear there, it gets added.
- * (2) The thread is put to sleep in the mutex wait queue
- * until a wake up event occurs. If there is already an asleep
- * lock holder (i.e. unlocker) that is woken up and we return.
+ * A thread who locked a user mutex learns how many
+ * contentions were on it as it unlocks it. It is obliged to
+ * go to the kernel to wake that many threads up.
+ *
+ * Each contender sleeps in the kernel, but the time
+ * of arrival in the kernel by both the unlocker and the
+ * contenders is asynchronous.
+ *
+ * Mutex queue scenarios at any one time:
+ *
+ * 1) There may be multiple contenders waiting for
+ * an earlier lock holder:
+ *
+ * Lock holders waitqueue: Empty
+ * Contenders waitqueue: C - C - C - C
+ * Contenders to wake up: 0
+ *
+ * The lock holder would wake up as many contenders as it counted
+ * earlier in userspace as it released the lock.
+ *
+ * 2) There may be one lock holder waiting for contenders to arrive:
+ *
+ * Lock holders waitqueue: LH
+ * Contenders waitqueue: Empty
+ * Contenders to wake up: 5
+ *
+ * As each contender comes in, the contenders value is reduced, and
+ * when it becomes zero, the lock holder is woken up and the mutex
+ * deleted.
+ *
+ * 3) Occasionally multiple lock holders who just released the lock
+ * make it to the kernel before any contenders:
+ *
+ * Contenders: Empty
+ * Lock holders: LH
+ * Contenders to wake up: 5
+ *
+ * -> New lock holder arrives.
+ *
+ * As soon as the above occurs, the new LH wakes up the waiting one,
+ * increments the contenders by its own contender count and starts
+ * waiting. The scenario transitions to scenario (2) in this case.
+ *
+ * The asynchronous nature of contender and lock holder arrivals makes
+ * for many possibilities, but what matters is that the same number of
+ * wake ups must occur as the number of contended waits.
  */
 int mutex_control_lock(struct mutex_queue_head *mqhead,
		       unsigned long mutex_address)
 {
@@ -128,24 +168,27 @@ int mutex_control_lock(struct mutex_queue_head *mqhead,
		}
		/* Add the queue to mutex queue list */
		mutex_control_add(mqhead, mutex_queue);
-	} else {
-		/* See if there is a lock holder */
-		if (mutex_queue->wqh_holders.sleepers) {
-			/*
-			 * If yes, wake it up async and we can *hope*
-			 * to acquire the lock before the lock holder
-			 */
+	} else if (mutex_queue->wqh_holders.sleepers) {
+		/*
+		 * There's a lock holder, so we can consume from the
+		 * number of contenders, since we are one of them.
+		 */
+		mutex_queue->contenders--;

+		/* No contenders left as far as the current holder is concerned */
+		if (mutex_queue->contenders == 0) {
+			/* Wake up current holder */
+			wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);

-			/* Since no one is left, delete the mutex queue */
+			/* There must not be any contenders; delete the mutex */
			mutex_control_remove(mqhead, mutex_queue);
			mutex_control_delete(mutex_queue);
+		}

-			/* Release lock and return */
-			mutex_queue_head_unlock(mqhead);
-			return 0;
-		}
+		/* Release lock and return */
+		mutex_queue_head_unlock(mqhead);
+		return 0;
	}

	/* Prepare to wait on the contenders queue */
@@ -160,22 +203,8 @@ int mutex_control_lock(struct mutex_queue_head *mqhead,
	return wait_on_prepared_wait();
 }

-/*
- * A thread that has detected a contention on a mutex that
- * it had locked but has just released is expected to show up with
- * that mutex here.
- *
- * (1) The mutex is converted into its physical form and
- * searched for in the existing mutex list. If not found,
- * a new one is created and the thread sleeps there as a lock
- * holder.
- * (2) All the threads waiting on this mutex are woken up. This may
- * cause a thundering herd, but user threads cannot be trusted
- * to acquire the mutex; waking up all of them increases the
- * chances that some thread may acquire it.
- */
 int mutex_control_unlock(struct mutex_queue_head *mqhead,
-			 unsigned long mutex_address)
+			 unsigned long mutex_address, int contenders)
 {
	struct mutex_queue *mutex_queue;

@@ -190,6 +219,9 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
			return -ENOMEM;
		}

+		/* Set new or increment the contenders value */
+		mutex_queue->contenders = contenders;
+
		/* Add the queue to mutex queue list */
		mutex_control_add(mqhead, mutex_queue);

@@ -206,51 +238,67 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead,
		return wait_on_prepared_wait();
	}

+	/* Set new or increment the contenders value */
+	mutex_queue->contenders += contenders;
+
+	/* Wake up holders if any, and take wake up responsibility */
+	if (mutex_queue->wqh_holders.sleepers)
+		wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);
+
	/*
-	 * Note, the mutex in userspace was left free before the
-	 * syscall was entered.
-	 *
-	 * It is possible that a thread has acquired it, another
-	 * contended on it and the holder made it to the kernel
-	 * quicker than us. We detect this situation here.
+	 * Now wake up as many contenders as possible, otherwise
+	 * go to sleep on the holders queue.
	 */
-	if (mutex_queue->wqh_holders.sleepers) {
-		/*
-		 * Let the first holder do all the waking up
-		 */
-		mutex_queue_head_unlock(mqhead);
-		return 0;
+	while (mutex_queue->contenders &&
+	       mutex_queue->wqh_contenders.sleepers) {
+		/* Reduce total contenders to be woken up */
+		mutex_queue->contenders--;
+
+		/* Wake up a contender who made it to the kernel */
+		wake_up(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
	}

	/*
-	 * Found it; if it exists, there are contenders,
-	 * now wake all of them up in FIFO order.
-	 * FIXME: Make sure this is FIFO order. It doesn't seem so.
+	 * Are we done with all? Leave.
+	 *
+	 * Not enough contenders? Go to sleep and wait for a new
+	 * contender rendezvous.
	 */
-	wake_up_all(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
+	if (mutex_queue->contenders == 0) {
+		/* Delete only if no more contenders */
+		if (mutex_queue->wqh_contenders.sleepers == 0) {
+			/* Since no one is left, delete the mutex queue */
+			mutex_control_remove(mqhead, mutex_queue);
+			mutex_control_delete(mutex_queue);
+		}

-	/* Since no one is left, delete the mutex queue */
-	mutex_control_remove(mqhead, mutex_queue);
-	mutex_control_delete(mutex_queue);
+		/* Release lock and return */
+		mutex_queue_head_unlock(mqhead);
+	} else {
+		/* Prepare to wait on the lock holders queue */
+		CREATE_WAITQUEUE_ON_STACK(wq, current);
+
+		/* Prepare to wait */
+		wait_on_prepare(&mutex_queue->wqh_holders, &wq);
+
+		/* Release lock first */
+		mutex_queue_head_unlock(mqhead);
+
+		/* Initiate prepared wait */
+		return wait_on_prepared_wait();
+	}

-	/* Release lock and return */
-	mutex_queue_head_unlock(mqhead);
	return 0;
 }

-int sys_mutex_control(unsigned long mutex_address, int mutex_op)
+int sys_mutex_control(unsigned long mutex_address, int mutex_flags)
 {
	unsigned long mutex_physical;
-	int ret = 0;
+	int mutex_op = mutex_operation(mutex_flags);
+	int contenders = mutex_contenders(mutex_flags);
+	int ret;

-	// printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);
-
-	/* Check valid operation */
-	if (mutex_op != MUTEX_CONTROL_LOCK &&
-	    mutex_op != MUTEX_CONTROL_UNLOCK) {
-		printk("Invalid args to %s.\n", __FUNCTION__);
-		return -EINVAL;
-	}
+	//printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);

	/* Check valid user virtual address */
	if (KERN_ADDR(mutex_address)) {
@@ -258,6 +306,10 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
		return -EINVAL;
	}

+	if (mutex_op != MUTEX_CONTROL_LOCK &&
+	    mutex_op != MUTEX_CONTROL_UNLOCK)
+		return -EPERM;
+
	if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0)
		return ret;

@@ -278,11 +330,8 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op)
		break;
	case MUTEX_CONTROL_UNLOCK:
		ret = mutex_control_unlock(&curcont->mutex_queue_head,
-					   mutex_physical);
+					   mutex_physical, contenders);
		break;
	default:
		printk("%s: Invalid operands\n", __FUNCTION__);
		ret = -EINVAL;
	}

	return ret;
@@ -494,10 +494,22 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids)
|
||||
MAP_USR_RW, 1)) < 0)
|
||||
return err;
|
||||
|
||||
if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE)
|
||||
if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) {
|
||||
if (!(task = tcb_find(ids->tid)))
|
||||
return -ESRCH;
|
||||
|
||||
/*
|
||||
* Tasks may only operate on their children. They may
|
||||
* also destroy themselves or any children.
|
||||
*/
|
||||
if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY &&
|
||||
!task_is_child(task) && task != current)
|
||||
return -EPERM;
|
||||
if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY
|
||||
&& !task_is_child(task))
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if ((err = cap_thread_check(task, flags, ids)) < 0)
|
||||
return err;
|
||||
|
||||
|
||||
@@ -325,7 +325,7 @@ extern int current_irq_nest_count;
|
||||
*/
|
||||
void irq_overnest_error(void)
|
||||
{
|
||||
dprintk("Irqs nested beyond limit. Current count: ",
|
||||
printk("Irqs nested beyond limit. Current count: %d",
|
||||
current_irq_nest_count);
|
||||
print_early("System halted...\n");
|
||||
while(1)
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
Import('env')
|
||||
|
||||
# The set of source files associated with this SConscript file.
|
||||
src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c']
|
||||
src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c', 'atomic.S']
|
||||
|
||||
obj = env.Object(src_local)
|
||||
Return('obj')
|
||||
|
||||
@@ -58,59 +58,59 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)

    /* Aborts that can't be handled by a pager yet: */
    case DABT_TERMINAL:
        dprintk("Terminal fault dabt %x", far);
        dprintk("Terminal fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_VECTOR:
        dprintk("Vector abort (obsolete!) %x", far);
        dprintk("Vector abort (obsolete!) ", far);
        ret = -EABORT;
        break;
    case DABT_ALIGN:
        dprintk("Alignment fault dabt %x", far);
        dprintk("Alignment fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_XLATE_LEVEL1:
        dprintk("External LVL1 translation fault %x", far);
        dprintk("External LVL1 translation fault ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_XLATE_LEVEL2:
        dprintk("External LVL2 translation fault %x", far);
        dprintk("External LVL2 translation fault ", far);
        ret = -EABORT;
        break;
    case DABT_DOMAIN_SECT:
        dprintk("Section domain fault dabt %x", far);
        dprintk("Section domain fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_DOMAIN_PAGE:
        dprintk("Page domain fault dabt %x", far);
        dprintk("Page domain fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_PERM_SECT:
        dprintk("Section permission fault dabt %x", far);
        dprintk("Section permission fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_LFETCH_SECT:
        dprintk("External section linefetch "
                "fault dabt %x", far);
                "fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_LFETCH_PAGE:
        dprintk("Page perm fault dabt %x", far);
        dprintk("Page perm fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_NON_LFETCH_SECT:
        dprintk("External section non-linefetch "
                "fault dabt %x ", far);
                "fault dabt ", far);
        ret = -EABORT;
        break;
    case DABT_EXT_NON_LFETCH_PAGE:
        dprintk("External page non-linefetch "
                "fault dabt %x ", far);
                "fault dabt ", far);
        ret = -EABORT;
        break;
    default:
        dprintk("FATAL: Unrecognised/Unknown "
                "data abort %x ", far);
                "data abort ", far);
        dprintk("FATAL: FSR code: ", fsr);
        ret = -EABORT;
    }
@@ -122,7 +122,7 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
     */
    if (is_kernel_address(faulted_pc)) {
        dprintk("Unhandled kernel data "
                "abort at address %x",
                "abort at address ",
                faulted_pc);
        ret = -EABORT;
    }
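The pattern of these changes implies that dprintk is not a format-string printer but a two-argument early-debug helper that prints the message and then renders the value itself, presumably in hex — hence every embedded %x was redundant. A sketch of that assumed convention (printhex8() is a hypothetical low-level hex printer, not this kernel's definition):

    /* Assumed shape of dprintk, inferred from the call sites above */
    void dprintk(char *str, unsigned int val)
    {
        print_early(str);
        printhex8(val);     /* hypothetical: render val as 8 hex digits */
        print_early("\n");
    }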
@@ -30,33 +30,6 @@ void irq_local_restore(unsigned long state)
    );
}

u8 l4_atomic_dest_readb(unsigned long *location)
{
#if 0
    unsigned int tmp;
    __asm__ __volatile__ (
        "swpb r0, r2, [r1] \n"
        : "=r"(tmp)
        : "r"(location), "r"(0)
        : "memory"
    );

    return (u8)tmp;
#endif

    unsigned int tmp;
    unsigned long state;
    irq_local_disable_save(&state);

    tmp = *location;
    *location = 0;

    irq_local_restore(state);

    return (u8)tmp;

}

int irqs_enabled(void)
{
    int tmp;
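The swpb path sits under #if 0 because the SWP instructions are deprecated from ARMv6 onwards and are not a safe SMP primitive; the replacement masks interrupts around a plain read-and-zero, which suffices on a uniprocessor. On ARMv6+ the usual lock-free alternative is an exclusive-monitor loop — a minimal sketch under that assumption, not the project's code:

    /* Hypothetical ARMv6+ variant using exclusive monitors */
    static inline u8 atomic_dest_readb_ex(unsigned long *location)
    {
        unsigned int old, fail;

        __asm__ __volatile__ (
        "1:     ldrexb  %0, [%2]        \n"     /* read current byte */
        "       strexb  %1, %3, [%2]    \n"     /* try to store zero */
        "       teq     %1, #0          \n"     /* lost the race? */
        "       bne     1b              \n"
        : "=&r" (old), "=&r" (fail)
        : "r" (location), "r" (0)
        : "memory", "cc");

        return (u8)old;
    }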
@@ -372,9 +372,9 @@ void arch_space_switch(struct ktcb *to)
void idle_task(void)
{
    while(1) {
        /* Do maintenance */
        tcb_delete_zombies();

        // printk("Idle task.\n");
        schedule();
    }
}
@@ -7,7 +7,6 @@
#include INC_ARCH(asm.h)
#include INC_ARCH(asm-macros.S)

.balign 4096
.section .data.vectors
__vector_vaddr:

@@ -896,5 +895,4 @@ __irq_stack: .space 128
__fiq_stack: .space 128
__und_stack: .space 128

.balign 4096
@@ -1,25 +1,30 @@
/*
 * PLXXX Generic Interrupt Controller support.
 * Generic Interrupt Controller support.
 *
 * This is more ARM Realview EB/PB
 * Copyright (C) 2009-2010 B Labs Ltd.
 * Author: Prem Mallappa <prem.mallappa@b-labs.co.uk>
 *
 * Authors: Prem Mallappa, Bahadir Balban
 */

#include <l4/lib/bit.h>
#include <l4/lib/printk.h>
#include <l4/generic/irq.h>
#include INC_PLAT(irq.h)
#include INC_SUBARCH(mmu_ops.h) /* for dmb/dsb() */
#include INC_SUBARCH(mmu_ops.h)
#include <l4/drivers/irq/gic/gic.h>
#include <l4/generic/smp.h>

#define GIC_ACK_IRQ_MASK    0x1FF
#define GIC_ACK_CPU_MASK    0xE00
#define GIC_IRQ_SPURIOUS    0x3FF

volatile struct gic_data gic_data[IRQ_CHIPS_MAX];

static inline struct gic_data *get_gic_data(l4id_t irq)
{
    struct irq_chip *chip = irq_desc_array[irq].chip;
    volatile struct irq_chip *chip = irq_desc_array[irq].chip;

    if (chip)
        return (struct gic_data *)irq_desc_array[irq].chip->data;
        return (struct gic_data *)chip->data;
    else
        return 0;
}
@@ -27,115 +32,129 @@ static inline struct gic_data *get_gic_data(l4id_t irq)
/* Returns the irq number on this chip converting the irq bitvector */
l4id_t gic_read_irq(void *data)
{
    int irq;
    volatile struct gic_data *gic = (struct gic_data *)data;
    irq = gic->cpu->ack & 0x1ff;
    l4id_t irq = gic->cpu->ack;

    if (irq == 1023)
        return -1023; /* Spurious */
    /* This is an IPI - EOI it here, since it requires cpu field */
    if ((irq & GIC_ACK_IRQ_MASK) < 16) {
        gic_eoi_irq(irq);
        /* Get the actual irq number */
        irq &= GIC_ACK_IRQ_MASK;
    }

    /* Detect GIC spurious magic value and return generic one */
    if (irq == GIC_IRQ_SPURIOUS)
        return IRQ_SPURIOUS;
    return irq;
}
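With the masks defined at the top of the file, the acknowledge value packs the interrupt ID into its low bits and, for software-generated interrupts (IDs 0-15), the sender CPU next to it — which is why an IPI must be EOId before the CPU field is stripped. An illustrative decode using the diff's masks (the helper names are not from the source):

    static inline l4id_t gic_ack_irqno(u32 ack)
    {
        return ack & GIC_ACK_IRQ_MASK;          /* interrupt id */
    }

    static inline int gic_ack_srccpu(u32 ack)
    {
        return (ack & GIC_ACK_CPU_MASK) >> 9;   /* sender, IPIs only */
    }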
void gic_mask_irq(l4id_t irq)
{
    u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 5; /* irq / 32 */

    gic->dist->clr_en[offset] = 1 << (irq % 32);
}

void gic_unmask_irq(l4id_t irq)
{
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 5; /* irq / 32 */

    u32 offset = irq >> 5; /* offset = irq / 32 */
    gic->dist->set_en[offset] = 1 << (irq % 32);
}

void gic_ack_irq(l4id_t irq)
void gic_eoi_irq(l4id_t irq)
{
    u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
    volatile struct gic_data *gic = get_gic_data(irq);
    gic->dist->clr_en[offset] = 1 << (irq % 32);
    /* Careful, irq may have cpu field encoded */
    volatile struct gic_data *gic =
        get_gic_data(irq & GIC_ACK_IRQ_MASK);

    gic->cpu->eoi = irq;
}

void gic_ack_and_mask(l4id_t irq)
{
    gic_ack_irq(irq);
    //printk("disable/eoi irq %d\n", irq);
    gic_mask_irq(irq);
    gic_eoi_irq(irq);
}

void gic_set_pending(l4id_t irq)
{
    u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 5; /* irq / 32 */
    gic->dist->set_pending[offset] = 1 << (irq % 32);
}

void gic_clear_pending(l4id_t irq)
{
    u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 5; /* irq / 32 */

    gic->dist->clr_pending[offset] = 1 << (irq % 32);
}
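All of these helpers share the same bit math: with one bit per interrupt and 32 bits per register, the word index is irq / 32 and the bit position is irq % 32, so for example irq 34 lands in word 1 as mask 0x4. A tiny illustrative helper (not in the source) that captures the split:

    static inline void gic_irq_word_bit(l4id_t irq, u32 *word, u32 *mask)
    {
        *word = irq >> 5;           /* irq / 32 */
        *mask = 1 << (irq % 32);    /* single-bit mask within the word */
    }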
void gic_cpu_init(int idx, unsigned long base)
{
    struct gic_cpu *cpu;
    cpu = gic_data[idx].cpu = (struct gic_cpu *)base;
    volatile struct gic_cpu *cpu;

    gic_data[idx].cpu = (struct gic_cpu *)base;

    cpu = gic_data[idx].cpu;

    /* Disable */
    cpu->control = 0;

    /* Set */
    cpu->prio_mask = 0xf0;
    cpu->bin_point = 3;

    /* Enable */
    cpu->control = 1;
}

void gic_dist_init(int idx, unsigned long base)
{
    int i, irqs_per_word; /* Interrupts per word */
    struct gic_dist *dist;
    dist = gic_data[idx].dist = (struct gic_dist *)(base);
    volatile struct gic_dist *dist;
    int irqs_per_word;
    int nirqs;

    /* Surely disable GIC */
    gic_data[idx].dist = (struct gic_dist *)(base);

    dist = gic_data[idx].dist;

    /* Disable gic */
    dist->control = 0;

    /* 32*(N+1) interrupts supported */
    int nirqs = 32 * ((dist->type & 0x1f) + 1);
    nirqs = 32 * ((dist->type & 0x1f) + 1);
    if (nirqs > IRQS_MAX)
        nirqs = IRQS_MAX;

    /* Clear all interrupts */
    /* Disable all interrupts */
    irqs_per_word = 32;
    for(i = 0; i < nirqs ; i+=irqs_per_word) {
    for (int i = 0; i < nirqs; i += irqs_per_word)
        dist->clr_en[i/irqs_per_word] = 0xffffffff;
    }

    /* Clear all pending interrupts */
    for(i = 0; i < nirqs ; i+=irqs_per_word) {
    for (int i = 0; i < nirqs; i += irqs_per_word)
        dist->clr_pending[i/irqs_per_word] = 0xffffffff;
    }

    /* Set all irqs as normal priority, 8 bits per interrupt */
    irqs_per_word = 4;
    for(i = 32; i < nirqs ; i+=irqs_per_word) {
    for (int i = 32; i < nirqs; i += irqs_per_word)
        dist->priority[i/irqs_per_word] = 0xa0a0a0a0;
    }

    /* Set all target to cpu0, 8 bits per interrupt */
    for(i = 32; i < nirqs ; i+=irqs_per_word) {
    for (int i = 32; i < nirqs; i += irqs_per_word)
        dist->target[i/irqs_per_word] = 0x01010101;
    }

    /* Configure all to be level-sensitive, 2 bits per interrupt */
    irqs_per_word = 16;
    for(i = 32; i < nirqs ; i+=irqs_per_word) {
    for (int i = 32; i < nirqs; i += irqs_per_word)
        dist->config[i/irqs_per_word] = 0x00000000;
    }

    /* Enable GIC Distributor */
    dist->control = 1;
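The irqs_per_word strides follow directly from the register field widths: the enable and pending banks pack 1 bit per interrupt (32 per 32-bit word), priority and target pack 8 bits (4 per word), and config packs 2 bits (16 per word). The priority, target and config loops start at 32 because IDs 0-31 are the per-CPU software and private interrupts, which are left at their defaults here.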
@@ -143,24 +162,28 @@ void gic_dist_init(int idx, unsigned long base)

/* Some functions, may be helpful */
void gic_set_target(u32 irq, u32 cpu)
void gic_set_target(l4id_t irq, u32 cpu)
{
    /* cpu is a mask, not cpu number */
    cpu &= 0xF;
    irq &= 0xFF;
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 2; /* offset = irq / 4 */
    u32 offset = irq >> 2; /* irq / 4 */

    if (cpu > 1) {
        printk("Setting irqs to reach multiple cpu targets requires a"
               "lock on the irq controller\n"
               "GIC is a racy hardware in this respect\n");
        BUG();
    }

    gic->dist->target[offset] |= (cpu << ((irq % 4) * 8));
}

u32 gic_get_target(u32 irq)
{
    /* cpu is a mask, not cpu number */
    unsigned int target;
    irq &= 0xFF;
    u32 offset = irq >> 2; /* offset = irq / 4 */
    volatile struct gic_data *gic = get_gic_data(irq);
    target = gic->dist->target[offset];
    u32 offset = irq >> 2; /* irq / 4 */
    unsigned int target = gic->dist->target[offset];

    BUG_ON(irq > 0xFF);
    target >>= ((irq % 4) * 8);

    return target & 0xFF;
@@ -168,54 +191,44 @@ u32 gic_get_target(u32 irq)

void gic_set_priority(u32 irq, u32 prio)
{
    /* cpu is a mask, not cpu number */
    prio &= 0xF;
    irq &= 0xFF;
    u32 offset = irq >> 3; /* offset = irq / 8 */
    volatile struct gic_data *gic = get_gic_data(irq);
    u32 offset = irq >> 3; /* irq / 8 */

    BUG_ON(prio > 0xF);
    BUG_ON(irq > 0xFF);

    /* target = cpu << ((irq % 4) * 4) */
    gic->dist->target[offset] |= (prio << (irq & 0x1C));
}

u32 gic_get_priority(u32 irq)
{
    /* cpu is a mask, not cpu number */
    irq &= 0xFF;
    u32 offset = irq >> 3; /* offset = irq / 8 */
    volatile struct gic_data *gic = get_gic_data(irq);
    return gic->dist->target[offset] & (irq & 0xFC);
    u32 offset = irq >> 3; /* offset = irq / 8 */
    u32 prio = gic->dist->target[offset] & (irq & 0xFC);

    return prio;
}
#define TO_MANY     0   /* to all specified in a CPU mask */
#define TO_OTHERS   1   /* all but me */
#define TO_SELF     2   /* just to the requesting CPU */
#define IPI_CPU_SHIFT   16

#define CPU_MASK_BIT    16
#define TYPE_MASK_BIT   24

void gic_send_ipi(int cpu, int ipi_cmd)
void gic_send_ipi(int cpumask, int ipi_cmd)
{
    /* if cpu is 0, then ipi is sent to self
     * if cpu has exactly 1 bit set, the ipi to just that core
     * if cpu has a mask, sent to all but current
     */
    struct gic_dist *dist = gic_data[0].dist;

    ipi_cmd &= 0xf;
    cpu &= 0xff;

    dsb();

    if (cpu == 0) /* Self */
        dist->soft_int = (TO_SELF << 24) | ipi_cmd;
    else if ((cpu & (cpu-1)) == 0) /* Exactly to one CPU */
        dist->soft_int = (TO_MANY << 24) | (cpu << 16) | ipi_cmd;
    else /* All but me */
        dist->soft_int = (TO_OTHERS << 24) | (cpu << 16) | ipi_cmd;
    volatile struct gic_dist *dist = gic_data[0].dist;
    unsigned int ipi_word = (cpumask << IPI_CPU_SHIFT) | ipi_cmd;

    dist->soft_int = ipi_word;
}

/* Make the generic code happy :) */
void gic_print_cpu()
{
    volatile struct gic_cpu *cpu = gic_data[0].cpu;

    printk("GIC CPU%d highest pending: %d\n", smp_get_cpuid(), cpu->high_pending);
    printk("GIC CPU%d running: %d\n", smp_get_cpuid(), cpu->running);
}

/* Make the generic code happy */
void gic_dummy_init()
{
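The rewrite drops the three-way TO_SELF/TO_OTHERS/TO_MANY dispatch: leaving the filter field (bits [25:24]) at zero tells the controller to deliver to exactly the CPUs named in the target-list field, so a single encoding covers every case once callers pass an explicit mask. A hedged restatement of the encoding the new code relies on:

    /* Illustrative: soft_int word with a zero "use target list" filter,
     * the cpu mask in bits [23:16] and the IPI number in bits [3:0]. */
    static inline unsigned int gic_ipi_word(unsigned int cpumask, int ipi_cmd)
    {
        return ((cpumask & 0xff) << IPI_CPU_SHIFT) | (ipi_cmd & 0xf);
    }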
@@ -121,6 +121,7 @@ int init_pager(struct pager *pager, struct container *cont)
    /* Add the address space to container space list */
    address_space_add(task->space);

#if 0
    printk("%s: Mapping 0x%lx bytes (%lu pages) "
           "from 0x%lx to 0x%lx for %s\n",
           __KERNELNAME__, pager->memsize,
@@ -131,6 +132,58 @@ int init_pager(struct pager *pager, struct container *cont)
    add_mapping_pgd(pager->start_lma, pager->start_vma,
                    page_align_up(pager->memsize),
                    MAP_USR_RWX, TASK_PGD(task));
#else
    /*
     * Map the pager with appropriate section flags.
     * We page-align the section boundaries so that different
     * kinds of data (e.g. RX, RO) lying on the same page are
     * handled automatically. One assumption made here is that
     * the start of the first RW section is already page
     * aligned; if it is not, special care must be taken.
     */
    if (pager->rx_sections_end >= pager->rw_sections_start) {
        pager->rx_sections_end = page_align(pager->rx_sections_end);
        pager->rw_sections_start = page_align(pager->rw_sections_start);
    }

    unsigned long size = 0;
    if ((size = page_align_up(pager->rx_sections_end) -
                page_align_up(pager->rx_sections_start))) {
        add_mapping_pgd(page_align_up(pager->rx_sections_start -
                                      pager->start_vma +
                                      pager->start_lma),
                        page_align_up(pager->rx_sections_start),
                        size, MAP_USR_RX, TASK_PGD(task));

        printk("%s: Mapping 0x%lx bytes as RX "
               "from 0x%lx to 0x%lx for %s\n",
               __KERNELNAME__, size,
               page_align_up(pager->rx_sections_start -
                             pager->start_vma + pager->start_lma),
               page_align_up(pager->rx_sections_start),
               cont->name);
    }

    if ((size = page_align_up(pager->rw_sections_end) -
                page_align_up(pager->rw_sections_start))) {
        add_mapping_pgd(page_align_up(pager->rw_sections_start -
                                      pager->start_vma +
                                      pager->start_lma),
                        page_align_up(pager->rw_sections_start),
                        size, MAP_USR_RW, TASK_PGD(task));

        printk("%s: Mapping 0x%lx bytes as RW "
               "from 0x%lx to 0x%lx for %s\n",
               __KERNELNAME__, size,
               page_align_up(pager->rw_sections_start -
                             pager->start_vma + pager->start_lma),
               page_align_up(pager->rw_sections_start),
               cont->name);
    }

#endif

    /* Move capability list from dummy to task's space cap list */
    cap_list_move(&task->space->cap_list, &current->cap_list);
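A worked example of the alignment above: with 4 KB pages, if the RX sections end at 0x10100 and the RW sections begin at 0x10100, both boundaries round down to 0x10000; the RX mapping then stops at 0x10000 and the RW mapping begins there, so the page shared by both kinds of data is mapped exactly once, with RW attributes, rather than twice with conflicting permissions.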
@@ -1,7 +1,7 @@
/*
 * Generic kernel irq handling.
 *
 * Copyright (C) 2007 - 2009 Bahadir Balban
 * Copyright (C) 2007 - 2010 Bahadir Balban
 */
#include <l4/config.h>
#include <l4/macros.h>
@@ -127,10 +127,21 @@ l4id_t global_irq_index(void)
    return IRQ_NIL;
}

#include <l4/drivers/irq/gic/gic.h>

void do_irq(void)
{
    l4id_t irq_index = global_irq_index();
    struct irq_desc *this_irq = irq_desc_array + irq_index;
    struct irq_desc *this_irq;

    if (irq_index == IRQ_SPURIOUS) {
        printk("CPU%d: FATAL: Spurious irq\n", smp_get_cpuid());
        BUG();
    }

    // printk("CPU%d: Received irq %d\n", smp_get_cpuid(), irq_index);

    this_irq = irq_desc_array + irq_index;

    system_account_irq();

@@ -148,16 +159,10 @@ void do_irq(void)
    /* Handle the irq */
    BUG_ON(!this_irq->handler);
    if (this_irq->handler(this_irq) != IRQ_HANDLED) {
        printk("Spurious or broken irq\n");
        printk("CPU%d: FATAL: Spurious or broken irq\n",
               smp_get_cpuid());
        BUG();
    }

    /*
     * Do not enable irq if user wants to do it explicitly
     */
    if (!this_irq->user_ack)
        irq_enable(irq_index);
    irq_enable(irq_index);
}
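The removed user_ack guard pairs with the platform changes later in this commit: handlers that forward to userspace now silence the interrupt at the device itself (for example by clearing the PL050 receive-interrupt enable bit) before notifying the user thread, so the generic path can re-enable the controller line unconditionally.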
@@ -503,6 +503,10 @@ int copy_pager_info(struct pager *pager, struct pager_info *pinfo)
    pager->start_lma = __pfn_to_addr(pinfo->pager_lma);
    pager->start_vma = __pfn_to_addr(pinfo->pager_vma);
    pager->memsize = __pfn_to_addr(pinfo->pager_size);
    pager->rw_sections_start = pinfo->rw_sections_start;
    pager->rw_sections_end = pinfo->rw_sections_end;
    pager->rx_sections_start = pinfo->rx_sections_start;
    pager->rx_sections_end = pinfo->rx_sections_end;

    /* Copy all cinfo structures into real capabilities */
    for (int i = 0; i < pinfo->ncaps; i++) {
@@ -119,12 +119,14 @@ void sched_init()

    sched->rq_runnable = &sched->sched_rq[0];
    sched->rq_expired = &sched->sched_rq[1];
    sched->rq_rt_runnable = &sched->sched_rq[2];
    sched->rq_rt_expired = &sched->sched_rq[3];
    sched->prio_total = TASK_PRIO_TOTAL;
    sched->idle_task = current;
}

/* Swap runnable and expired runqueues. */
static void sched_rq_swap_runqueues(void)
static void sched_rq_swap_queues(void)
{
    struct runqueue *temp;

@@ -136,6 +138,18 @@ static void sched_rq_swap_runqueues(void)
    per_cpu(scheduler).rq_expired = temp;
}

static void sched_rq_swap_rtqueues(void)
{
    struct runqueue *temp;

    BUG_ON(list_empty(&per_cpu(scheduler).rq_rt_expired->task_list));

    /* Queues are swapped and expired list becomes runnable */
    temp = per_cpu(scheduler).rq_rt_runnable;
    per_cpu(scheduler).rq_rt_runnable = per_cpu(scheduler).rq_rt_expired;
    per_cpu(scheduler).rq_rt_expired = temp;
}

/* Set policy on where to add tasks in the runqueue */
#define RQ_ADD_BEHIND   0
#define RQ_ADD_FRONT    1
@@ -185,6 +199,28 @@ static inline void sched_rq_remove_task(struct ktcb *task)
    sched_unlock_runqueues(sched, irqflags);
}
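The per-CPU scheduler thus grows from one runnable/expired pair to two: sched_rq[0] and sched_rq[1] serve time-shared tasks, sched_rq[2] and sched_rq[3] serve real-time tasks, and each pair is swapped independently when its runnable queue drains.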
static inline void
sched_run_task(struct ktcb *task, struct scheduler *sched)
{
    if (task->flags & TASK_REALTIME)
        sched_rq_add_task(task, sched->rq_rt_runnable,
                          RQ_ADD_BEHIND);
    else
        sched_rq_add_task(task, sched->rq_runnable,
                          RQ_ADD_BEHIND);
}

static inline void
sched_expire_task(struct ktcb *task, struct scheduler *sched)
{
    if (task->flags & TASK_REALTIME)
        sched_rq_add_task(task, sched->rq_rt_expired,
                          RQ_ADD_BEHIND);
    else
        sched_rq_add_task(task, sched->rq_expired,
                          RQ_ADD_BEHIND);
}

void sched_init_task(struct ktcb *task, int prio)
{
@@ -196,6 +232,27 @@ void sched_init_task(struct ktcb *task, int prio)
    task->flags |= TASK_RESUMING;
}
/* Synchronously resumes a task */
void sched_resume_sync(struct ktcb *task)
{
    BUG_ON(task == current);
    task->state = TASK_RUNNABLE;
    sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
    schedule();
}

/*
 * Asynchronously resumes a task.
 * The task will run in the future, but at
 * the scheduler's discretion. It is possible that current
 * task wakes itself up via this function in the scheduler().
 */
void sched_resume_async(struct ktcb *task)
{
    task->state = TASK_RUNNABLE;
    sched_run_task(task, &per_cpu_byid(scheduler, task->affinity));
}
/*
 * Takes all the action that will make a task sleep
 * in the scheduler. If the task is woken up before
@@ -210,37 +267,10 @@ void sched_prepare_sleep()
    preempt_enable();
}

/* Synchronously resumes a task */
void sched_resume_sync(struct ktcb *task)
{
    BUG_ON(task == current);
    task->state = TASK_RUNNABLE;
    sched_rq_add_task(task,
                      per_cpu_byid(scheduler,
                                   task->affinity).rq_runnable,
                      RQ_ADD_FRONT);
    schedule();
}

/*
 * Asynchronously resumes a task.
 * The task will run in the future, but at
 * the scheduler's discretion. It is possible that current
 * task wakes itself up via this function in the scheduler().
 */
void sched_resume_async(struct ktcb *task)
{
    task->state = TASK_RUNNABLE;
    sched_rq_add_task(task,
                      per_cpu_byid(scheduler,
                                   task->affinity).rq_runnable,
                      RQ_ADD_FRONT);
    // printk("CPU%d: Resuming task %d with affinity %d\n", smp_get_cpuid(), task->tid, task->affinity);
}
/*
 * NOTE: Could do these as sched_prepare_suspend()
 * + schedule() or need_resched = 1
 * preempt_enable/disable()'s are for avoiding the
 * entry to scheduler during this period - but this
 * is only true for current cpu.
 */
void sched_suspend_sync(void)
{
@@ -282,6 +312,11 @@ static inline void context_switch(struct ktcb *next)
    system_account_context_switch();

    /* Flush caches and everything */
    BUG_ON(!current);
    BUG_ON(!current->space);
    BUG_ON(!next);
    BUG_ON(!next->space);
    if (current->space->spid != next->space->spid)
        arch_space_switch(next);
/*
 * Select a real-time task on most selections; see the
 * arithmetic note below.
 */
static inline int sched_select_rt(struct scheduler *sched)
{
    int ctr = sched->task_select_ctr++ & 0xF;

    if (ctr == 0 || ctr == 8 || ctr == 15)
        return 0;
    else
        return 1;
}
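Working through the arithmetic: task_select_ctr & 0xF cycles over 16 selections and the function returns 0 — skip the real-time queues — on three of them (0, 8 and 15), so real-time tasks are preferred on 13 of every 16 picks, roughly 81%. The comment below quotes 87.5%, which would correspond to only two non-real-time slots per 16, so the stated figure slightly overstates what the code implements.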
/*
 * Selection happens as follows:
 *
 * A real-time task is chosen 87.5% of the time. This is evenly
 * distributed over a given interval.
 *
 * Idle task is run once when it is explicitly suggested (e.g.
 * for cleanup after a task exited) but only when no real-time
 * tasks are in the queues.
 *
 * The idle task is otherwise run only when no other tasks are
 * runnable.
 */
struct ktcb *sched_select_next(void)
{
    struct scheduler *sched = &per_cpu(scheduler);
    int realtime = sched_select_rt(sched);
    struct ktcb *next = 0;

    for (;;) {
        /* Decision to run an RT task? */
        if (realtime && sched->rq_rt_runnable->total > 0) {
            /* Get a real-time task, if available */
            next = link_to_struct(sched->rq_rt_runnable->task_list.next,
                                  struct ktcb, rq_list);
            break;
        } else if (realtime && sched->rq_rt_expired->total > 0) {
            /* Swap real-time queues */
            sched_rq_swap_rtqueues();
            /* Get a real-time task */
            next = link_to_struct(sched->rq_rt_runnable->task_list.next,
                                  struct ktcb, rq_list);
            break;
        /* Idle flagged for run? */
        } else if (sched->flags & SCHED_RUN_IDLE) {
            /* Clear idle flag */
            sched->flags &= ~SCHED_RUN_IDLE;
            next = sched->idle_task;
            break;
        } else if (sched->rq_runnable->total > 0) {
            /* Get a regular runnable task, if available */
            next = link_to_struct(sched->rq_runnable->task_list.next,
                                  struct ktcb, rq_list);
            break;
        } else if (sched->rq_expired->total > 0) {
            /* Swap queues and pick from the refreshed runnable queue */
            sched_rq_swap_queues();
            next = link_to_struct(sched->rq_runnable->task_list.next,
                                  struct ktcb, rq_list);
            break;
        } else if (in_process_context()) {
            /* No runnable task. Do idle if in process context */
            next = sched->idle_task;
            break;
        } else {
            /*
             * Nobody is runnable. Irq calls must return
             * to interrupted current process to run idle task
             */
            next = current;
            break;
        }
    }
    return next;
}
/* Prepare next runnable task right before switching to it */
void sched_prepare_next(struct ktcb *next)
{
    /* New tasks affect runqueue total priority. */
    if (next->flags & TASK_RESUMING)
        next->flags &= ~TASK_RESUMING;

    /* Zero ticks indicates task hasn't run since last rq swap */
    if (next->ticks_left == 0) {
        /*
         * Redistribute timeslice. We do this as each task
         * becomes runnable rather than all at once. It is done
         * every runqueue swap
         */
        sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
        next->ticks_left = next->ticks_assigned;
    }

    /* Reinitialise task's schedule granularity boundary */
    next->sched_granule = SCHED_GRANULARITY;
}
/*
 * Tasks come here, either by setting need_resched (via next irq),
@@ -359,13 +495,9 @@ void schedule()
    if (current->state == TASK_RUNNABLE) {
        sched_rq_remove_task(current);
        if (current->ticks_left)
            sched_rq_add_task(current,
                              per_cpu(scheduler).rq_runnable,
                              RQ_ADD_BEHIND);
            sched_run_task(current, &per_cpu(scheduler));
        else
            sched_rq_add_task(current,
                              per_cpu(scheduler).rq_expired,
                              RQ_ADD_BEHIND);
            sched_expire_task(current, &per_cpu(scheduler));
    }

    /*
@@ -391,52 +523,17 @@ void schedule()
        sched_suspend_async();
    }

    /* Simpler task pick up loop. May put in sched_pick_next() */
    for (;;) {
        struct scheduler *sched = &per_cpu(scheduler);

        /* If we or a child has just exited, run idle task once for clean up */
        if (current->flags & TASK_EXITED) {
            current->flags &= ~TASK_EXITED;
            next = sched->idle_task;
            break;
        } else if (sched->rq_runnable->total > 0) {
            /* Get a runnable task, if available */
            next = link_to_struct(sched->rq_runnable->task_list.next,
                                  struct ktcb, rq_list);
            break;
        } else if (sched->rq_expired->total > 0) {
            /* Swap queues and retry */
            sched_rq_swap_runqueues();
            continue;
        } else if (in_process_context()) {
            /* Do idle task if no runnable tasks and in process */
            next = sched->idle_task;
            break;
        } else {
            /* Irq calls must return to interrupted current process */
            next = current;
            break;
        }
    }
    /* Hint scheduler to run idle asap to free task */
    if (current->flags & TASK_EXITED) {
        current->flags &= ~TASK_EXITED;
        per_cpu(scheduler).flags |= SCHED_RUN_IDLE;
    }

    /* New tasks affect runqueue total priority. */
    if (next->flags & TASK_RESUMING)
        next->flags &= ~TASK_RESUMING;
    /* Decide on next runnable task */
    next = sched_select_next();

    /* Zero ticks indicates task hasn't run since last rq swap */
    if (next->ticks_left == 0) {
        /*
         * Redistribute timeslice. We do this as each task
         * becomes runnable rather than all at once. It is done
         * every runqueue swap
         */
        sched_recalc_ticks(next, per_cpu(scheduler).prio_total);
        next->ticks_left = next->ticks_assigned;
    }

    /* Reinitialise task's schedule granularity boundary */
    next->sched_granule = SCHED_GRANULARITY;
    /* Prepare next task for running */
    sched_prepare_next(next);

    /* Finish */
    disable_irqs();
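The net effect of the schedule() rewrite: the open-coded pick-up loop moves into sched_select_next(), timeslice and granularity bookkeeping moves into sched_prepare_next(), requeueing of the outgoing task goes through the real-time-aware sched_run_task()/sched_expire_task() helpers, and a task that has exited no longer forces an immediate idle pass but merely sets the SCHED_RUN_IDLE hint.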
@@ -34,7 +34,6 @@ void tcb_init(struct ktcb *new)

    spin_lock_init(&new->thread_lock);

    init_ktcb_list(&new->child_exit_list);
    cap_list_init(&new->cap_list);

    /* Initialise task's scheduling state and parameters. */
@@ -15,6 +15,7 @@
#include INC_ARCH(exception.h)
#include <l4/api/syscall.h>
#include <l4/api/errno.h>
#include INC_GLUE(ipi.h) /* FIXME: Remove this */

/* TODO:
 * 1) Add RTC support.
@@ -141,13 +142,23 @@ void update_process_times(void)
    need_resched = 1;
}

int do_timer_irq(void)
{
    increase_jiffies();
    update_process_times();
    update_system_time();

#if defined (CONFIG_SMP)
    smp_send_ipi(cpu_mask_others(), IPI_TIMER_EVENT);
#endif

    return IRQ_HANDLED;
}

/* Secondary cpus call this */
int secondary_timer_irq(void)
{
    update_process_times();
    return IRQ_HANDLED;
}
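On SMP only the boot CPU takes the global timer tick: do_timer_irq() advances jiffies and system time once, then broadcasts IPI_TIMER_EVENT so every secondary updates its own per-CPU process times through secondary_timer_irq() — see the ipi_handler() hunk below.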
@@ -16,7 +16,7 @@ src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c']

for name, val in symbols:
    if 'CONFIG_SMP' == name:
        src_local += ['smp.c', 'ipi.c', 'smp_test.c']
        src_local += ['smp.c', 'ipi.c']

obj = env.Object(src_local)
Return('obj')
@@ -143,23 +143,13 @@ void setup_idle_task()
    /* Initialize space caps list */
    cap_list_init(&current->space->cap_list);

#if 0
    /*
     * Unneeded stuff
     */
    /*
     * Set up idle context.
     */
    current->context.spsr = ARM_MODE_SVC;
    current->context.pc = (u32)idle_task;
    current->context.sp = (u32)align((unsigned long)current + PAGE_SIZE,
                                     STACK_ALIGNMENT);
#endif

    /*
     * FIXME: This must go to kernel resources init.
     */

    /* Init scheduler structs */
    sched_init_task(current, TASK_PRIO_NORMAL);

    /*
     * If using split page tables, kernel
     * resources must point at the global pgd
@@ -236,18 +226,18 @@ void start_kernel(void)

    sched_init();

    /* Try to initialize secondary cores if there are any */
    smp_start_cores();

    /* Remove one-to-one kernel mapping */
    remove_initial_mapping();

    /*
     * Map and enable high vector page.
     * Faults can be handled after here.
     */
    vectors_init();

    /* Try to initialize secondary cores if there are any */
    smp_start_cores();

    /* Remove one-to-one kernel mapping */
    remove_initial_mapping();

    /* Remap 1MB kernel sections as 4Kb pages. */
    remap_as_pages((void *)page_align(_start_kernel),
                   (void *)page_align_up(_end_kernel));
@@ -10,9 +10,32 @@
#include INC_GLUE(smp.h)
#include INC_SUBARCH(cpu.h)
#include <l4/lib/printk.h>
#include <l4/drivers/irq/gic/gic.h>
#include <l4/generic/time.h>

/* This should be in a file something like exception.S */
int ipi_handler(struct irq_desc *desc)
{
    /* Pointer subtraction already yields an element index */
    int ipi_event = desc - irq_desc_array;

    // printk("CPU%d: entered IPI%d\n", smp_get_cpuid(),
    //        desc - irq_desc_array);

    switch (ipi_event) {
    case IPI_TIMER_EVENT:
        // printk("CPU%d: Handling timer ipi\n", smp_get_cpuid());
        secondary_timer_irq();
        break;
    default:
        printk("CPU%d: IPI with no meaning: %d\n",
               smp_get_cpuid(), ipi_event);
        break;
    }
    return 0;
}

void smp_send_ipi(unsigned int cpumask, int ipi_num)
{
    gic_send_ipi(cpumask, ipi_num);
}
@@ -18,7 +18,7 @@
#include <l4/drivers/irq/gic/gic.h>

unsigned long secondary_run_signal;

unsigned long secondary_ready_signal;

void __smp_start(void);
@@ -35,14 +35,17 @@ void smp_start_cores(void)
    arm_smp_inval_icache_entirely();

    /* Start other cpus */
    for (int i = 1; i < CONFIG_NCPU; i++) {
        printk("%s: Bringing up CPU%d\n", __KERNELNAME__, i);
        if ((platform_smp_start(i, smp_start_func)) < 0) {
    for (int cpu = 1; cpu < CONFIG_NCPU; cpu++) {
        printk("%s: Bringing up CPU%d\n", __KERNELNAME__, cpu);
        if ((platform_smp_start(cpu, smp_start_func)) < 0) {
            printk("FATAL: Could not start secondary cpu. "
                   "cpu=%d\n", i);
                   "cpu=%d\n", cpu);
            BUG();
        }
        wfi(); /* wait for the other cpu to send an IPI to core0 */

        /* Wait for this particular secondary to become ready */
        while (!(secondary_ready_signal & CPUID_TO_MASK(cpu)))
            dmb();
    }

    scu_print_state();
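The wfi()-based rendezvous gives way to an explicit handshake: each secondary publishes readiness by setting its bit in secondary_ready_signal behind a dmb() (see the smp_secondary_init() hunk below), while the boot CPU polls for that bit. Unlike wfi(), the poll cannot miss a wakeup that arrives before the boot CPU starts waiting, and it identifies exactly which secondary came up.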
@@ -50,12 +53,11 @@ void smp_start_cores(void)

void init_smp(void)
{
    /* Start_secondary_cpus */
    if (CONFIG_NCPU > 1) {

        /* This sets IPI function pointer at bare minimum */
        platform_smp_init(CONFIG_NCPU);
    }
    /* Start_secondary_cpus */
    if (CONFIG_NCPU > 1) {
        /* This sets IPI function pointer at bare minimum */
        platform_smp_init(CONFIG_NCPU);
    }
}
void secondary_setup_idle_task(void)
@@ -122,9 +124,9 @@ void smp_secondary_init(void)

    sched_init();

    dsb();

    gic_send_ipi(CPUID_TO_MASK(0), 0);
    /* Signal primary that we are ready */
    dmb();
    secondary_ready_signal |= cpu_mask_self();

    /*
     * Wait for the first runnable task to become available
@@ -449,6 +449,6 @@ int printk(char *format, ...)

    va_end(args);
    return i;
};
}
@@ -5,6 +5,7 @@
 */
#include <l4/drivers/irq/gic/gic.h>
#include INC_PLAT(irq.h)
#include <l4/platform/realview/irq.h>
#include <l4/generic/irq.h>

extern struct gic_data gic_data[IRQ_CHIPS_MAX];
@@ -61,3 +62,26 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
};
#endif

struct irq_desc irq_desc_array[IRQS_MAX] = {
    [IRQ_TIMER0] = {
        .name = "Timer0",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_handler,
    },
    [IRQ_TIMER1] = {
        .name = "Timer1",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_user_handler,
    },
    [IRQ_KEYBOARD0] = {
        .name = "Keyboard",
        .chip = &irq_chip_array[0],
        .handler = platform_keyboard_user_handler,
    },
    [IRQ_MOUSE0] = {
        .name = "Mouse",
        .chip = &irq_chip_array[0],
        .handler = platform_mouse_user_handler,
    },
};
@@ -17,6 +17,25 @@
#include INC_GLUE(mapping.h)
#include INC_GLUE(smp.h)

/*
 * FIXME: This is not a platform specific
 * call, we will move this out later
 */
void device_cap_init(struct kernel_resources *kres, int devtype,
                     int devnum, unsigned long base)
{
    struct capability *cap;

    cap = alloc_bootmem(sizeof(*cap), 0);
    cap_set_devtype(cap, devtype);
    cap_set_devnum(cap, devnum);
    cap->start = __pfn(base);
    cap->end = cap->start + 1;
    cap->size = cap->end - cap->start;
    link_init(&cap->list);
    cap_list_insert(cap, &kres->devmem_free);
}

/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
@@ -24,45 +43,12 @@
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
    struct capability *uart[4], *timer[4];

    /* Setup capabilities for userspace uarts and timers */
    uart[1] = alloc_bootmem(sizeof(*uart[1]), 0);
    uart[1]->start = __pfn(PLATFORM_UART1_BASE);
    uart[1]->end = uart[1]->start + 1;
    uart[1]->size = uart[1]->end - uart[1]->start;
    cap_set_devtype(uart[1], CAP_DEVTYPE_UART);
    cap_set_devnum(uart[1], 1);
    link_init(&uart[1]->list);
    cap_list_insert(uart[1], &kres->devmem_free);

    uart[2] = alloc_bootmem(sizeof(*uart[2]), 0);
    uart[2]->start = __pfn(PLATFORM_UART2_BASE);
    uart[2]->end = uart[2]->start + 1;
    uart[2]->size = uart[2]->end - uart[2]->start;
    cap_set_devtype(uart[2], CAP_DEVTYPE_UART);
    cap_set_devnum(uart[2], 2);
    link_init(&uart[2]->list);
    cap_list_insert(uart[2], &kres->devmem_free);

    uart[3] = alloc_bootmem(sizeof(*uart[3]), 0);
    uart[3]->start = __pfn(PLATFORM_UART3_BASE);
    uart[3]->end = uart[3]->start + 1;
    uart[3]->size = uart[3]->end - uart[3]->start;
    cap_set_devtype(uart[3], CAP_DEVTYPE_UART);
    cap_set_devnum(uart[3], 3);
    link_init(&uart[3]->list);
    cap_list_insert(uart[3], &kres->devmem_free);

    /* Setup timer1 capability as free */
    timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
    timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
    timer[1]->end = timer[1]->start + 1;
    timer[1]->size = timer[1]->end - timer[1]->start;
    cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
    cap_set_devnum(timer[1], 1);
    link_init(&timer[1]->list);
    cap_list_insert(timer[1], &kres->devmem_free);
    device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
    device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
    device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
    device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
    device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
    device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);

    return 0;
}
@@ -98,5 +84,16 @@ void init_platform_irq_controller()

void init_platform_devices()
{
    /* TIMER23 */
    add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* KEYBOARD - KMI0 */
    add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* MOUSE - KMI1 */
    add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);
}
@@ -10,6 +10,7 @@
#include INC_PLAT(platform.h)
#include INC_PLAT(timer.h)
#include INC_ARCH(exception.h)
#include <l4/lib/bit.h>
#include <l4/drivers/irq/pl190/pl190_vic.h>

struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
@@ -68,8 +69,18 @@ static int platform_timer_user_handler(struct irq_desc *desc)
/*
 * Keyboard handler for userspace
 */
#define PL050_KMICR         0x00
#define PL050_KMI_RXINTR    (1 << 0x4)

static int platform_keyboard_user_handler(struct irq_desc *desc)
{
    /*
     * Disable rx keyboard interrupt.
     * User will enable this
     */
    clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR,
           PL050_KMI_RXINTR);

    irq_thread_notify(desc);
    return 0;
}
@@ -79,6 +90,13 @@ static int platform_keyboard_user_handler(struct irq_desc *desc)
 */
static int platform_mouse_user_handler(struct irq_desc *desc)
{
    /*
     * Disable rx mouse interrupt.
     * User will enable this
     */
    clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR,
           PL050_KMI_RXINTR);

    irq_thread_notify(desc);
    return 0;
}
@@ -92,25 +110,21 @@ struct irq_desc irq_desc_array[IRQS_MAX] = {
        .name = "Timer0",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_handler,
        .user_ack = 0,
    },
    [IRQ_TIMER1] = {
        .name = "Timer1",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_user_handler,
        .user_ack = 0,
    },
    [IRQ_KEYBOARD0] = {
        .name = "Keyboard",
        .chip = &irq_chip_array[1],
        .handler = platform_keyboard_user_handler,
        .user_ack = 1,
    },
    [IRQ_MOUSE0] = {
        .name = "Mouse",
        .chip = &irq_chip_array[1],
        .handler = platform_mouse_user_handler,
        .user_ack = 1,
    },
};
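The PL050 control register (KMICR, at offset 0x00) holds the receive-interrupt enable in bit 4; clearing it quiets the device until the userspace driver has drained the FIFO and re-arms it. A sketch of the matching userspace re-enable, assuming the device page is mapped into the driver — the helper name is illustrative:

    /* Hypothetical userspace side: re-arm the PL050 rx interrupt
     * after consuming the pending byte. */
    static inline void kmi_rx_enable(volatile unsigned int *kmi_base)
    {
        kmi_base[PL050_KMICR] |= PL050_KMI_RXINTR;
    }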
@@ -103,18 +103,24 @@ void init_platform_irq_controller()
    irq_controllers_init();
}

/*
 * Add userspace devices here as you develop
 * their irq handlers,
 * Only the devices to which kernel has to do
 * anything needs to be mapped, rest will be
 * mapped in userspace by user
 */
/* Add userspace devices here as you develop their irq handlers */
void init_platform_devices()
{
    /* TIMER23 */
    add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* KEYBOARD - KMI0 */
    add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* MOUSE - KMI1 */
    add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* CLCD */
    add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);
}

/* If these bits are off, 32Khz OSC source is used */
@@ -9,7 +9,9 @@
#include INC_PLAT(irq.h)
#include INC_PLAT(platform.h)
#include INC_ARCH(exception.h)
#include <l4/lib/bit.h>
#include <l4/drivers/irq/gic/gic.h>
#include <l4/platform/realview/irq.h>

extern struct gic_data gic_data[IRQ_CHIPS_MAX];

@@ -26,7 +28,35 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = {
        .read_irq = gic_read_irq,
        .ack_and_mask = gic_ack_and_mask,
        .unmask = gic_unmask_irq,
        .set_cpu = gic_set_target,
    },
    },
};
/*
 * Built-in irq handlers initialised at compile time.
 * Else register with register_irq()
 */
struct irq_desc irq_desc_array[IRQS_MAX] = {
    [IRQ_TIMER0] = {
        .name = "Timer0",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_handler,
    },
    [IRQ_TIMER1] = {
        .name = "Timer1",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_user_handler,
    },
    [IRQ_KEYBOARD0] = {
        .name = "Keyboard",
        .chip = &irq_chip_array[0],
        .handler = platform_keyboard_user_handler,
    },
    [IRQ_MOUSE0] = {
        .name = "Mouse",
        .chip = &irq_chip_array[0],
        .handler = platform_mouse_user_handler,
    },
};
@@ -19,6 +19,25 @@
#include <l4/generic/cap-types.h>
#include <l4/drivers/irq/gic/gic.h>

/*
 * FIXME: This is not a platform specific
 * call, we will move this out later
 */
void device_cap_init(struct kernel_resources *kres, int devtype,
                     int devnum, unsigned long base)
{
    struct capability *cap;

    cap = alloc_bootmem(sizeof(*cap), 0);
    cap_set_devtype(cap, devtype);
    cap_set_devnum(cap, devnum);
    cap->start = __pfn(base);
    cap->end = cap->start + 1;
    cap->size = cap->end - cap->start;
    link_init(&cap->list);
    cap_list_insert(cap, &kres->devmem_free);
}

/*
 * The devices that are used by the kernel are mapped
 * independent of these capabilities, but these provide a
@@ -26,17 +45,13 @@
 */
int platform_setup_device_caps(struct kernel_resources *kres)
{
    struct capability *timer[2];

    /* Setup timer1 capability as free */
    timer[1] = alloc_bootmem(sizeof(*timer[1]), 0);
    timer[1]->start = __pfn(PLATFORM_TIMER1_BASE);
    timer[1]->end = timer[1]->start + 1;
    timer[1]->size = timer[1]->end - timer[1]->start;
    cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER);
    cap_set_devnum(timer[1], 1);
    link_init(&timer[1]->list);
    cap_list_insert(timer[1], &kres->devmem_free);
    device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE);
    device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE);
    device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE);
    device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE);
    device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE);
    device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE);
    device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE);

    return 0;
}
@@ -54,5 +69,21 @@ void init_platform_irq_controller()

void init_platform_devices()
{
    /* TIMER23 */
    add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* KEYBOARD - KMI0 */
    add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* MOUSE - KMI1 */
    add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

    /* CLCD */
    add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);
}
@@ -7,24 +7,61 @@
#include <l4/generic/time.h>
#include INC_PLAT(offsets.h)
#include INC_PLAT(irq.h)
#include <l4/platform/realview/timer.h>
#include <l4/lib/bit.h>
#include <l4/platform/realview/irq.h>

static int platform_timer_handler(struct irq_desc *desc)
/*
 * Timer handler for userspace
 */
int platform_timer_user_handler(struct irq_desc *desc)
{
    /* Ack the device irq */
    timer_irq_clear(PLATFORM_TIMER1_VBASE);

    /* Notify the userspace */
    irq_thread_notify(desc);

    return 0;
}

/*
 * Keyboard handler for userspace
 */
#define PL050_KMICR         0x00
#define PL050_KMI_RXINTR    (1 << 0x4)
int platform_keyboard_user_handler(struct irq_desc *desc)
{
    /*
     * Disable rx keyboard interrupt.
     * User will enable this
     */
    clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR,
           PL050_KMI_RXINTR);

    irq_thread_notify(desc);
    return 0;
}

/*
 * Mouse handler for userspace
 */
int platform_mouse_user_handler(struct irq_desc *desc)
{
    /*
     * Disable rx mouse interrupt.
     * User will enable this
     */
    clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR,
           PL050_KMI_RXINTR);

    irq_thread_notify(desc);
    return 0;
}

int platform_timer_handler(struct irq_desc *desc)
{
    timer_irq_clear(PLATFORM_TIMER0_VBASE);

    return do_timer_irq();
}

/*
 * Built-in irq handlers initialised at compile time.
 * Else register with register_irq()
 */
struct irq_desc irq_desc_array[IRQS_MAX] = {
    [IRQ_TIMER0] = {
        .name = "Timer0",
        .chip = &irq_chip_array[0],
        .handler = platform_timer_handler,
    },
};
@@ -5,7 +5,7 @@
 *
 * Author: Bahadir Balban
 */
#include <l4/platform/realview/timer.h>
#include <l4/platform/realview/irq.h>
#include <l4/lib/printk.h>
#include INC_PLAT(offsets.h)
#include INC_SUBARCH(perfmon.h)
@@ -4,13 +4,14 @@
 * Copyright (C) 2009 B Labs Ltd.
 */
#include <l4/platform/realview/uart.h>
#include <l4/platform/realview/timer.h>
#include <l4/platform/realview/irq.h>
#include INC_PLAT(offsets.h)
#include INC_GLUE(mapping.h)
#include INC_GLUE(smp.h)
#include <l4/generic/irq.h>
#include <l4/generic/space.h>
#include <l4/generic/platform.h>
#include <l4/generic/smp.h>
#include INC_PLAT(platform.h)
#include INC_ARCH(io.h)

@@ -36,6 +37,9 @@ void platform_timer_start(void)
    /* Enable irq line for TIMER0 */
    irq_enable(IRQ_TIMER0);

    /* Set cpu to all cpus for timer0 */
    // irq_set_cpu(IRQ_TIMER0, cpu_all_mask());

    /* Enable timer */
    timer_start(PLATFORM_TIMER0_VBASE);
}
@@ -19,7 +19,6 @@
#include <l4/lib/string.h>
#include <l4/generic/space.h>

extern struct irq_desc irq_desc_array[IRQS_MAX];

/* Print some SCU information */
@@ -49,16 +48,14 @@ void scu_init(void)

void platform_smp_init(int ncpus)
{
    unsigned int i;
    /* Add GIC SoftIRQ (aka IPI) */
    for (int i = 0; i < 16; i++) {
        strncpy(irq_desc_array[i].name, "SoftInt", 8);
        irq_desc_array[i].chip = &irq_chip_array[0];
        irq_desc_array[i].handler = &ipi_handler;
    }

    /* Add GIC SoftIRQ (aka IPI) */
    for (i = 0; i <= 15; i++) {
        strncpy(irq_desc_array[i].name, "SoftInt", 8);
        irq_desc_array[i].chip = &irq_chip_array[0];
        irq_desc_array[i].handler = &ipi_handler;
    }

    add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE,
    add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE,
                     PAGE_SIZE, MAP_IO_DEFAULT);

}
@@ -74,7 +71,7 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int))
    dsb(); /* Make sure the write occurs */

    /* Wake up other core who is waiting on a WFI. */
    gic_send_ipi(CPUID_TO_MASK(cpu), 1);
    gic_send_ipi(CPUID_TO_MASK(cpu), 0);

    return 0;
}
@@ -82,13 +79,4 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int))

void secondary_init_platform(void)
{
    gic_cpu_init(0, GIC0_CPU_VBASE);
    gic_ack_irq(1);

    gic_set_target(IRQ_TIMER0, 1 << smp_get_cpuid());
}

void arch_send_ipi(u32 cpu, int cmd)
{
    gic_send_ipi(cpu, cmd);
}
@@ -6,4 +6,3 @@ break break_virtual
continue
sym kernel.elf
stepi

@@ -1,4 +1,4 @@
load/r '/home/bahadir/codezero/build/final.elf'
load/ni/np '/home/bahadir/codezero/build/kernel.elf'
bexec platform_init
bexec smp_start_cores
bexec idle_task