diff --git a/SConstruct b/SConstruct index a408669..bb78220 100644 --- a/SConstruct +++ b/SConstruct @@ -17,6 +17,7 @@ gcc_arch_flag = config.gcc_arch_flag all_syms = config.all builddir='build/codezero/' + # Generate kernel linker script at runtime using template file. def generate_kernel_linker_script(target, source, env): linker_in = source[0] @@ -43,13 +44,13 @@ def generate_kernel_phys_linker_script(target, source, env): "-I%s -imacros l4/macros.h -imacros %s -imacros %s -C -P %s -o %s" % \ ('include', 'l4/platform/'+ platform + '/offsets.h', \ 'l4/glue/' + arch + '/memlayout.h', phys_linker_in, phys_linker_out) - print cmd os.system(cmd) create_kernel_phys_linker = Command(join(builddir, 'include/physlink.lds'), \ - join(PROJROOT, 'include/physlink.lds.in'), \ + join(PROJROOT, 'include/l4/arch/arm/linker.lds.in'), \ generate_kernel_phys_linker_script) ''' + env = Environment(CC = config.toolchain_kernel + 'gcc', AR = config.toolchain_kernel + 'ar', RANLIB = config.toolchain_kernel + 'ranlib', @@ -58,7 +59,7 @@ env = Environment(CC = config.toolchain_kernel + 'gcc', CCFLAGS = ['-g', '-nostdlib', '-ffreestanding', '-std=gnu99', '-Wall', \ '-Werror', '-march=' + gcc_arch_flag], LINKFLAGS = ['-nostdlib', '-T' + join(builddir, 'include/l4/arch/arm/linker.lds')], - ASFLAGS = ['-D__ASSEMBLY__'], + ASFLAGS = ['-D__ASSEMBLY__', '-march=' + gcc_arch_flag], PROGSUFFIX = '.elf', # The suffix to use for final executable ENV = {'PATH' : os.environ['PATH']}, # Inherit shell path LIBS = 'gcc', # libgcc.a - This is required for division routines. 
diff --git a/SConstruct.userlibs b/SConstruct.userlibs index 29a2997..e224114 100644 --- a/SConstruct.userlibs +++ b/SConstruct.userlibs @@ -24,7 +24,7 @@ env = Environment(CC = config.toolchain_userspace + 'gcc', CCFLAGS = ['-g', '-nostdlib', '-ffreestanding', '-std=gnu99', '-Wall', '-Werror', '-march=' + gcc_arch_flag], LINKFLAGS = ['-nostdlib'], - ASFLAGS = ['-D__ASSEMBLY__'], + ASFLAGS = ['-D__ASSEMBLY__', '-march=' + gcc_arch_flag], ENV = {'PATH' : os.environ['PATH']}, # Inherit shell path LIBS = 'gcc', # libgcc.a - This is required for division routines. CPPPATH = "#include", diff --git a/config/configuration.py b/config/configuration.py index 6443353..e995bf4 100644 --- a/config/configuration.py +++ b/config/configuration.py @@ -15,6 +15,10 @@ class Container: self.pager_lma = 0 self.pager_vma = 0 self.pager_size = 0 + self.pager_rw_section_start = 0 + self.pager_rw_section_end = 0 + self.pager_rx_section_start = 0 + self.pager_rx_section_end = 0 self.pager_task_region_start = 0 self.pager_task_region_end = 0 self.pager_shm_region_start = 0 @@ -72,6 +76,8 @@ class configuration: self.toolchain_userspace = None self.toolchain_kernel = None self.all = [] + self.smp = False + self.ncpu = 0 self.containers = [] self.ncontainers = 0 @@ -87,6 +93,13 @@ class configuration: return parts[1], parts[2] return None + # Check if SMP enabled, and get NCPU if SMP + def get_ncpu(self, name, value): + if name[:len("CONFIG_SMP")] == "CONFIG_SMP": + self.smp = bool(value) + if name[:len("CONFIG_NCPU")] == "CONFIG_NCPU": + self.ncpu = int(value) + # Extract architecture from a name value pair def get_arch(self, name, val): if name[:len("CONFIG_ARCH_")] == "CONFIG_ARCH_": diff --git a/configure.py b/configure.py index c90d03e..0b0e0cb 100755 --- a/configure.py +++ b/configure.py @@ -40,6 +40,7 @@ def cml2_header_to_symbols(cml2_header, config): config.get_arch(name, value) config.get_subarch(name, value) config.get_platform(name, value) + config.get_ncpu(name, value) 
config.get_ncontainers(name, value) config.get_container_parameters(name, value) config.get_toolchain(name, value) diff --git a/conts/baremetal/kmi_service/main.c b/conts/baremetal/kmi_service/main.c index e7226aa..88a6bdf 100755 --- a/conts/baremetal/kmi_service/main.c +++ b/conts/baremetal/kmi_service/main.c @@ -140,6 +140,12 @@ int keyboard_irq_handler(void *arg) while (data--) if ((c = kmi_keyboard_read(keyboard->base, &keyboard->state))) printf("%c", c); + + /* + * Kernel has disabled irq for keyboard + * We need to enable it + */ + kmi_rx_irq_enable(keyboard->base); } } @@ -174,6 +180,12 @@ int mouse_irq_handler(void *arg) while (data--) if ((c = kmi_data_read(mouse->base))) printf("mouse data: %d\n", c); + + /* + * Kernel has disabled irq for mouse + * We need to enable it + */ + kmi_rx_irq_enable(mouse->base); } } diff --git a/conts/baremetal/timer_service/include/timer.h b/conts/baremetal/timer_service/include/timer.h index a78052b..6b69efc 100644 --- a/conts/baremetal/timer_service/include/timer.h +++ b/conts/baremetal/timer_service/include/timer.h @@ -19,7 +19,7 @@ struct sleeper_task { struct wake_task_list { struct link head; struct link *end; /* optimization */ - struct l4_mutex lock; /* lock for sanity of head */ + struct l4_mutex wake_list_lock; /* lock for sanity of head */ }; #define BUCKET_BASE_LEVEL_BITS 8 @@ -77,7 +77,7 @@ struct timer { unsigned long base; /* Virtual base address */ unsigned int count; /* Counter/jiffies */ struct sleeper_task_bucket task_list; /* List of sleeping tasks */ - struct l4_mutex lock; /* Lock for sleeper_task_bucket */ + struct l4_mutex task_list_lock; /* Lock for sleeper_task_bucket */ struct capability cap; /* Capability describing timer */ }; diff --git a/conts/baremetal/timer_service/main.c b/conts/baremetal/timer_service/main.c index 4c1f947..b66362d 100644 --- a/conts/baremetal/timer_service/main.c +++ b/conts/baremetal/timer_service/main.c @@ -22,7 +22,7 @@ static int total_caps = 0; /* Total number of timer 
chips being handled by us */ #define TIMERS_TOTAL 1 -static struct timer timer[TIMERS_TOTAL]; +static struct timer global_timer[TIMERS_TOTAL]; /* Deafult timer to be used for sleep/wake etc purposes */ #define SLEEP_WAKE_TIMER 0 @@ -85,7 +85,7 @@ void timer_struct_init(struct timer* timer, unsigned long base) timer->base = base; timer->count = 0; timer->slot = 0; - l4_mutex_init(&timer->lock); + l4_mutex_init(&timer->task_list_lock); for (int i = 0; i < BUCKET_BASE_LEVEL_SIZE ; ++i) { link_init(&timer->task_list.bucket_level0[i]); @@ -106,7 +106,7 @@ void wake_task_list_init(void) { link_init(&wake_tasks.head); wake_tasks.end = &wake_tasks.head; - l4_mutex_init(&wake_tasks.lock); + l4_mutex_init(&wake_tasks.wake_list_lock); } /* @@ -140,7 +140,7 @@ struct link* find_bucket_list(unsigned long seconds) struct link *vector; struct sleeper_task_bucket *bucket; - bucket = &timer[SLEEP_WAKE_TIMER].task_list; + bucket = &global_timer[SLEEP_WAKE_TIMER].task_list; /* * TODO: Check if we have already surpassed seconds @@ -172,8 +172,8 @@ int timer_probe_devices(void) /* Match device type */ if (cap_devtype(&caparray[i]) == CAP_DEVTYPE_TIMER) { /* Copy to correct device index */ - memcpy(&timer[cap_devnum(&caparray[i]) - 1].cap, - &caparray[i], sizeof(timer[0].cap)); + memcpy(&global_timer[cap_devnum(&caparray[i]) - 1].cap, + &caparray[i], sizeof(global_timer[0].cap)); timers++; } } @@ -196,8 +196,11 @@ int timer_irq_handler(void *arg) struct link *vector; const int slot = 0; - /* Initialise timer */ - timer_init(timer->base); + /* + * Initialise timer + * 1 interrupt per second + */ + timer_init(timer->base, 1000000); /* Register self for timer irq, using notify slot 0 */ if ((err = l4_irq_control(IRQ_CONTROL_REGISTER, slot, @@ -221,34 +224,32 @@ int timer_irq_handler(void *arg) BUG(); } - //printf("Got irq(count 0x%x)\n", timer->count); /* * Update timer count * TODO: Overflow check, we have 1 interrupt/sec from timer * with 32bit count it will take 9years to overflow */ 
timer->count += count; + printf("Got timer irq, current count = 0x%x\n", timer->count); /* find bucket list of taks to be woken for current count */ vector = find_bucket_list(timer->count); if (!list_empty(vector)) { /* Removing tasks from sleeper list */ - l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock); + l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock); task_list = list_detach(vector); - l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock); + l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock); /* Add tasks to wake_task_list */ - l4_mutex_lock(&wake_tasks.lock); - list_attach(task_list, - &wake_tasks.head, wake_tasks.end); - l4_mutex_unlock(&wake_tasks.lock); + l4_mutex_lock(&wake_tasks.wake_list_lock); + list_attach(task_list, &wake_tasks.head, wake_tasks.end); + l4_mutex_unlock(&wake_tasks.wake_list_lock); /* * Send ipc to handle_request * thread to send wake signals */ - printf("sending ipc %d to thread %d\n", L4_IPC_TAG_TIMER_WAKE_THREADS, tid_ipc_handler); l4_send(tid_ipc_handler,L4_IPC_TAG_TIMER_WAKE_THREADS); } } @@ -266,17 +267,16 @@ void task_wake(void) list_foreach_removable_struct(struct_ptr, temp_ptr, &wake_tasks.head, list) { /* Remove task from wake list */ - l4_mutex_lock(&wake_tasks.lock); + l4_mutex_lock(&wake_tasks.wake_list_lock); list_remove(&struct_ptr->list); - l4_mutex_unlock(&wake_tasks.lock); + l4_mutex_unlock(&wake_tasks.wake_list_lock); /* Set sender correctly */ l4_set_sender(struct_ptr->tid); -#if 0 - printf("waking thread at time %x\n", - (unsigned int)timer[SLEEP_WAKE_TIMER].count); -#endif + printf("%s : Waking thread 0x%x at time 0x%x\n", __CONTAINER_NAME__, + struct_ptr->tid, global_timer[SLEEP_WAKE_TIMER].count); + /* send wake ipc */ if ((ret = l4_ipc_return(struct_ptr->retval)) < 0) { printf("%s: IPC return error: %d.\n", @@ -302,17 +302,17 @@ int timer_setup_devices(void) for (int i = 0; i < TIMERS_TOTAL; i++) { /* initialize timer */ - timer_struct_init(&timer[i],(unsigned long)l4_new_virtual(1) ); + 
timer_struct_init(&global_timer[i],(unsigned long)l4_new_virtual(1) ); /* Map timer to a virtual address region */ - if (IS_ERR(l4_map((void *)__pfn_to_addr(timer[i].cap.start), - (void *)timer[i].base, timer[i].cap.size, + if (IS_ERR(l4_map((void *)__pfn_to_addr(global_timer[i].cap.start), + (void *)global_timer[i].base, global_timer[i].cap.size, MAP_USR_IO, self_tid()))) { printf("%s: FATAL: Failed to map TIMER device " "%d to a virtual address\n", __CONTAINER_NAME__, - cap_devnum(&timer[i].cap)); + cap_devnum(&global_timer[i].cap)); BUG(); } @@ -323,7 +323,7 @@ int timer_setup_devices(void) * itself as its irq handler, initiate the timer and * wait on irqs. */ - if ((err = thread_create(timer_irq_handler, &timer[i], + if ((err = thread_create(timer_irq_handler, &global_timer[i], TC_SHARE_SPACE, &tptr)) < 0) { printf("FATAL: Creation of irq handler " @@ -404,13 +404,23 @@ void task_sleep(l4id_t tid, unsigned long seconds, int ret) struct link *vector; /* can overflow happen here?, timer is in 32bit mode */ - seconds += timer[SLEEP_WAKE_TIMER].count; + seconds += global_timer[SLEEP_WAKE_TIMER].count; + + printf("sleep wake timer lock is present at address %lx\n", + ( (unsigned long)&global_timer[SLEEP_WAKE_TIMER].task_list_lock.lock)); vector = find_bucket_list(seconds); - l4_mutex_lock(&timer[SLEEP_WAKE_TIMER].lock); + printf("Acquiring lock for sleep wake timer\n"); + l4_mutex_lock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock); + printf("got lock for sleep wake timer\n"); + list_insert(&task->list, vector); - l4_mutex_unlock(&timer[SLEEP_WAKE_TIMER].lock); + + printf("Releasing lock for sleep wake timer\n"); + l4_mutex_unlock(&global_timer[SLEEP_WAKE_TIMER].task_list_lock); + printf("released lock for sleep wake timer\n"); + } void handle_requests(void) @@ -448,7 +458,11 @@ void handle_requests(void) switch (tag) { /* Return time in seconds, since the timer was started */ case L4_IPC_TAG_TIMER_GETTIME: - mr[0] = timer[SLEEP_WAKE_TIMER].count; + printf("%s: Got 
get time request from thread 0x%x " + " at time = 0x%x\n", __CONTAINER_NAME__, + senderid, global_timer[SLEEP_WAKE_TIMER].count); + + write_mr(2, global_timer[SLEEP_WAKE_TIMER].count); /* Reply */ if ((ret = l4_ipc_return(ret)) < 0) { @@ -458,8 +472,11 @@ void handle_requests(void) break; case L4_IPC_TAG_TIMER_SLEEP: - printf("%s: Got sleep request from thread 0x%x, duration %d\n", __CONTAINER_NAME__, - senderid, mr[0]); + printf("%s: Got sleep request from thread 0x%x " + "for 0x%x seconds at 0x%x seconds\n", + __CONTAINER_NAME__, senderid, mr[0], + global_timer[SLEEP_WAKE_TIMER].count); + if (mr[0] > 0) { task_sleep(senderid, mr[0], ret); } diff --git a/conts/libdev/include/libdev/kmi.h b/conts/libdev/include/libdev/kmi.h index 3745780..2537bb0 100755 --- a/conts/libdev/include/libdev/kmi.h +++ b/conts/libdev/include/libdev/kmi.h @@ -12,7 +12,7 @@ struct keyboard_state{ }; /* Common functions */ -void kmi_irq_handler(unsigned long base); +void kmi_rx_irq_enable(unsigned long base); int kmi_data_read(unsigned long base); /* Keyboard specific calls */ diff --git a/conts/libdev/include/libdev/timer.h b/conts/libdev/include/libdev/timer.h index 61a4f86..2f1a4f4 100644 --- a/conts/libdev/include/libdev/timer.h +++ b/conts/libdev/include/libdev/timer.h @@ -17,7 +17,7 @@ void timer_load(u32 val, unsigned long timer_base); u32 timer_read(unsigned long timer_base); void timer_stop(unsigned long timer_base); void timer_init_oneshot(unsigned long timer_base); -void timer_init_periodic(unsigned long timer_base); -void timer_init(unsigned long timer_base); +void timer_init_periodic(unsigned long timer_base, u32 load_value); +void timer_init(unsigned long timer_base, u32 load_value); #endif /* __LIBDEV_TIMER_H__ */ diff --git a/conts/libdev/kmi/pl050/keymap.h b/conts/libdev/kmi/pl050/keymap.h index 37be670..8ace0ae 100755 --- a/conts/libdev/kmi/pl050/keymap.h +++ b/conts/libdev/kmi/pl050/keymap.h @@ -135,7 +135,7 @@ struct keyboard_key keymap_uk2[256] = { /* 40 */ 
{';',':',0,0}, /* 41 */ {'\'','@',0,0}, /* 42 */ {0,0,0,0}, -/* 43 */ {KEYCODE_RETURN,0,KEYCODE_ENTER,0}, +/* 43 */ {'\n','\n',KEYCODE_ENTER,0}, /* 44 */ {KEYCODE_LSHIFT,0,0,0}, /* 45 */ {'\\','|',0,0}, /* 46 */ {'z','Z',0,0}, diff --git a/conts/libdev/kmi/pl050/kmi.c b/conts/libdev/kmi/pl050/kmi.c index 7659d39..545f838 100755 --- a/conts/libdev/kmi/pl050/kmi.c +++ b/conts/libdev/kmi/pl050/kmi.c @@ -9,11 +9,10 @@ #include "kmi.h" #include "keymap.h" -/* - * Reading Rx data automatically clears the RXITR - */ -void kmi_irq_handler(unsigned long base) +/* Enable Rx irq */ +void kmi_rx_irq_enable(unsigned long base) { + *(volatile unsigned long *)(base + PL050_KMICR) = KMI_RXINTR; } int kmi_data_read(unsigned long base) diff --git a/conts/libdev/kmi/pl050/kmi.h b/conts/libdev/kmi/pl050/kmi.h index 6b07b2c..b79e396 100755 --- a/conts/libdev/kmi/pl050/kmi.h +++ b/conts/libdev/kmi/pl050/kmi.h @@ -45,7 +45,7 @@ #define MOUSE_DATA_ENABLE 0xF4 // Mouse enable /* Common functions */ -void kmi_irq_handler(unsigned long base); +void kmi_rx_irq_enable(unsigned long base); int kmi_data_read(unsigned long base); /* Keyboard specific calls */ diff --git a/conts/libdev/timer/sp804/timer.c b/conts/libdev/timer/sp804/timer.c index 9236c65..77504a8 100644 --- a/conts/libdev/timer/sp804/timer.c +++ b/conts/libdev/timer/sp804/timer.c @@ -36,7 +36,7 @@ void timer_stop(unsigned long timer_base) write(0, timer_base + SP804_CTRL); } -void timer_init_periodic(unsigned long timer_base) +void timer_init_periodic(unsigned long timer_base, u32 load_value) { volatile u32 reg = read(timer_base + SP804_CTRL); @@ -44,8 +44,11 @@ void timer_init_periodic(unsigned long timer_base) write(reg, timer_base + SP804_CTRL); - /* 1 tick per usec, 1 irq per msec */ - timer_load(1000, timer_base); + if (load_value) + timer_load(load_value, timer_base); + else + /* 1 tick per usec, 1 irq per msec */ + timer_load(1000, timer_base); } void timer_init_oneshot(unsigned long timer_base) @@ -58,8 +61,8 @@ void 
timer_init_oneshot(unsigned long timer_base) write(reg, timer_base + SP804_CTRL); } -void timer_init(unsigned long timer_base) +void timer_init(unsigned long timer_base, u32 load_value) { timer_stop(timer_base); - timer_init_periodic(timer_base); + timer_init_periodic(timer_base, load_value); } diff --git a/conts/libdev/timer/sp804/timer.h b/conts/libdev/timer/sp804/timer.h index e8c21c0..904ca22 100644 --- a/conts/libdev/timer/sp804/timer.h +++ b/conts/libdev/timer/sp804/timer.h @@ -56,8 +56,8 @@ void timer_start(unsigned long timer_base); void timer_load(u32 loadval, unsigned long timer_base); u32 timer_read(unsigned long timer_base); void timer_stop(unsigned long timer_base); -void timer_init_periodic(unsigned long timer_base); +void timer_init_periodic(unsigned long timer_base, u32 load_value); void timer_init_oneshot(unsigned long timer_base); -void timer_init(unsigned long timer_base); +void timer_init(unsigned long timer_base, u32 load_value); #endif /* __SP804_TIMER_H__ */ diff --git a/conts/libl4/include/l4lib/mutex.h b/conts/libl4/include/l4lib/mutex.h index 321faa0..bb391e1 100644 --- a/conts/libl4/include/l4lib/mutex.h +++ b/conts/libl4/include/l4lib/mutex.h @@ -14,7 +14,7 @@ #include struct l4_mutex { - unsigned int lock; + int lock; } __attribute__((aligned(sizeof(int)))); @@ -28,11 +28,15 @@ int l4_mutex_unlock(struct l4_mutex *m); #define L4_MUTEX_CONTENDED -1 #define L4_MUTEX_SUCCESS 0 -/* Mutex states - Any valid tid value is a locked state */ +/* + * Mutex states: + * Unlocked = -1, locked = 0, anything above 0 tells + * number of contended threads + */ +#define L4_MUTEX_LOCKED 0 #define L4_MUTEX_UNLOCKED -1 #define L4_MUTEX(m) \ struct l4_mutex m = { L4_MUTEX_UNLOCKED } - #endif /* __L4_MUTEX_H__ */ diff --git a/conts/libl4/src/arch/arm/v6/mutex.c b/conts/libl4/src/arch/arm/v6/mutex.c index 8f1076f..b4c7c03 100644 --- a/conts/libl4/src/arch/arm/v6/mutex.c +++ b/conts/libl4/src/arch/arm/v6/mutex.c @@ -15,9 +15,10 @@ int __l4_mutex_lock(void *m, 
l4id_t tid) loop: __asm__ __volatile__( "ldrex %0, [%1]\n" - : "=r"(tmp) + : "=&r"(tmp) : "r"(m) - ); + : "memory" + ); if(tmp != L4_MUTEX_UNLOCKED) ret = L4_MUTEX_CONTENDED; @@ -79,19 +80,19 @@ int __l4_mutex_unlock(void *m, l4id_t tid) return ret; } -u8 l4_atomic_dest_readb(u8 *location) +u8 l4_atomic_dest_readb(unsigned long *location) { unsigned int tmp, res; __asm__ __volatile__ ( - "1: \n" - "ldrex %0, [%2] \n" - "strex %1, %3, [%2] \n" - "teq %1, #0 \n" - "bne 1b \n" + "1: \n" + " ldrex %0, [%2] \n" + " strex %1, %3, [%2] \n" + " teq %1, #0 \n" + " bne 1b \n" : "=&r"(tmp), "=&r"(res) : "r"(location), "r"(0) : "cc", "memory" - ); + ); return (u8)tmp; } diff --git a/conts/libl4/src/arch/arm/v7/mutex.S b/conts/libl4/src/arch/arm/v7/mutex.S index 155e581..bd7cf16 100644 --- a/conts/libl4/src/arch/arm/v7/mutex.S +++ b/conts/libl4/src/arch/arm/v7/mutex.S @@ -7,42 +7,35 @@ /* * @r0 = address of mutex word - * @r1 = unique tid of current thread */ BEGIN_PROC(__l4_mutex_lock) 1: - ldrex r2, [r0] @ Load value - cmp r2, #L4_MUTEX_UNLOCKED @ Decide what state lock will be if we succeed in a store - movne r2, #L4_MUTEX_CONTENDED - moveq r2, #L4_MUTEX_SUCCESS + ldrex r1, [r0] @ Load value + add r1, r1, #1 @ Add 1 strex r3, r1, [r0] @ Store prospective lock state cmp r3, #0 @ If not successful - @ No WFE. Whatif this were between 2 threads running on the same cpu - bne 1b @ Retry and decide again on the prospective lock state. + bne 1b @ Retry and decide again on the prospective lock state. No WFE as this would be a problem on single cpu dsb + + cmp r1, #L4_MUTEX_LOCKED @ We succeeded in store, but are we a locker or a contender? 
+ movne r2, #L4_MUTEX_CONTENDED + moveq r2, #L4_MUTEX_SUCCESS mov r0, r2 mov pc, lr END_PROC(__l4_mutex_lock) /* * @r0 = address of mutex word - * @r1 = unique tid of current thread */ BEGIN_PROC(__l4_mutex_unlock) dsb - push {r4} - mov r4, #L4_MUTEX_UNLOCKED + mov r3, #L4_MUTEX_UNLOCKED 1: - ldrex r2, [r0] - cmp r2, r1 - moveq r3, #L4_MUTEX_SUCCESS - movne r3, #L4_MUTEX_CONTENDED - strex r2, r4, [r0] + ldrex r1, [r0] + strex r2, r3, [r0] cmp r2, #0 bne 1b - mov r0, r3 - pop {r4} + mov r0, r1 mov pc, lr END_PROC(__l4_mutex_unlock) - diff --git a/conts/libl4/src/mutex.c b/conts/libl4/src/mutex.c index 8fc0bcb..e213d67 100644 --- a/conts/libl4/src/mutex.c +++ b/conts/libl4/src/mutex.c @@ -51,8 +51,8 @@ * - Whether this is the best design - time will tell. */ -extern int __l4_mutex_lock(void *word, l4id_t tid); -extern int __l4_mutex_unlock(void *word, l4id_t tid); +extern int __l4_mutex_lock(void *word); +extern int __l4_mutex_unlock(void *word); void l4_mutex_init(struct l4_mutex *m) { @@ -61,10 +61,9 @@ void l4_mutex_init(struct l4_mutex *m) int l4_mutex_lock(struct l4_mutex *m) { - l4id_t tid = self_tid(); int err; - while(__l4_mutex_lock(m, tid) == L4_MUTEX_CONTENDED) { + while(__l4_mutex_lock(&m->lock) != L4_MUTEX_SUCCESS) { if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) { printf("%s: Error: %d\n", __FUNCTION__, err); return err; @@ -75,15 +74,14 @@ int l4_mutex_lock(struct l4_mutex *m) int l4_mutex_unlock(struct l4_mutex *m) { - l4id_t tid = self_tid(); - int err; + int err, contended; - if (__l4_mutex_unlock(m, tid) == L4_MUTEX_CONTENDED) { - if ((err = l4_mutex_control(&m->lock, L4_MUTEX_UNLOCK)) < 0) { + if ((contended = __l4_mutex_unlock(m))) { + if ((err = l4_mutex_control(&m->lock, + contended | L4_MUTEX_UNLOCK)) < 0) { printf("%s: Error: %d\n", __FUNCTION__, err); return err; } } return 0; } - diff --git a/include/l4/api/mutex.h b/include/l4/api/mutex.h index 8732a9c..e8ebca2 100644 --- a/include/l4/api/mutex.h +++ b/include/l4/api/mutex.h @@ 
-7,11 +7,22 @@ #define MUTEX_CONTROL_LOCK L4_MUTEX_LOCK #define MUTEX_CONTROL_UNLOCK L4_MUTEX_UNLOCK +#define MUTEX_CONTROL_OPMASK L4_MUTEX_OPMASK + +#define mutex_operation(x) ((x) & MUTEX_CONTROL_OPMASK) +#define mutex_contenders(x) ((x) & ~MUTEX_CONTROL_OPMASK) + #include #include #include +/* + * Contender threshold is the total number of contenders + * who are expected to sleep on the mutex, and will be waited + * for a wakeup. + */ struct mutex_queue { + int contenders; unsigned long physical; struct link list; struct waitqueue_head wqh_contenders; @@ -39,7 +50,8 @@ void init_mutex_queue_head(struct mutex_queue_head *mqhead); #endif -#define L4_MUTEX_LOCK 0 -#define L4_MUTEX_UNLOCK 1 +#define L4_MUTEX_OPMASK 0xF0000000 +#define L4_MUTEX_LOCK 0x10000000 +#define L4_MUTEX_UNLOCK 0x20000000 #endif /* __MUTEX_CONTROL_H__*/ diff --git a/include/l4/arch/arm/exception.h b/include/l4/arch/arm/exception.h index ca189c2..6b5f5d4 100644 --- a/include/l4/arch/arm/exception.h +++ b/include/l4/arch/arm/exception.h @@ -12,7 +12,7 @@ #include INC_ARCH(asm.h) /* Abort debugging conditions */ -//#define DEBUG_ABORTS +// #define DEBUG_ABORTS #if defined (DEBUG_ABORTS) #define dbg_abort(...) printk(__VA_ARGS__) #else diff --git a/include/l4/arch/arm/linker.lds.in b/include/l4/arch/arm/linker.lds.in index eadfeeb..836e7c2 100644 --- a/include/l4/arch/arm/linker.lds.in +++ b/include/l4/arch/arm/linker.lds.in @@ -8,7 +8,11 @@ #endif phys_ram_start = PLATFORM_PHYS_MEM_START; + +#if !defined(kernel_offset) kernel_offset = KERNEL_AREA_START - phys_ram_start; +#endif + kernel_physical = 0x8000 + phys_ram_start; kernel_virtual = kernel_physical + kernel_offset; @@ -47,8 +51,8 @@ SECTIONS . = ALIGN(16K); _start_vectors = .; *(.data.vectors) - _end_vectors = .; . = ALIGN(4K); + _end_vectors = .; _start_kip = .; *(.data.kip) . = ALIGN(4K); @@ -71,9 +75,6 @@ SECTIONS *(.bss) } . = ALIGN(4K); - . 
+= PAGE_SIZE * 2; /* This is required as the link counter does not seem - * to increment for the bss section - * TODO: Change this with PAGE_SIZE */ /* Below part is to be discarded after boot */ _start_init = .; diff --git a/include/l4/drivers/irq/gic/gic.h b/include/l4/drivers/irq/gic/gic.h index d51cb9f..044db48 100644 --- a/include/l4/drivers/irq/gic/gic.h +++ b/include/l4/drivers/irq/gic/gic.h @@ -88,4 +88,8 @@ u32 gic_get_priority(u32 irq); void gic_dummy_init(void); +void gic_eoi_irq(l4id_t irq); + +void gic_print_cpu(void); + #endif /* __GIC_H__ */ diff --git a/include/l4/generic/container.h b/include/l4/generic/container.h index b28e99f..48f5b9a 100644 --- a/include/l4/generic/container.h +++ b/include/l4/generic/container.h @@ -32,6 +32,16 @@ struct pager { unsigned long stack_address; unsigned long memsize; struct cap_list cap_list; + + /* + * Section markings, + * We don't care for other types of sections, + * RO will be included inside RX. + */ + unsigned long rw_sections_start; + unsigned long rw_sections_end; + unsigned long rx_sections_start; + unsigned long rx_sections_end; }; @@ -72,6 +82,16 @@ struct pager_info { unsigned long start_address; unsigned long stack_address; + /* + * Section markings, + * We don't care for other types of sections, + * RO will be included inside RX. + */ + unsigned long rw_sections_start; + unsigned long rw_sections_end; + unsigned long rx_sections_start; + unsigned long rx_sections_end; + /* Number of capabilities defined */ int ncaps; diff --git a/include/l4/generic/irq.h b/include/l4/generic/irq.h index 26ff8da..dd0c968 100644 --- a/include/l4/generic/irq.h +++ b/include/l4/generic/irq.h @@ -1,18 +1,20 @@ /* * Generic irq handling definitions. * - * Copyright (C) 2007 Bahadir Balban + * Copyright (C) 2010 B Labs Ltd. 
*/ #ifndef __GENERIC_IRQ_H__ #define __GENERIC_IRQ_H__ #include #include +#include #include INC_PLAT(irq.h) #include INC_ARCH(types.h) /* Represents none or spurious irq */ -#define IRQ_NIL 0xFFFFFFFF +#define IRQ_NIL 0xFFFFFFFF /* -1 */ +#define IRQ_SPURIOUS 0xFFFFFFFE /* -2 */ /* Successful irq handling state */ #define IRQ_HANDLED 0 @@ -23,6 +25,7 @@ struct irq_chip_ops { l4id_t (*read_irq)(void *data); irq_op_t ack_and_mask; irq_op_t unmask; + void (*set_cpu)(l4id_t irq, unsigned int cpumask); }; struct irq_chip { @@ -47,9 +50,6 @@ struct irq_desc { /* Notification slot for this irq */ int task_notify_slot; - /* If user will ack this irq */ - int user_ack; - /* Waitqueue head for this irq */ struct waitqueue_head wqh_irq; @@ -72,10 +72,17 @@ static inline void irq_disable(int irq_index) { struct irq_desc *this_irq = irq_desc_array + irq_index; struct irq_chip *this_chip = this_irq->chip; - this_chip->ops.ack_and_mask(irq_index - this_chip->start); } +static inline void irq_set_cpu(int irq_index, unsigned int cpumask) +{ + struct irq_desc *this_irq = irq_desc_array + irq_index; + struct irq_chip *this_chip = this_irq->chip; + + this_chip->ops.set_cpu(irq_index - this_chip->start, cpumask); +} + int irq_register(struct ktcb *task, int notify_slot, l4id_t irq_index); int irq_thread_notify(struct irq_desc *desc); diff --git a/include/l4/generic/scheduler.h b/include/l4/generic/scheduler.h index 844a3bb..b1e1de4 100644 --- a/include/l4/generic/scheduler.h +++ b/include/l4/generic/scheduler.h @@ -42,7 +42,7 @@ static inline struct ktcb *current_task(void) #define current current_task() #define need_resched (current->ts_need_resched) -#define SCHED_RQ_TOTAL 2 +#define SCHED_RQ_TOTAL 4 /* A basic runqueue */ struct runqueue { @@ -52,11 +52,28 @@ struct runqueue { unsigned int total; /* Total tasks */ }; +/* + * Hints and flags to scheduler + */ +enum sched_flags { + /* Schedule idle at a convenient time */ + SCHED_RUN_IDLE = (1 << 0), +}; + /* Contains per-container 
scheduling structures */ struct scheduler { + unsigned int flags; + unsigned int task_select_ctr; struct runqueue sched_rq[SCHED_RQ_TOTAL]; + + /* Regular runqueues */ struct runqueue *rq_runnable; struct runqueue *rq_expired; + + /* Real-time runqueues */ + struct runqueue *rq_rt_runnable; + struct runqueue *rq_rt_expired; + struct ktcb *idle_task; /* Total priority of all tasks in container */ diff --git a/include/l4/generic/smp.h b/include/l4/generic/smp.h index 31de458..5f3a265 100644 --- a/include/l4/generic/smp.h +++ b/include/l4/generic/smp.h @@ -20,4 +20,31 @@ #define smp_get_cpuid() 0 #endif +/* All cpus in the SMP system */ +static inline unsigned int cpu_mask_all(void) +{ + unsigned int mask = 0; + + for (int i = 0; i < CONFIG_NCPU; i++) + mask |= (1 << i); + return mask; +} + +/* All but not self */ +static inline unsigned int cpu_mask_others(void) +{ + unsigned int mask = 0; + + for (int i = 0; i < CONFIG_NCPU; i++) + if (i != smp_get_cpuid()) + mask |= (1 << i); + return mask; +} + +/* Only self */ +static inline unsigned int cpu_mask_self(void) +{ + return 1 << smp_get_cpuid(); +} + #endif /* __GENERIC_SMP_H__ */ diff --git a/include/l4/generic/tcb.h b/include/l4/generic/tcb.h index 8bf68f5..d187757 100644 --- a/include/l4/generic/tcb.h +++ b/include/l4/generic/tcb.h @@ -29,6 +29,7 @@ #define TASK_SUSPENDING (1 << 1) #define TASK_RESUMING (1 << 2) #define TASK_PENDING_SIGNAL (TASK_SUSPENDING) +#define TASK_REALTIME (1 << 5) /* * This is to indicate a task (either current or one of @@ -109,7 +110,6 @@ struct ktcb { enum task_state state; struct link task_list; /* Global task list. 
*/ - struct ktcb_list child_exit_list; /* UTCB related, see utcb.txt in docs */ unsigned long utcb_address; /* Virtual ref to task's utcb area */ diff --git a/include/l4/generic/time.h b/include/l4/generic/time.h index 8bcc8f8..1f1ac9d 100644 --- a/include/l4/generic/time.h +++ b/include/l4/generic/time.h @@ -16,5 +16,6 @@ struct timeval { extern volatile u32 jiffies; int do_timer_irq(void); +int secondary_timer_irq(void); #endif /* __GENERIC_TIME_H__ */ diff --git a/include/l4/glue/arm/ipi.h b/include/l4/glue/arm/ipi.h index ca44626..de04efd 100644 --- a/include/l4/glue/arm/ipi.h +++ b/include/l4/glue/arm/ipi.h @@ -1,18 +1,16 @@ +/* + * Copyright (C) 2010 B Labs Ltd. + * + * By Bahadir Balban + */ #ifndef __IPI_H__ #define __IPI_H__ -/* - * Copyright 2010 B Labs.Ltd. - * - * Author: Prem Mallappa - * - * Description: - */ - - #include int ipi_handler(struct irq_desc *desc); +#define IPI_TIMER_EVENT 0 + #endif /* __IPI_H__ */ diff --git a/include/l4/glue/arm/smp.h b/include/l4/glue/arm/smp.h index fa5933b..686c7ca 100644 --- a/include/l4/glue/arm/smp.h +++ b/include/l4/glue/arm/smp.h @@ -34,7 +34,7 @@ static inline void smp_start_cores(void) {} void init_smp(void); void arch_smp_spin(void); -void arch_send_ipi(u32 cpu, int ipi); +void smp_send_ipi(unsigned int cpumask, int ipi_num); void platform_smp_init(int ncpus); int platform_smp_start(int cpu, void (*start)(int)); void secondary_init_platform(void); diff --git a/include/l4/platform/eb/irq.h b/include/l4/platform/eb/irq.h index 4a3e3bd..a719910 100644 --- a/include/l4/platform/eb/irq.h +++ b/include/l4/platform/eb/irq.h @@ -69,10 +69,13 @@ #if defined (CONFIG_CPU_ARM11MPCORE) || defined (CONFIG_CPU_CORTEXA9) #define IRQ_TIMER0 MPCORE_GIC_IRQ_TIMER01 #define IRQ_TIMER1 MPCORE_GIC_IRQ_TIMER23 +#define IRQ_KEYBOARD0 MPCORE_GIC_IRQ_KMI0 +#define IRQ_MOUSE0 MPCORE_GIC_IRQ_KMI1 #else #define IRQ_TIMER0 EB_IRQ_TIMER01 #define IRQ_TIMER1 EB_IRQ_TIMER23 +#define IRQ_KEYBOARD0 EB_IRQ_KMI0 +#define IRQ_MOUSE0 EB_IRQ_KMI1 
#endif - #endif /* __PLATFORM_IRQ_H__ */ diff --git a/include/l4/platform/eb/offsets.h b/include/l4/platform/eb/offsets.h index 41451ef..e8be210 100644 --- a/include/l4/platform/eb/offsets.h +++ b/include/l4/platform/eb/offsets.h @@ -19,7 +19,11 @@ #define PLATFORM_GIC3_BASE 0x10060000 /* GIC 3 */ #define PLATFORM_GIC4_BASE 0x10070000 /* GIC 4 */ -#define MPCORE_PRIVATE_VBASE (IO_AREA0_VADDR + (13 * DEVICE_PAGE)) +/* + * Virtual device offsets for EB platform - starting from + * the last common realview virtual device offset + */ +#define MPCORE_PRIVATE_VBASE (IO_AREA0_VADDR + (14 * DEVICE_PAGE)) #if defined (CONFIG_CPU_CORTEXA9) #define MPCORE_PRIVATE_BASE 0x1F000000 diff --git a/include/l4/platform/pb11mpcore/offsets.h b/include/l4/platform/pb11mpcore/offsets.h index 4014acc..010cd13 100644 --- a/include/l4/platform/pb11mpcore/offsets.h +++ b/include/l4/platform/pb11mpcore/offsets.h @@ -16,6 +16,7 @@ #define PLATFORM_TIMER2_BASE 0x10018000 /* TIMER 4-5 */ #define PLATFORM_TIMER3_BASE 0x10019000 /* TIMER 6-7 */ #define PLATFORM_SYSCTRL1_BASE 0x1001A000 /* System controller 1 */ +#define PLATFORM_CLCD0_BASE 0x10020000 /* CLCD */ #define PLATFORM_GIC0_BASE 0x1E000000 /* GIC 0 */ #define PLATFORM_GIC1_BASE 0x1E010000 /* GIC 1 */ #define PLATFORM_GIC2_BASE 0x1E020000 /* GIC 2 */ diff --git a/include/l4/platform/pb926/platform.h b/include/l4/platform/pb926/platform.h index 97cb5a1..ed2a430 100644 --- a/include/l4/platform/pb926/platform.h +++ b/include/l4/platform/pb926/platform.h @@ -1,5 +1,3 @@ -#ifndef __PB926_PLATFORM_H__ -#define __PB926_PLATFORM_H__ /* * Platform specific ties between drivers and generic APIs used by the kernel. * E.g. system timer and console. 
@@ -7,6 +5,9 @@ * Copyright (C) Bahadir Balban 2007 */ +#ifndef __PB926_PLATFORM_H__ +#define __PB926_PLATFORM_H__ + void platform_timer_start(void); #endif /* __PB926_PLATFORM_H__ */ diff --git a/include/l4/platform/pba8/irq.h b/include/l4/platform/pba8/irq.h index 17ab678..7f20e00 100644 --- a/include/l4/platform/pba8/irq.h +++ b/include/l4/platform/pba8/irq.h @@ -24,5 +24,8 @@ #define IRQ_TIMER2 73 #define IRQ_TIMER3 74 +#define IRQ_KEYBOARD0 52 +#define IRQ_MOUSE0 53 + #endif /* __PLATFORM_IRQ_H__ */ diff --git a/include/l4/platform/pba8/offsets.h b/include/l4/platform/pba8/offsets.h index fd59006..2da31ce 100644 --- a/include/l4/platform/pba8/offsets.h +++ b/include/l4/platform/pba8/offsets.h @@ -24,6 +24,7 @@ #define PLATFORM_TIMER2_BASE 0x10018000 /* Timers 4 and 5 */ #define PLATFORM_TIMER3_BASE 0x10019000 /* Timers 6 and 7 */ #define PLATFORM_SYSCTRL1_BASE 0x1001A000 /* System controller1 */ +#define PLATFORM_CLCD0_BASE 0x10020000 /* CLCD */ #define PLATFORM_GIC1_BASE 0x1E000000 /* GIC 1 */ #define PLATFORM_GIC2_BASE 0x1E010000 /* GIC 2 */ #define PLATFORM_GIC3_BASE 0x1E020000 /* GIC 3 */ diff --git a/include/l4/platform/pba9/irq.h b/include/l4/platform/pba9/irq.h index dc64be3..9a9dda5 100644 --- a/include/l4/platform/pba9/irq.h +++ b/include/l4/platform/pba9/irq.h @@ -20,14 +20,16 @@ #define IRQ_UART1 38 #define IRQ_UART2 39 #define IRQ_UART3 40 + +#define IRQ_KEYBOARD0 44 +#define IRQ_MOUSE0 45 #define IRQ_CLCD0 46 /* - * Interrupt Distribution: - * 0-31: SI, provided by distributed interrupt controller - * 32-63: Externel peripheral interrupts - * 64-71: Tile site interrupt - * 72-95: Externel peripheral interrupts + * Versatile Express A9 Interrupt Distribution: + * 0 - 31: SI, provided by distributed interrupt controller + * 32 - 74: Irqs from Motherboard (0 - 42) + * 75- 81: Test chip interrupts */ #endif /* __PLATFORM_IRQ_H__ */ diff --git a/include/l4/platform/pba9/offsets.h b/include/l4/platform/pba9/offsets.h index 8685503..fb9dda5 100644 --- 
a/include/l4/platform/pba9/offsets.h +++ b/include/l4/platform/pba9/offsets.h @@ -22,14 +22,24 @@ #define PLATFORM_TIMER3_BASE 0x10019000 /* Timers 2 and 3 */ #define PLATFORM_SYSCTRL1_BASE 0x1001A000 /* System controller1 */ +#define PLATFORM_CLCD0_BASE 0x1001F000 /* CLCD */ + #define PLATFORM_GIC0_BASE 0x1E000000 /* GIC 0 */ #define MPCORE_PRIVATE_BASE 0x1E000000 -#define MPCORE_PRIVATE_VBASE (IO_AREA0_VADDR + (13 * DEVICE_PAGE)) #define SCU_BASE MPCORE_PRIVATE_BASE #define SCU_VBASE MPCORE_PRIVATE_VBASE #define GIC0_CPU_VBASE (MPCORE_PRIVATE_VBASE + 0x100) #define GIC0_DIST_VBASE (MPCORE_PRIVATE_VBASE + 0x1000) +/* + * Virtual device offsets for Versatile Express A9 + * Offsets start from the last common realview virtual + * device offset + */ +#define MPCORE_PRIVATE_VBASE (IO_AREA0_VADDR + (14 * DEVICE_PAGE)) + +/* Add userspace devices here as they become necessary for irqs */ + #endif /* __PLATFORM_PBA9_OFFSETS_H__ */ diff --git a/include/l4/platform/realview/offsets.h b/include/l4/platform/realview/offsets.h index e03632c..27204fe 100644 --- a/include/l4/platform/realview/offsets.h +++ b/include/l4/platform/realview/offsets.h @@ -22,6 +22,8 @@ */ #define PLATFORM_SYSTEM_REGISTERS 0x10000000 /* System registers */ #define PLATFORM_SYSCTRL_BASE 0x10001000 /* System controller0 */ +#define PLATFORM_KEYBOARD0_BASE 0x10006000 /* Keyboard */ +#define PLATFORM_MOUSE0_BASE 0x10007000 /* Mouse */ #define PLATFORM_UART0_BASE 0x10009000 /* Console port (UART0) */ #define PLATFORM_UART1_BASE 0x1000A000 /* Console port (UART1) */ #define PLATFORM_UART2_BASE 0x1000B000 /* Console port (UART2) */ @@ -43,12 +45,15 @@ #define PLATFORM_TIMER0_VBASE (IO_AREA0_VADDR + (4 * DEVICE_PAGE)) #define PLATFORM_GIC0_VBASE (IO_AREA0_VADDR + (5 * DEVICE_PAGE)) #define PLATFORM_GIC1_VBASE (IO_AREA0_VADDR + (7 * DEVICE_PAGE)) -#define PLATFORM_GIC2_VBASE (IO_AREA0_VADDR + (9 * DEVICE_PAGE)) -#define PLATFORM_GIC3_VBASE (IO_AREA0_VADDR + (11 * DEVICE_PAGE)) +#define PLATFORM_GIC2_VBASE 
(IO_AREA0_VADDR + (8 * DEVICE_PAGE)) +#define PLATFORM_GIC3_VBASE (IO_AREA0_VADDR + (9 * DEVICE_PAGE)) -/* Add userspace devices here as they become necessary for irqs */ /* Add size of various user space devices, to be used in capability generation */ +#define PLATFORM_TIMER1_VBASE (IO_AREA0_VADDR + (10 * DEVICE_PAGE)) +#define PLATFORM_KEYBOARD0_VBASE (IO_AREA0_VADDR + (11 * DEVICE_PAGE)) +#define PLATFORM_MOUSE0_VBASE (IO_AREA0_VADDR + (12 * DEVICE_PAGE)) +#define PLATFORM_CLCD0_VBASE (IO_AREA0_VADDR + (13 * DEVICE_PAGE)) /* The SP810 system controller offsets */ #define SP810_BASE PLATFORM_SYSCTRL_VBASE @@ -59,6 +64,9 @@ #define PLATFORM_UART2_SIZE DEVICE_PAGE #define PLATFORM_UART3_SIZE DEVICE_PAGE #define PLATFORM_TIMER1_SIZE DEVICE_PAGE +#define PLATFORM_KEYBOARD0_SIZE DEVICE_PAGE +#define PLATFORM_MOUSE0_SIZE DEVICE_PAGE +#define PLATFORM_CLCD0_SIZE DEVICE_PAGE #endif /* __PLATFORM_REALVIEW_OFFSETS_H__ */ diff --git a/scripts/conts/containers.py b/scripts/conts/containers.py index 43b1746..7207331 100755 --- a/scripts/conts/containers.py +++ b/scripts/conts/containers.py @@ -8,6 +8,7 @@ import os, sys, shelve, glob from os.path import join from tools.pyelf.elfsize import * +from tools.pyelf.elf_section_info import * PROJRELROOT = '../../' @@ -22,6 +23,11 @@ from scripts.linux.build_atags import * from pack import * from packall import * +def fill_pager_section_markers(cont, pager_binary): + cont.pager_rw_section_start, cont.pager_rw_section_end, \ + cont.pager_rx_section_start, cont.pager_rx_section_end = \ + elf_loadable_section_info(join(PROJROOT, pager_binary)) + def build_linux_container(config, projpaths, container): linux_builder = LinuxBuilder(projpaths, container) linux_builder.build_linux(config) @@ -33,9 +39,12 @@ def build_linux_container(config, projpaths, container): # Calculate and store size of pager pager_binary = \ - "cont" + str(container.id) + "/linux/linux-2.6.33/linux.elf" + join(BUILDDIR, "cont" + str(container.id) + + 
"/linux/linux-2.6.33/linux.elf") config.containers[container.id].pager_size = \ - conv_hex(elf_binary_size(join(BUILDDIR, pager_binary))) + conv_hex(elf_binary_size(pager_binary)) + + fill_pager_section_markers(config.containers[container.id], pager_binary) linux_container_packer = \ LinuxContainerPacker(container, linux_builder, \ @@ -70,9 +79,13 @@ def build_posix_container(config, projpaths, container): os.path.walk(builddir, glob_by_walk, ['*.elf', images]) # Calculate and store size of pager - pager_binary = "cont" + str(container.id) + "/posix/mm0/mm0.elf" + pager_binary = join(BUILDDIR, + "cont" + str(container.id) + "/posix/mm0/mm0.elf") config.containers[container.id].pager_size = \ - conv_hex(elf_binary_size(join(BUILDDIR, pager_binary))) + conv_hex(elf_binary_size(pager_binary)) + + print 'Find markers for ' + pager_binary + fill_pager_section_markers(config.containers[container.id], pager_binary) container_packer = DefaultContainerPacker(container, images) return container_packer.pack_container(config) @@ -89,9 +102,11 @@ def build_default_container(config, projpaths, container): os.path.walk(projdir, glob_by_walk, ['*.elf', images]) # Calculate and store size of pager - pager_binary = "conts/" + container.name + "/main.elf" + pager_binary = join(PROJROOT, "conts/" + container.name + "/main.elf") config.containers[container.id].pager_size = \ - conv_hex(elf_binary_size(join(PROJROOT, pager_binary))) + conv_hex(elf_binary_size(pager_binary)) + + fill_pager_section_markers(config.containers[container.id], pager_binary) container_packer = DefaultContainerPacker(container, images) return container_packer.pack_container(config) diff --git a/scripts/kernel/generate_kernel_cinfo.py b/scripts/kernel/generate_kernel_cinfo.py index 7bed6f7..e3d59b9 100755 --- a/scripts/kernel/generate_kernel_cinfo.py +++ b/scripts/kernel/generate_kernel_cinfo.py @@ -67,6 +67,10 @@ pager_start = \ \t\t\t.pager_lma = __pfn(CONFIG_CONT%(cn)d_PAGER_LOAD_ADDR), \t\t\t.pager_vma = 
__pfn(CONFIG_CONT%(cn)d_PAGER_VIRT_ADDR), \t\t\t.pager_size = __pfn(page_align_up(CONT%(cn)d_PAGER_MAPSIZE)), +\t\t\t.rw_sections_start = %(rw_sec_start)s, +\t\t\t.rw_sections_end = %(rw_sec_end)s, +\t\t\t.rx_sections_start = %(rx_sec_start)s, +\t\t\t.rx_sections_end = %(rx_sec_end)s, \t\t\t.ncaps = %(caps)d, \t\t\t.caps = { ''' @@ -160,7 +164,12 @@ def generate_kernel_cinfo(config, cinfo_path): # Currently only these are considered as capabilities total_caps = c.virt_regions + c.phys_regions + len(c.caps) fbody += cinfo_start % (c.id, c.name) - fbody += pager_start % { 'cn' : c.id, 'caps' : total_caps} + fbody += pager_start % { 'cn' : c.id, 'caps' : total_caps, + 'rw_sec_start' : hex(c.pager_rw_section_start), + 'rw_sec_end' : hex(c.pager_rw_section_end), + 'rx_sec_start' : hex(c.pager_rx_section_start), + 'rx_sec_end' : hex(c.pager_rx_section_end), + } cap_index = 0 for mem_index in range(c.virt_regions): fbody += cap_virtmem % { 'capidx' : cap_index, 'cn' : c.id, 'vn' : mem_index } diff --git a/scripts/qemu/qemu_cmdline.py b/scripts/qemu/qemu_cmdline.py index e3f48e9..6232e29 100644 --- a/scripts/qemu/qemu_cmdline.py +++ b/scripts/qemu/qemu_cmdline.py @@ -23,12 +23,13 @@ from config.configuration import * map_list = (['EB', 'ARM1136', 'realview-eb', 'arm1136'], ['EB', 'ARM11MPCORE', 'realview-eb-mpcore', 'arm11mpcore'], ['EB', 'CORTEXA8', 'realview-eb', 'cortex-a8'], + ['EB', 'CORTEXA9', 'realview-pbx-a9', 'cortex-a9'], ['PB926', 'ARM926', 'versatilepb', 'arm926'], ['BEAGLE', 'CORTEXA8', 'beagle', 'cortex-a8'], ['PBA9', 'CORTEXA9', 'realview-pbx-a9', 'cortex-a9'], ['PBA8', 'CORTEXA8', 'realview-pb-a8', 'cortex-a8']) -data = \ +data_up = \ ''' cd build qemu-system-arm -s -S -kernel final.elf -nographic -M %s -cpu %s & @@ -36,6 +37,14 @@ arm-none-insight ; pkill qemu-system-arm cd .. ''' +data_smp = \ +''' +cd build +qemu-system-arm -s -S -kernel final.elf -smp %d -nographic -M %s -cpu %s & +arm-none-insight ; pkill qemu-system-arm +cd .. 
+''' + def build_qemu_cmdline_script(): build_tools_folder = 'tools' qemu_cmd_file = join(build_tools_folder, 'run-qemu-insight') @@ -44,10 +53,14 @@ def build_qemu_cmdline_script(): config = configuration_retrieve() cpu = config.cpu.upper() platform = config.platform.upper() + smp = config.smp + ncpu = config.ncpu # Find appropriate flags - for platform_type, cpu_type, mflag, cpuflag in map_list: + for platform_type, cpu_type, m_flag, cpu_flag in map_list: if platform_type == platform and cpu_type == cpu: + mflag = m_flag + cpuflag = cpu_flag break if not mflag or not cpuflag: @@ -57,9 +70,16 @@ def build_qemu_cmdline_script(): if os.path.exists(build_tools_folder) is False: os.system("mkdir " + build_tools_folder) + # Special case for EB+A9(non-smp) + if platform == 'EB' and cpu == 'CORTEXA9' and smp == False: + mflag = 'realview-eb' + # Write run-qemu-insight file with open(qemu_cmd_file, 'w+') as f: - f.write(data % (mflag, cpuflag)) + if smp == False: + f.write(data_up % (mflag, cpuflag)) + else: + f.write(data_smp % (ncpu, mflag, cpuflag)) os.system("chmod +x " + qemu_cmd_file) diff --git a/src/api/irq.c b/src/api/irq.c index 1811e3a..05205aa 100644 --- a/src/api/irq.c +++ b/src/api/irq.c @@ -91,6 +91,9 @@ int irq_control_register(struct ktcb *task, int slot, l4id_t irqnum) if ((err = irq_register(current, slot, irqnum)) < 0) return err; + /* Make thread a real-time task */ + current->flags |= TASK_REALTIME; + return 0; } @@ -111,16 +114,6 @@ int irq_wait(l4id_t irq_index) if ((ret = tcb_check_and_lazy_map_utcb(current, 1)) < 0) return ret; - /* - * In case user has asked for unmasking the irq only after - * user hanlder is done, unmask the irq - * - * FIXME: This is not the correct place for this call, - * fix this. 
- */ - if (desc->user_ack) - irq_enable(irq_index); - /* Wait until the irq changes slot value */ WAIT_EVENT(&desc->wqh_irq, utcb->notify[desc->task_notify_slot] != 0, diff --git a/src/api/mutex.c b/src/api/mutex.c index 427a09f..357a851 100644 --- a/src/api/mutex.c +++ b/src/api/mutex.c @@ -102,16 +102,56 @@ void mutex_control_delete(struct mutex_queue *mq) } /* - * A contended thread is expected to show up with the - * contended mutex address here. + * Here's how this whole mutex implementation works: * - * (1) The mutex is converted into its physical form and - * searched for in the existing mutex list. If it does not - * appear there, it gets added. - * (2) The thread is put to sleep in the mutex wait queue - * until a wake up event occurs. If there is already an asleep - * lock holder (i.e. unlocker) that is woken up and we return. + * A thread who locked a user mutex learns how many + * contentions were on it as it unlocks it. It is obliged to + * go to the kernel to wake that many threads up. + * + * Each contender sleeps in the kernel, but the time + * of arrival in the kernel by both the unlocker or + * contenders is asynchronous. + * + * Mutex queue scenarios at any one time: + * + * 1) There may be multiple contenders waiting for + * an earlier lock holder: + * + * Lock holders waitqueue: Empty + * Contenders waitqueue: C - C - C - C + * Contenders to wake up: 0 + * + * The lock holder would wake up that many contenders that it counted + * earlier in userspace as it released the lock. + * + * 2) There may be one lock holder waiting for contenders to arrive: + * + * Lock holders waitqueue: LH + * Contenders waitqueue: Empty + * Contenders to wake up: 5 + * + * As each contender comes in, the contenders value is reduced, and + * when it becomes zero, the lock holder is woken up and mutex + * deleted. 
+ * + * 3) Occasionally multiple lock holders who just released the lock + * make it to the kernel before any contenders: + * + * Contenders: Empty + * Lock holders: LH + * Contenders to wake up: 5 + * + * -> New Lock holder arrives. + * + * As soon as the above occurs, the new LH wakes up the waiting one, + * increments the contenders by its own contender count and starts + * waiting. The scenario transitions to Scenario (2) in this case. + * + * The asynchronous nature of contender and lock holder arrivals make + * for many possibilities, but what matters is the same number of + * wake ups must occur as the number of contended waits. */ + int mutex_control_lock(struct mutex_queue_head *mqhead, unsigned long mutex_address) { @@ -128,24 +168,27 @@ int mutex_control_lock(struct mutex_queue_head *mqhead, } /* Add the queue to mutex queue list */ mutex_control_add(mqhead, mutex_queue); - } else { - /* See if there is a lock holder */ - if (mutex_queue->wqh_holders.sleepers) { - /* - * If yes, wake it up async and we can *hope* - * to acquire the lock before the lock holder - */ + + } else if (mutex_queue->wqh_holders.sleepers) { + /* + * There's a lock holder, so we can consume from + * number of contenders since we are one of them. 
+ */ + mutex_queue->contenders--; + + /* No contenders left as far as current holder is concerned */ + if (mutex_queue->contenders == 0) { + /* Wake up current holder */ wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC); - /* Since noone is left, delete the mutex queue */ + /* There must not be any contenders, delete the mutex */ mutex_control_remove(mqhead, mutex_queue); mutex_control_delete(mutex_queue); - - /* Release lock and return */ - mutex_queue_head_unlock(mqhead); - - return 0; } + + /* Release lock and return */ + mutex_queue_head_unlock(mqhead); + return 0; } /* Prepare to wait on the contenders queue */ @@ -160,22 +203,8 @@ int mutex_control_lock(struct mutex_queue_head *mqhead, return wait_on_prepared_wait(); } -/* - * A thread that has detected a contention on a mutex that - * it had locked but has just released is expected to show up with - * that mutex here. - * - * (1) The mutex is converted into its physical form and - * searched for in the existing mutex list. If not found, - * a new one is created and the thread sleeps there as a lock - * holder. - * (2) All the threads waiting on this mutex are woken up. This may - * cause a thundering herd, but user threads cannot be trusted - * to acquire the mutex, waking up all of them increases the - * chances that some thread may acquire it. 
- */ int mutex_control_unlock(struct mutex_queue_head *mqhead, - unsigned long mutex_address) + unsigned long mutex_address, int contenders) { struct mutex_queue *mutex_queue; @@ -190,6 +219,9 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead, return -ENOMEM; } + /* Set new or increment the contenders value */ + mutex_queue->contenders = contenders; + /* Add the queue to mutex queue list */ mutex_control_add(mqhead, mutex_queue); @@ -206,51 +238,67 @@ int mutex_control_unlock(struct mutex_queue_head *mqhead, return wait_on_prepared_wait(); } + /* Set new or increment the contenders value */ + mutex_queue->contenders += contenders; + + /* Wake up holders if any, and take wake up responsibility */ + if (mutex_queue->wqh_holders.sleepers) + wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC); + /* - * Note, the mutex in userspace was left free before the - * syscall was entered. - * - * It is possible that a thread has acquired it, another - * contended on it and the holder made it to the kernel - * quicker than us. We detect this situation here. + * Now wake up as many contenders as possible, otherwise + * go to sleep on holders queue */ - if (mutex_queue->wqh_holders.sleepers) { - /* - * Let the first holder do all the waking up - */ - mutex_queue_head_unlock(mqhead); - return 0; + while (mutex_queue->contenders && + mutex_queue->wqh_contenders.sleepers) { + /* Reduce total contenders to be woken up */ + mutex_queue->contenders--; + + /* Wake up a contender who made it to kernel */ + wake_up(&mutex_queue->wqh_contenders, WAKEUP_ASYNC); } /* - * Found it, if it exists, there are contenders, - * now wake all of them up in FIFO order. - * FIXME: Make sure this is FIFO order. It doesn't seem so. + * Are we done with all? Leave. + * + * Not enough contenders? Go to sleep and wait for a new + * contender rendezvous. 
*/ - wake_up_all(&mutex_queue->wqh_contenders, WAKEUP_ASYNC); + if (mutex_queue->contenders == 0) { + /* Delete only if no more contenders */ + if (mutex_queue->wqh_contenders.sleepers == 0) { + /* Since noone is left, delete the mutex queue */ + mutex_control_remove(mqhead, mutex_queue); + mutex_control_delete(mutex_queue); + } - /* Since noone is left, delete the mutex queue */ - mutex_control_remove(mqhead, mutex_queue); - mutex_control_delete(mutex_queue); + /* Release lock and return */ + mutex_queue_head_unlock(mqhead); + } else { + /* Prepare to wait on the lock holders queue */ + CREATE_WAITQUEUE_ON_STACK(wq, current); + + /* Prepare to wait */ + wait_on_prepare(&mutex_queue->wqh_holders, &wq); + + /* Release lock first */ + mutex_queue_head_unlock(mqhead); + + /* Initiate prepared wait */ + return wait_on_prepared_wait(); + } - /* Release lock and return */ - mutex_queue_head_unlock(mqhead); return 0; } -int sys_mutex_control(unsigned long mutex_address, int mutex_op) +int sys_mutex_control(unsigned long mutex_address, int mutex_flags) { unsigned long mutex_physical; - int ret = 0; + int mutex_op = mutex_operation(mutex_flags); + int contenders = mutex_contenders(mutex_flags); + int ret; - // printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid); - - /* Check valid operation */ - if (mutex_op != MUTEX_CONTROL_LOCK && - mutex_op != MUTEX_CONTROL_UNLOCK) { - printk("Invalid args to %s.\n", __FUNCTION__); - return -EINVAL; - } + //printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid); /* Check valid user virtual address */ if (KERN_ADDR(mutex_address)) { @@ -258,6 +306,10 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op) return -EINVAL; } + if (mutex_op != MUTEX_CONTROL_LOCK && + mutex_op != MUTEX_CONTROL_UNLOCK) + return -EPERM; + if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0) return ret; @@ -278,11 +330,8 @@ int sys_mutex_control(unsigned long mutex_address, int mutex_op) break; case MUTEX_CONTROL_UNLOCK: ret = 
mutex_control_unlock(&curcont->mutex_queue_head, - mutex_physical); + mutex_physical, contenders); break; - default: - printk("%s: Invalid operands\n", __FUNCTION__); - ret = -EINVAL; } return ret; diff --git a/src/api/thread.c b/src/api/thread.c index c6b1432..4dbcd80 100644 --- a/src/api/thread.c +++ b/src/api/thread.c @@ -494,10 +494,22 @@ int sys_thread_control(unsigned int flags, struct task_ids *ids) MAP_USR_RW, 1)) < 0) return err; - if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) + if ((flags & THREAD_ACTION_MASK) != THREAD_CREATE) { if (!(task = tcb_find(ids->tid))) return -ESRCH; + /* + * Tasks may only operate on their children. They may + * also destroy themselves or any children. + */ + if ((flags & THREAD_ACTION_MASK) == THREAD_DESTROY && + !task_is_child(task) && task != current) + return -EPERM; + if ((flags & THREAD_ACTION_MASK) != THREAD_DESTROY + && !task_is_child(task)) + return -EPERM; + } + if ((err = cap_thread_check(task, flags, ids)) < 0) return err; diff --git a/src/arch/arm/exception-common.c b/src/arch/arm/exception-common.c index ae910cf..7a934b7 100644 --- a/src/arch/arm/exception-common.c +++ b/src/arch/arm/exception-common.c @@ -325,7 +325,7 @@ extern int current_irq_nest_count; */ void irq_overnest_error(void) { - dprintk("Irqs nested beyond limit. Current count: ", + printk("Irqs nested beyond limit. Current count: %d", current_irq_nest_count); print_early("System halted...\n"); while(1) diff --git a/src/arch/arm/v5/SConscript b/src/arch/arm/v5/SConscript index 71ff4c4..e35d6c1 100644 --- a/src/arch/arm/v5/SConscript +++ b/src/arch/arm/v5/SConscript @@ -4,7 +4,7 @@ Import('env') # The set of source files associated with this SConscript file. 
-src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c'] +src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c', 'atomic.S'] obj = env.Object(src_local) Return('obj') diff --git a/src/arch/arm/v5/exception.c b/src/arch/arm/v5/exception.c index 6fbf5d5..058770e 100644 --- a/src/arch/arm/v5/exception.c +++ b/src/arch/arm/v5/exception.c @@ -58,59 +58,59 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr) /* Aborts that can't be handled by a pager yet: */ case DABT_TERMINAL: - dprintk("Terminal fault dabt %x", far); + dprintk("Terminal fault dabt ", far); ret = -EABORT; break; case DABT_VECTOR: - dprintk("Vector abort (obsolete!) %x", far); + dprintk("Vector abort (obsolete!) ", far); ret = -EABORT; break; case DABT_ALIGN: - dprintk("Alignment fault dabt %x", far); + dprintk("Alignment fault dabt ", far); ret = -EABORT; break; case DABT_EXT_XLATE_LEVEL1: - dprintk("External LVL1 translation fault %x", far); + dprintk("External LVL1 translation fault ", far); ret = -EABORT; break; case DABT_EXT_XLATE_LEVEL2: - dprintk("External LVL2 translation fault %x", far); + dprintk("External LVL2 translation fault ", far); ret = -EABORT; break; case DABT_DOMAIN_SECT: - dprintk("Section domain fault dabt %x", far); + dprintk("Section domain fault dabt ", far); ret = -EABORT; break; case DABT_DOMAIN_PAGE: - dprintk("Page domain fault dabt %x", far); + dprintk("Page domain fault dabt ", far); ret = -EABORT; break; case DABT_PERM_SECT: - dprintk("Section permission fault dabt %x", far); + dprintk("Section permission fault dabt ", far); ret = -EABORT; break; case DABT_EXT_LFETCH_SECT: dprintk("External section linefetch " - "fault dabt %x", far); + "fault dabt ", far); ret = -EABORT; break; case DABT_EXT_LFETCH_PAGE: - dprintk("Page perm fault dabt %x", far); + dprintk("Page perm fault dabt ", far); ret = -EABORT; break; case DABT_EXT_NON_LFETCH_SECT: dprintk("External section non-linefetch " 
- "fault dabt %x ", far); + "fault dabt ", far); ret = -EABORT; break; case DABT_EXT_NON_LFETCH_PAGE: dprintk("External page non-linefetch " - "fault dabt %x ", far); + "fault dabt ", far); ret = -EABORT; break; default: dprintk("FATAL: Unrecognised/Unknown " - "data abort %x ", far); + "data abort ", far); dprintk("FATAL: FSR code: ", fsr); ret = -EABORT; } @@ -122,7 +122,7 @@ int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr) */ if (is_kernel_address(faulted_pc)) { dprintk("Unhandled kernel data " - "abort at address %x", + "abort at address ", faulted_pc); ret = -EABORT; } diff --git a/src/arch/arm/v5/irq.c b/src/arch/arm/v5/irq.c index 11a99ba..ca967e4 100644 --- a/src/arch/arm/v5/irq.c +++ b/src/arch/arm/v5/irq.c @@ -30,33 +30,6 @@ void irq_local_restore(unsigned long state) ); } -u8 l4_atomic_dest_readb(unsigned long *location) -{ -#if 0 - unsigned int tmp; - __asm__ __volatile__ ( - "swpb r0, r2, [r1] \n" - : "=r"(tmp) - : "r"(location), "r"(0) - : "memory" - ); - - return (u8)tmp; -#endif - - unsigned int tmp; - unsigned long state; - irq_local_disable_save(&state); - - tmp = *location; - *location = 0; - - irq_local_restore(state); - - return (u8)tmp; - -} - int irqs_enabled(void) { int tmp; diff --git a/src/arch/arm/v5/mapping.c b/src/arch/arm/v5/mapping.c index d3bfaa3..87f9224 100644 --- a/src/arch/arm/v5/mapping.c +++ b/src/arch/arm/v5/mapping.c @@ -372,9 +372,9 @@ void arch_space_switch(struct ktcb *to) void idle_task(void) { while(1) { + /* Do maintenance */ tcb_delete_zombies(); - // printk("Idle task.\n"); schedule(); } } diff --git a/src/arch/arm/vectors.S b/src/arch/arm/vectors.S index 7e3b7a4..a5ac265 100644 --- a/src/arch/arm/vectors.S +++ b/src/arch/arm/vectors.S @@ -7,7 +7,6 @@ #include INC_ARCH(asm.h) #include INC_ARCH(asm-macros.S) -.balign 4096 .section .data.vectors __vector_vaddr: @@ -896,5 +895,4 @@ __irq_stack: .space 128 __fiq_stack: .space 128 __und_stack: .space 128 -.balign 4096 diff --git a/src/drivers/irq/gic/gic.c 
b/src/drivers/irq/gic/gic.c index 93957c9..fb46152 100644 --- a/src/drivers/irq/gic/gic.c +++ b/src/drivers/irq/gic/gic.c @@ -1,25 +1,30 @@ /* - * PLXXX Generic Interrupt Controller support. + * Generic Interrupt Controller support. * - * This is more ARM Realview EB/PB * Copyright (C) 2009-2010 B Labs Ltd. - * Author: Prem Mallappa + * + * Authors: Prem Mallappa, Bahadir Balban */ - #include #include #include #include INC_PLAT(irq.h) -#include INC_SUBARCH(mmu_ops.h) /* for dmb/dsb() */ +#include INC_SUBARCH(mmu_ops.h) #include +#include + +#define GIC_ACK_IRQ_MASK 0x1FF +#define GIC_ACK_CPU_MASK 0xE00 +#define GIC_IRQ_SPURIOUS 0x3FF volatile struct gic_data gic_data[IRQ_CHIPS_MAX]; static inline struct gic_data *get_gic_data(l4id_t irq) { - struct irq_chip *chip = irq_desc_array[irq].chip; + volatile struct irq_chip *chip = irq_desc_array[irq].chip; + if (chip) - return (struct gic_data *)irq_desc_array[irq].chip->data; + return (struct gic_data *)chip->data; else return 0; } @@ -27,115 +32,129 @@ static inline struct gic_data *get_gic_data(l4id_t irq) /* Returns the irq number on this chip converting the irq bitvector */ l4id_t gic_read_irq(void *data) { - int irq; volatile struct gic_data *gic = (struct gic_data *)data; - irq = gic->cpu->ack & 0x1ff; + l4id_t irq = gic->cpu->ack; - if (irq == 1023) - return -1023; /* Spurious */ + /* This is an IPI - EOI it here, since it requires cpu field */ + if ((irq & GIC_ACK_IRQ_MASK) < 16) { + gic_eoi_irq(irq); + /* Get the actual irq number */ + irq &= GIC_ACK_IRQ_MASK; + } + /* Detect GIC spurious magic value and return generic one */ + if (irq == GIC_IRQ_SPURIOUS) + return IRQ_SPURIOUS; return irq; } void gic_mask_irq(l4id_t irq) { - u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */ volatile struct gic_data *gic = get_gic_data(irq); + u32 offset = irq >> 5; /* irq / 32 */ + gic->dist->clr_en[offset] = 1 << (irq % 32); } void gic_unmask_irq(l4id_t irq) { volatile struct gic_data *gic = get_gic_data(irq); 
+ u32 offset = irq >> 5 ; /* irq / 32 */ - u32 offset = irq >> 5 ; /* offset = irq / 32 */ gic->dist->set_en[offset] = 1 << (irq % 32); } -void gic_ack_irq(l4id_t irq) +void gic_eoi_irq(l4id_t irq) { - u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */ - volatile struct gic_data *gic = get_gic_data(irq); - gic->dist->clr_en[offset] = 1 << (irq % 32); + /* Careful, irq may have cpu field encoded */ + volatile struct gic_data *gic = + get_gic_data(irq & GIC_ACK_IRQ_MASK); + gic->cpu->eoi = irq; } void gic_ack_and_mask(l4id_t irq) { - gic_ack_irq(irq); + //printk("disable/eoi irq %d\n", irq); gic_mask_irq(irq); + gic_eoi_irq(irq); } void gic_set_pending(l4id_t irq) { - u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */ volatile struct gic_data *gic = get_gic_data(irq); + u32 offset = irq >> 5; /* irq / 32 */ gic->dist->set_pending[offset] = 1 << (irq % 32); } void gic_clear_pending(l4id_t irq) { - u32 offset = irq >> 5; /* offset = irq / 32, avoiding division */ volatile struct gic_data *gic = get_gic_data(irq); + u32 offset = irq >> 5; /* irq / 32 */ + gic->dist->clr_pending[offset] = 1 << (irq % 32); } void gic_cpu_init(int idx, unsigned long base) { - struct gic_cpu *cpu; - cpu = gic_data[idx].cpu = (struct gic_cpu *)base; + volatile struct gic_cpu *cpu; + + gic_data[idx].cpu = (struct gic_cpu *)base; + + cpu = gic_data[idx].cpu; /* Disable */ cpu->control = 0; + /* Set */ cpu->prio_mask = 0xf0; cpu->bin_point = 3; + /* Enable */ cpu->control = 1; } void gic_dist_init(int idx, unsigned long base) { - int i, irqs_per_word; /* Interrupts per word */ - struct gic_dist *dist; - dist = gic_data[idx].dist = (struct gic_dist *)(base); + volatile struct gic_dist *dist; + int irqs_per_word; + int nirqs; - /* Surely disable GIC */ + gic_data[idx].dist = (struct gic_dist *)(base); + + dist = gic_data[idx].dist; + + /* Disable gic */ dist->control = 0; /* 32*(N+1) interrupts supported */ - int nirqs = 32 * ((dist->type & 0x1f) + 1); + nirqs = 32 * 
((dist->type & 0x1f) + 1); if (nirqs > IRQS_MAX) nirqs = IRQS_MAX; - /* Clear all interrupts */ + /* Disable all interrupts */ irqs_per_word = 32; - for(i = 0; i < nirqs ; i+=irqs_per_word) { + for (int i = 0; i < nirqs; i += irqs_per_word) dist->clr_en[i/irqs_per_word] = 0xffffffff; - } /* Clear all pending interrupts */ - for(i = 0; i < nirqs ; i+=irqs_per_word) { + for (int i = 0; i < nirqs; i += irqs_per_word) dist->clr_pending[i/irqs_per_word] = 0xffffffff; - } /* Set all irqs as normal priority, 8 bits per interrupt */ irqs_per_word = 4; - for(i = 32; i < nirqs ; i+=irqs_per_word) { + for (int i = 32; i < nirqs; i += irqs_per_word) dist->priority[i/irqs_per_word] = 0xa0a0a0a0; - } /* Set all target to cpu0, 8 bits per interrupt */ - for(i = 32; i < nirqs ; i+=irqs_per_word) { + for (int i = 32; i < nirqs; i += irqs_per_word) dist->target[i/irqs_per_word] = 0x01010101; - } /* Configure all to be level-sensitive, 2 bits per interrupt */ irqs_per_word = 16; - for(i = 32; i < nirqs ; i+=irqs_per_word) { + for (int i = 32; i < nirqs; i += irqs_per_word) dist->config[i/irqs_per_word] = 0x00000000; - } /* Enable GIC Distributor */ dist->control = 1; @@ -143,24 +162,28 @@ void gic_dist_init(int idx, unsigned long base) /* Some functions, may be helpful */ -void gic_set_target(u32 irq, u32 cpu) +void gic_set_target(l4id_t irq, u32 cpu) { - /* cpu is a mask, not cpu number */ - cpu &= 0xF; - irq &= 0xFF; volatile struct gic_data *gic = get_gic_data(irq); - u32 offset = irq >> 2; /* offset = irq / 4 */ + u32 offset = irq >> 2; /* irq / 4 */ + + if (cpu > 1) { + printk("Setting irqs to reach multiple cpu targets requires a" + "lock on the irq controller\n" + "GIC is a racy hardware in this respect\n"); + BUG(); + } + gic->dist->target[offset] |= (cpu << ((irq % 4) * 8)); } u32 gic_get_target(u32 irq) { - /* cpu is a mask, not cpu number */ - unsigned int target; - irq &= 0xFF; - u32 offset = irq >> 2; /* offset = irq / 4 */ volatile struct gic_data *gic = 
get_gic_data(irq); - target = gic->dist->target[offset]; + u32 offset = irq >> 2; /* irq / 4 */ + unsigned int target = gic->dist->target[offset]; + + BUG_ON(irq > 0xFF); target >>= ((irq % 4) * 8); return target & 0xFF; @@ -168,54 +191,44 @@ u32 gic_get_target(u32 irq) void gic_set_priority(u32 irq, u32 prio) { - /* cpu is a mask, not cpu number */ - prio &= 0xF; - irq &= 0xFF; - u32 offset = irq >> 3; /* offset = irq / 8 */ volatile struct gic_data *gic = get_gic_data(irq); + u32 offset = irq >> 3; /* irq / 8 */ + + BUG_ON(prio > 0xF); + BUG_ON(irq > 0xFF); + /* target = cpu << ((irq % 4) * 4) */ gic->dist->target[offset] |= (prio << (irq & 0x1C)); } u32 gic_get_priority(u32 irq) { - /* cpu is a mask, not cpu number */ - irq &= 0xFF; - u32 offset = irq >> 3; /* offset = irq / 8 */ volatile struct gic_data *gic = get_gic_data(irq); - return gic->dist->target[offset] & (irq & 0xFC); + u32 offset = irq >> 3; /* offset = irq / 8 */ + u32 prio = gic->dist->target[offset] & (irq & 0xFC); + + return prio; } -#define TO_MANY 0 /* to all specified in a CPU mask */ -#define TO_OTHERS 1 /* all but me */ -#define TO_SELF 2 /* just to the requesting CPU */ +#define IPI_CPU_SHIFT 16 -#define CPU_MASK_BIT 16 -#define TYPE_MASK_BIT 24 - -void gic_send_ipi(int cpu, int ipi_cmd) +void gic_send_ipi(int cpumask, int ipi_cmd) { - /* if cpu is 0, then ipi is sent to self - * if cpu has exactly 1 bit set, the ipi to just that core - * if cpu has a mask, sent to all but current - */ - struct gic_dist *dist = gic_data[0].dist; - - ipi_cmd &= 0xf; - cpu &= 0xff; - - dsb(); - - if (cpu == 0) /* Self */ - dist->soft_int = (TO_SELF << 24) | ipi_cmd; - else if ((cpu & (cpu-1)) == 0) /* Exactly to one CPU */ - dist->soft_int = (TO_MANY << 24) | (cpu << 16) | ipi_cmd; - else /* All but me */ - dist->soft_int = (TO_OTHERS << 24) | (cpu << 16) | ipi_cmd; + volatile struct gic_dist *dist = gic_data[0].dist; + unsigned int ipi_word = (cpumask << IPI_CPU_SHIFT) | ipi_cmd; + dist->soft_int = 
ipi_word; } -/* Make the generic code happy :) */ +void gic_print_cpu() +{ + volatile struct gic_cpu *cpu = gic_data[0].cpu; + + printk("GIC CPU%d highest pending: %d\n", smp_get_cpuid(), cpu->high_pending); + printk("GIC CPU%d running: %d\n", smp_get_cpuid(), cpu->running); +} + +/* Make the generic code happy */ void gic_dummy_init() { diff --git a/src/generic/container.c b/src/generic/container.c index a4d39da..3b14658 100644 --- a/src/generic/container.c +++ b/src/generic/container.c @@ -121,6 +121,7 @@ int init_pager(struct pager *pager, struct container *cont) /* Add the address space to container space list */ address_space_add(task->space); +#if 0 printk("%s: Mapping 0x%lx bytes (%lu pages) " "from 0x%lx to 0x%lx for %s\n", __KERNELNAME__, pager->memsize, @@ -131,6 +132,58 @@ int init_pager(struct pager *pager, struct container *cont) add_mapping_pgd(pager->start_lma, pager->start_vma, page_align_up(pager->memsize), MAP_USR_RWX, TASK_PGD(task)); +#else + /* + * Map pager with appropriate section flags + * We do page_align_down() to do a page alignment for + * various kinds of sections, this automatically + * takes care of the case where we have different kinds of + * data lying on same page, eg: RX, RO etc. + * Here one assumption made is, starting of first + * RW section will be already page aligned, if this is + * not true then we have to take special care of this. 
+ */ + if(pager->rx_sections_end >= pager->rw_sections_start) { + pager->rx_sections_end = page_align(pager->rx_sections_end); + pager->rw_sections_start = page_align(pager->rw_sections_start); + } + + unsigned long size = 0; + if((size = page_align_up(pager->rx_sections_end) - + page_align_up(pager->rx_sections_start))) { + add_mapping_pgd(page_align_up(pager->rx_sections_start - + pager->start_vma + + pager->start_lma), + page_align_up(pager->rx_sections_start), + size, MAP_USR_RX, TASK_PGD(task)); + + printk("%s: Mapping 0x%lx bytes as RX " + "from 0x%lx to 0x%lx for %s\n", + __KERNELNAME__, size, + page_align_up(pager->rx_sections_start - + pager->start_vma + pager->start_lma), + page_align_up(pager->rx_sections_start), + cont->name); + } + + if((size = page_align_up(pager->rw_sections_end) - + page_align_up(pager->rw_sections_start))) { + add_mapping_pgd(page_align_up(pager->rw_sections_start - + pager->start_vma + + pager->start_lma), + page_align_up(pager->rw_sections_start), + size, MAP_USR_RW, TASK_PGD(task)); + + printk("%s: Mapping 0x%lx bytes as RW " + "from 0x%lx to 0x%lx for %s\n", + __KERNELNAME__, size, + page_align_up(pager->rw_sections_start - + pager->start_vma + pager->start_lma), + page_align_up(pager->rw_sections_start), + cont->name); + } + +#endif /* Move capability list from dummy to task's space cap list */ cap_list_move(&task->space->cap_list, ¤t->cap_list); diff --git a/src/generic/irq.c b/src/generic/irq.c index 75027fc..bb0bcc3 100644 --- a/src/generic/irq.c +++ b/src/generic/irq.c @@ -1,7 +1,7 @@ /* * Generic kernel irq handling. 
* - * Copyright (C) 2007 - 2009 Bahadir Balban + * Copyright (C) 2007 - 2010 Bahadir Balban */ #include #include @@ -127,10 +127,21 @@ l4id_t global_irq_index(void) return IRQ_NIL; } +#include + void do_irq(void) { l4id_t irq_index = global_irq_index(); - struct irq_desc *this_irq = irq_desc_array + irq_index; + struct irq_desc *this_irq; + + if (irq_index == IRQ_SPURIOUS) { + printk("CPU%d: FATAL: Spurious irq\n", smp_get_cpuid()); + BUG(); + } + + // printk("CPU%d: Received irq %d\n", smp_get_cpuid(), irq_index); + + this_irq = irq_desc_array + irq_index; system_account_irq(); @@ -148,16 +159,10 @@ void do_irq(void) /* Handle the irq */ BUG_ON(!this_irq->handler); if (this_irq->handler(this_irq) != IRQ_HANDLED) { - printk("Spurious or broken irq\n"); + printk("CPU%d: FATAL: Spurious or broken irq\n", + smp_get_cpuid()); BUG(); } - /* - * Do not enable irq if user wants to do it explicitely - */ - if (!this_irq->user_ack) - irq_enable(irq_index); + irq_enable(irq_index); } - - - diff --git a/src/generic/resource.c b/src/generic/resource.c index 2de8dba..b3cec20 100644 --- a/src/generic/resource.c +++ b/src/generic/resource.c @@ -503,6 +503,10 @@ int copy_pager_info(struct pager *pager, struct pager_info *pinfo) pager->start_lma = __pfn_to_addr(pinfo->pager_lma); pager->start_vma = __pfn_to_addr(pinfo->pager_vma); pager->memsize = __pfn_to_addr(pinfo->pager_size); + pager->rw_sections_start = pinfo->rw_sections_start; + pager->rw_sections_end = pinfo->rw_sections_end; + pager->rx_sections_start = pinfo->rx_sections_start; + pager->rx_sections_end = pinfo->rx_sections_end; /* Copy all cinfo structures into real capabilities */ for (int i = 0; i < pinfo->ncaps; i++) { diff --git a/src/generic/scheduler.c b/src/generic/scheduler.c index 8a50e8b..68ffe46 100644 --- a/src/generic/scheduler.c +++ b/src/generic/scheduler.c @@ -119,12 +119,14 @@ void sched_init() sched->rq_runnable = &sched->sched_rq[0]; sched->rq_expired = &sched->sched_rq[1]; + sched->rq_rt_runnable = 
&sched->sched_rq[2]; + sched->rq_rt_expired = &sched->sched_rq[3]; sched->prio_total = TASK_PRIO_TOTAL; sched->idle_task = current; } /* Swap runnable and expired runqueues. */ -static void sched_rq_swap_runqueues(void) +static void sched_rq_swap_queues(void) { struct runqueue *temp; @@ -136,6 +138,18 @@ static void sched_rq_swap_runqueues(void) per_cpu(scheduler).rq_expired = temp; } +static void sched_rq_swap_rtqueues(void) +{ + struct runqueue *temp; + + BUG_ON(list_empty(&per_cpu(scheduler).rq_rt_expired->task_list)); + + /* Queues are swapped and expired list becomes runnable */ + temp = per_cpu(scheduler).rq_rt_runnable; + per_cpu(scheduler).rq_rt_runnable = per_cpu(scheduler).rq_rt_expired; + per_cpu(scheduler).rq_rt_expired = temp; +} + /* Set policy on where to add tasks in the runqueue */ #define RQ_ADD_BEHIND 0 #define RQ_ADD_FRONT 1 @@ -185,6 +199,28 @@ static inline void sched_rq_remove_task(struct ktcb *task) sched_unlock_runqueues(sched, irqflags); } +static inline void +sched_run_task(struct ktcb *task, struct scheduler *sched) +{ + if (task->flags & TASK_REALTIME) + sched_rq_add_task(task, sched->rq_rt_runnable, + RQ_ADD_BEHIND); + else + sched_rq_add_task(task, sched->rq_runnable, + RQ_ADD_BEHIND); +} + +static inline void +sched_expire_task(struct ktcb *task, struct scheduler *sched) +{ + + if (task->flags & TASK_REALTIME) + sched_rq_add_task(current, sched->rq_rt_expired, + RQ_ADD_BEHIND); + else + sched_rq_add_task(current, sched->rq_expired, + RQ_ADD_BEHIND); +} void sched_init_task(struct ktcb *task, int prio) { @@ -196,6 +232,27 @@ void sched_init_task(struct ktcb *task, int prio) task->flags |= TASK_RESUMING; } +/* Synchronously resumes a task */ +void sched_resume_sync(struct ktcb *task) +{ + BUG_ON(task == current); + task->state = TASK_RUNNABLE; + sched_run_task(task, &per_cpu_byid(scheduler, task->affinity)); + schedule(); +} + +/* + * Asynchronously resumes a task. 
+ * The task will run in the future, but at + * the scheduler's discretion. It is possible that current + * task wakes itself up via this function in the scheduler(). + */ +void sched_resume_async(struct ktcb *task) +{ + task->state = TASK_RUNNABLE; + sched_run_task(task, &per_cpu_byid(scheduler, task->affinity)); +} + /* * Takes all the action that will make a task sleep * in the scheduler. If the task is woken up before @@ -210,37 +267,10 @@ void sched_prepare_sleep() preempt_enable(); } -/* Synchronously resumes a task */ -void sched_resume_sync(struct ktcb *task) -{ - BUG_ON(task == current); - task->state = TASK_RUNNABLE; - sched_rq_add_task(task, - per_cpu_byid(scheduler, - task->affinity).rq_runnable, - RQ_ADD_FRONT); - schedule(); -} - /* - * Asynchronously resumes a task. - * The task will run in the future, but at - * the scheduler's discretion. It is possible that current - * task wakes itself up via this function in the scheduler(). - */ -void sched_resume_async(struct ktcb *task) -{ - task->state = TASK_RUNNABLE; - sched_rq_add_task(task, - per_cpu_byid(scheduler, - task->affinity).rq_runnable, - RQ_ADD_FRONT); -// printk("CPU%d: Resuming task %d with affinity %d\n", smp_get_cpuid(), task->tid, task->affinity); -} - -/* - * NOTE: Could do these as sched_prepare_suspend() - * + schedule() or need_resched = 1 + * preempt_enable/disable()'s are for avoiding the + * entry to scheduler during this period - but this + * is only true for current cpu. 
*/ void sched_suspend_sync(void) { @@ -282,6 +312,11 @@ static inline void context_switch(struct ktcb *next) system_account_context_switch(); /* Flush caches and everything */ + BUG_ON(!current); + BUG_ON(!current->space); + BUG_ON(!next); + BUG_ON(!next->space); + BUG_ON(!next->space); if (current->space->spid != next->space->spid) arch_space_switch(next); @@ -306,6 +341,107 @@ static inline int sched_recalc_ticks(struct ktcb *task, int prio_total) CONFIG_SCHED_TICKS * task->priority / prio_total; } +/* + * Select a real-time task 1/8th of any one selection + */ +static inline int sched_select_rt(struct scheduler *sched) +{ + int ctr = sched->task_select_ctr++ & 0xF; + + if (ctr == 0 || ctr == 8 || ctr == 15) + return 0; + else + return 1; +} + +/* + * Selection happens as follows: + * + * A real-time task is chosen %87.5 of the time. This is evenly + * distributed to a given interval. + * + * Idle task is run once when it is explicitly suggested (e.g. + * for cleanup after a task exited) but only when no real-time + * tasks are in the queues. + * + * And idle task is otherwise run only when no other tasks are + * runnable. + */ +struct ktcb *sched_select_next(void) +{ + struct scheduler *sched = &per_cpu(scheduler); + int realtime = sched_select_rt(sched); + struct ktcb *next = 0; + + for (;;) { + + /* Decision to run an RT task? */ + if (realtime && sched->rq_rt_runnable->total > 0) { + /* Get a real-time task, if available */ + next = link_to_struct(sched->rq_rt_runnable->task_list.next, + struct ktcb, rq_list); + break; + } else if (realtime && sched->rq_rt_expired->total > 0) { + /* Swap real-time queues */ + sched_rq_swap_rtqueues(); + /* Get a real-time task */ + next = link_to_struct(sched->rq_rt_runnable->task_list.next, + struct ktcb, rq_list); + break; + /* Idle flagged for run? 
*/ } else if (sched->flags & SCHED_RUN_IDLE) { /* Clear idle flag */ sched->flags &= ~SCHED_RUN_IDLE; next = sched->idle_task; break; } else if (sched->rq_runnable->total > 0) { /* Get a regular runnable task, if available */ next = link_to_struct(sched->rq_runnable->task_list.next, struct ktcb, rq_list); break; } else if (sched->rq_expired->total > 0) { /* Swap queues and retry if not */ sched_rq_swap_queues(); next = link_to_struct(sched->rq_runnable->task_list.next, struct ktcb, rq_list); break; } else if (in_process_context()) { /* No runnable task. Do idle if in process context */ next = sched->idle_task; break; } else { /* * Nobody is runnable. Irq calls must return * to interrupted current process to run idle task */ next = current; break; } } return next; } + +/* Prepare next runnable task right before switching to it */ +void sched_prepare_next(struct ktcb *next) +{ + /* New tasks affect runqueue total priority. */ + if (next->flags & TASK_RESUMING) + next->flags &= ~TASK_RESUMING; + + /* Zero ticks indicates task hasn't run since last rq swap */ + if (next->ticks_left == 0) { + /* + * Redistribute timeslice. We do this as each task + * becomes runnable rather than all at once.
It is done + * every runqueue swap + */ + sched_recalc_ticks(next, per_cpu(scheduler).prio_total); + next->ticks_left = next->ticks_assigned; + } + + /* Reinitialise task's schedule granularity boundary */ + next->sched_granule = SCHED_GRANULARITY; +} /* * Tasks come here, either by setting need_resched (via next irq), @@ -359,13 +495,9 @@ void schedule() if (current->state == TASK_RUNNABLE) { sched_rq_remove_task(current); if (current->ticks_left) - sched_rq_add_task(current, - per_cpu(scheduler).rq_runnable, - RQ_ADD_BEHIND); + sched_run_task(current, &per_cpu(scheduler)); else - sched_rq_add_task(current, - per_cpu(scheduler).rq_expired, - RQ_ADD_BEHIND); + sched_expire_task(current, &per_cpu(scheduler)); } /* @@ -391,52 +523,17 @@ void schedule() sched_suspend_async(); } - /* Simpler task pick up loop. May put in sched_pick_next() */ - for (;;) { - struct scheduler *sched = &per_cpu(scheduler); - - /* If we or a child has just exited, run idle task once for clean up */ - if (current->flags & TASK_EXITED) { - current->flags &= ~TASK_EXITED; - next = sched->idle_task; - break; - } else if (sched->rq_runnable->total > 0) { - /* Get a runnable task, if available */ - next = link_to_struct(sched->rq_runnable->task_list.next, - struct ktcb, rq_list); - break; - } else if (sched->rq_expired->total > 0) { - /* Swap queues and retry if not */ - sched_rq_swap_runqueues(); - continue; - } else if (in_process_context()) { - /* Do idle task if no runnable tasks and in process */ - next = sched->idle_task; - break; - } else { - /* Irq calls must return to interrupted current process */ - next = current; - break; - } + /* Hint scheduler to run idle asap to free task */ + if (current->flags & TASK_EXITED) { + current->flags &= ~TASK_EXITED; + per_cpu(scheduler).flags |= SCHED_RUN_IDLE; } - /* New tasks affect runqueue total priority. 
*/ - if (next->flags & TASK_RESUMING) - next->flags &= ~TASK_RESUMING; + /* Decide on next runnable task */ + next = sched_select_next(); - /* Zero ticks indicates task hasn't ran since last rq swap */ - if (next->ticks_left == 0) { - /* - * Redistribute timeslice. We do this as each task - * becomes runnable rather than all at once. It is done - * every runqueue swap - */ - sched_recalc_ticks(next, per_cpu(scheduler).prio_total); - next->ticks_left = next->ticks_assigned; - } - - /* Reinitialise task's schedule granularity boundary */ - next->sched_granule = SCHED_GRANULARITY; + /* Prepare next task for running */ + sched_prepare_next(next); /* Finish */ disable_irqs(); diff --git a/src/generic/tcb.c b/src/generic/tcb.c index 932942a..9d2b755 100644 --- a/src/generic/tcb.c +++ b/src/generic/tcb.c @@ -34,7 +34,6 @@ void tcb_init(struct ktcb *new) spin_lock_init(&new->thread_lock); - init_ktcb_list(&new->child_exit_list); cap_list_init(&new->cap_list); /* Initialise task's scheduling state and parameters. */ diff --git a/src/generic/time.c b/src/generic/time.c index b192ff5..466c7b0 100644 --- a/src/generic/time.c +++ b/src/generic/time.c @@ -15,6 +15,7 @@ #include INC_ARCH(exception.h) #include #include +#include INC_GLUE(ipi.h) /*FIXME: Remove this */ /* TODO: * 1) Add RTC support. 
@@ -141,13 +142,23 @@ void update_process_times(void) need_resched = 1; } - int do_timer_irq(void) { increase_jiffies(); update_process_times(); update_system_time(); +#if defined (CONFIG_SMP) + smp_send_ipi(cpu_mask_others(), IPI_TIMER_EVENT); +#endif + + return IRQ_HANDLED; +} + +/* Secondary cpus call this */ +int secondary_timer_irq(void) +{ + update_process_times(); return IRQ_HANDLED; } diff --git a/src/glue/arm/SConscript b/src/glue/arm/SConscript index fb4231b..da8cc4a 100644 --- a/src/glue/arm/SConscript +++ b/src/glue/arm/SConscript @@ -16,7 +16,7 @@ src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c'] for name, val in symbols: if 'CONFIG_SMP' == name: - src_local += ['smp.c', 'ipi.c', 'smp_test.c'] + src_local += ['smp.c', 'ipi.c'] obj = env.Object(src_local) Return('obj') diff --git a/src/glue/arm/init.c b/src/glue/arm/init.c index db48ff4..fcaba90 100644 --- a/src/glue/arm/init.c +++ b/src/glue/arm/init.c @@ -143,23 +143,13 @@ void setup_idle_task() /* Initialize space caps list */ cap_list_init(¤t->space->cap_list); -#if 0 - /* - * Unneeded stuff - */ - /* - * Set up idle context. - */ - current->context.spsr = ARM_MODE_SVC; - current->context.pc = (u32)idle_task; - current->context.sp = (u32)align((unsigned long)current + PAGE_SIZE, - STACK_ALIGNMENT); -#endif - /* * FIXME: This must go to kernel resources init. */ + /* Init scheduler structs */ + sched_init_task(current, TASK_PRIO_NORMAL); + /* * If using split page tables, kernel * resources must point at the global pgd @@ -236,18 +226,18 @@ void start_kernel(void) sched_init(); - /* Try to initialize secondary cores if there are any */ - smp_start_cores(); - - /* Remove one-to-one kernel mapping */ - remove_initial_mapping(); - /* * Map and enable high vector page. * Faults can be handled after here. 
*/ vectors_init(); + /* Try to initialize secondary cores if there are any */ + smp_start_cores(); + + /* Remove one-to-one kernel mapping */ + remove_initial_mapping(); + /* Remap 1MB kernel sections as 4Kb pages. */ remap_as_pages((void *)page_align(_start_kernel), (void *)page_align_up(_end_kernel)); diff --git a/src/glue/arm/ipi.c b/src/glue/arm/ipi.c index 2e5eff5..fd89d06 100644 --- a/src/glue/arm/ipi.c +++ b/src/glue/arm/ipi.c @@ -10,9 +10,32 @@ #include INC_GLUE(smp.h) #include INC_SUBARCH(cpu.h) #include +#include +#include /* This should be in a file something like exception.S */ int ipi_handler(struct irq_desc *desc) { + int ipi_event = (desc - irq_desc_array) / sizeof(struct irq_desc); + +// printk("CPU%d: entered IPI%d\n", smp_get_cpuid(), +// (desc - irq_desc_array) / sizeof(struct irq_desc)); + + switch (ipi_event) { + case IPI_TIMER_EVENT: + // printk("CPU%d: Handling timer ipi\n", smp_get_cpuid()); + secondary_timer_irq(); + break; + default: + printk("CPU%d: IPI with no meaning: %d\n", + smp_get_cpuid(), ipi_event); + break; + } return 0; } + +void smp_send_ipi(unsigned int cpumask, int ipi_num) +{ + gic_send_ipi(cpumask, ipi_num); +} + diff --git a/src/glue/arm/smp.c b/src/glue/arm/smp.c index 7c8d2af..3b51d57 100644 --- a/src/glue/arm/smp.c +++ b/src/glue/arm/smp.c @@ -18,7 +18,7 @@ #include unsigned long secondary_run_signal; - +unsigned long secondary_ready_signal; void __smp_start(void); @@ -35,14 +35,17 @@ void smp_start_cores(void) arm_smp_inval_icache_entirely(); /* Start other cpus */ - for (int i = 1; i < CONFIG_NCPU; i++) { - printk("%s: Bringing up CPU%d\n", __KERNELNAME__, i); - if ((platform_smp_start(i, smp_start_func)) < 0) { + for (int cpu = 1; cpu < CONFIG_NCPU; cpu++) { + printk("%s: Bringing up CPU%d\n", __KERNELNAME__, cpu); + if ((platform_smp_start(cpu, smp_start_func)) < 0) { printk("FATAL: Could not start secondary cpu. 
" - "cpu=%d\n", i); + "cpu=%d\n", cpu); BUG(); } - wfi(); /* wait for other cpu send IPI to core0 */ + + /* Wait for this particular secondary to become ready */ + while(!(secondary_ready_signal & CPUID_TO_MASK(cpu))) + dmb(); } scu_print_state(); @@ -50,12 +53,11 @@ void smp_start_cores(void) void init_smp(void) { - /* Start_secondary_cpus */ - if (CONFIG_NCPU > 1) { - - /* This sets IPI function pointer at bare minimum */ - platform_smp_init(CONFIG_NCPU); - } + /* Start_secondary_cpus */ + if (CONFIG_NCPU > 1) { + /* This sets IPI function pointer at bare minimum */ + platform_smp_init(CONFIG_NCPU); + } } void secondary_setup_idle_task(void) @@ -122,9 +124,9 @@ void smp_secondary_init(void) sched_init(); - dsb(); - - gic_send_ipi(CPUID_TO_MASK(0), 0); + /* Signal primary that we are ready */ + dmb(); + secondary_ready_signal |= cpu_mask_self(); /* * Wait for the first runnable task to become available diff --git a/src/lib/printk.c b/src/lib/printk.c index cf766df..26c7d0d 100644 --- a/src/lib/printk.c +++ b/src/lib/printk.c @@ -449,6 +449,6 @@ int printk(char *format, ...) 
va_end(args); return i; -}; +} diff --git a/src/platform/eb/irq.c b/src/platform/eb/irq.c index 9e97fbd..7d3c0bb 100644 --- a/src/platform/eb/irq.c +++ b/src/platform/eb/irq.c @@ -5,6 +5,7 @@ */ #include #include INC_PLAT(irq.h) +#include #include extern struct gic_data gic_data[IRQ_CHIPS_MAX]; @@ -61,3 +62,26 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = { }; #endif +struct irq_desc irq_desc_array[IRQS_MAX] = { + [IRQ_TIMER0] = { + .name = "Timer0", + .chip = &irq_chip_array[0], + .handler = platform_timer_handler, + }, + [IRQ_TIMER1] = { + .name = "Timer1", + .chip = &irq_chip_array[0], + .handler = platform_timer_user_handler, + }, + [IRQ_KEYBOARD0] = { + .name = "Keyboard", + .chip = &irq_chip_array[0], + .handler = platform_keyboard_user_handler, + }, + [IRQ_MOUSE0] = { + .name = "Mouse", + .chip = &irq_chip_array[0], + .handler = platform_mouse_user_handler, + }, +}; + diff --git a/src/platform/eb/platform.c b/src/platform/eb/platform.c index dab0af0..0b8d39c 100644 --- a/src/platform/eb/platform.c +++ b/src/platform/eb/platform.c @@ -17,6 +17,25 @@ #include INC_GLUE(mapping.h) #include INC_GLUE(smp.h) +/* + * FIXME: This is not a platform specific + * call, we will move this out later + */ +void device_cap_init(struct kernel_resources *kres, int devtype, + int devnum, unsigned long base) +{ + struct capability *cap; + + cap = alloc_bootmem(sizeof(*cap), 0); + cap_set_devtype(cap, devtype); + cap_set_devnum(cap, devnum); + cap->start = __pfn(base); + cap->end = cap->start + 1; + cap->size = cap->end - cap->start; + link_init(&cap->list); + cap_list_insert(cap, &kres->devmem_free); +} + /* * The devices that are used by the kernel are mapped * independent of these capabilities, but these provide a @@ -24,45 +43,12 @@ */ int platform_setup_device_caps(struct kernel_resources *kres) { - struct capability *uart[4], *timer[4]; - - /* Setup capabilities for userspace uarts and timers */ - uart[1] = alloc_bootmem(sizeof(*uart[1]), 0); - uart[1]->start = 
__pfn(PLATFORM_UART1_BASE); - uart[1]->end = uart[1]->start + 1; - uart[1]->size = uart[1]->end - uart[1]->start; - cap_set_devtype(uart[1], CAP_DEVTYPE_UART); - cap_set_devnum(uart[1], 1); - link_init(&uart[1]->list); - cap_list_insert(uart[1], &kres->devmem_free); - - uart[2] = alloc_bootmem(sizeof(*uart[2]), 0); - uart[2]->start = __pfn(PLATFORM_UART2_BASE); - uart[2]->end = uart[2]->start + 1; - uart[2]->size = uart[2]->end - uart[2]->start; - cap_set_devtype(uart[2], CAP_DEVTYPE_UART); - cap_set_devnum(uart[2], 2); - link_init(&uart[2]->list); - cap_list_insert(uart[2], &kres->devmem_free); - - uart[3] = alloc_bootmem(sizeof(*uart[3]), 0); - uart[3]->start = __pfn(PLATFORM_UART3_BASE); - uart[3]->end = uart[3]->start + 1; - uart[3]->size = uart[3]->end - uart[3]->start; - cap_set_devtype(uart[3], CAP_DEVTYPE_UART); - cap_set_devnum(uart[3], 3); - link_init(&uart[3]->list); - cap_list_insert(uart[3], &kres->devmem_free); - - /* Setup timer1 capability as free */ - timer[1] = alloc_bootmem(sizeof(*timer[1]), 0); - timer[1]->start = __pfn(PLATFORM_TIMER1_BASE); - timer[1]->end = timer[1]->start + 1; - timer[1]->size = timer[1]->end - timer[1]->start; - cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER); - cap_set_devnum(timer[1], 1); - link_init(&timer[1]->list); - cap_list_insert(timer[1], &kres->devmem_free); + device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE); + device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE); + device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE); + device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE); + device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE); + device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE); return 0; } @@ -98,5 +84,16 @@ void init_platform_irq_controller() void init_platform_devices() { + /* TIMER23 */ + add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* KEYBOARD - KMI0 */ + 
add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* MOUSE - KMI1 */ + add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); } diff --git a/src/platform/pb926/irq.c b/src/platform/pb926/irq.c index d5d1398..8b65a5f 100644 --- a/src/platform/pb926/irq.c +++ b/src/platform/pb926/irq.c @@ -10,6 +10,7 @@ #include INC_PLAT(platform.h) #include INC_PLAT(timer.h) #include INC_ARCH(exception.h) +#include #include struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = { @@ -68,8 +69,18 @@ static int platform_timer_user_handler(struct irq_desc *desc) /* * Keyboard handler for userspace */ +#define PL050_KMICR 0x00 +#define PL050_KMI_RXINTR (1 << 0x4) + static int platform_keyboard_user_handler(struct irq_desc *desc) { + /* + * Disable rx keyboard interrupt. + * User will enable this + */ + clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR, + PL050_KMI_RXINTR); + irq_thread_notify(desc); return 0; } @@ -79,6 +90,13 @@ static int platform_keyboard_user_handler(struct irq_desc *desc) */ static int platform_mouse_user_handler(struct irq_desc *desc) { + /* + * Disable rx mouse interrupt. 
+ * User will enable this + */ + clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR, + PL050_KMI_RXINTR); + irq_thread_notify(desc); return 0; } @@ -92,25 +110,21 @@ struct irq_desc irq_desc_array[IRQS_MAX] = { .name = "Timer0", .chip = &irq_chip_array[0], .handler = platform_timer_handler, - .user_ack = 0, }, [IRQ_TIMER1] = { .name = "Timer1", .chip = &irq_chip_array[0], .handler = platform_timer_user_handler, - .user_ack = 0, }, [IRQ_KEYBOARD0] = { .name = "Keyboard", .chip = &irq_chip_array[1], .handler = platform_keyboard_user_handler, - .user_ack = 1, }, [IRQ_MOUSE0] = { .name = "Mouse", .chip = &irq_chip_array[1], .handler = platform_mouse_user_handler, - .user_ack = 1, }, }; diff --git a/src/platform/pb926/platform.c b/src/platform/pb926/platform.c index ad4c689..8f6a57a 100644 --- a/src/platform/pb926/platform.c +++ b/src/platform/pb926/platform.c @@ -103,18 +103,24 @@ void init_platform_irq_controller() irq_controllers_init(); } -/* - * Add userspace devices here as you develop - * their irq handlers, - * Only the devices to which kernel has to do - * anything needs to be mapped, rest will be - * mapped in userspace by user - */ +/* Add userspace devices here as you develop their irq handlers */ void init_platform_devices() { /* TIMER23 */ add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE, PAGE_SIZE, MAP_IO_DEFAULT); + + /* KEYBOARD - KMI0 */ + add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* MOUSE - KMI1 */ + add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* CLCD */ + add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); } /* If these bits are off, 32Khz OSC source is used */ diff --git a/src/platform/pba9/irq.c b/src/platform/pba9/irq.c index b844c89..e1b3bc7 100644 --- a/src/platform/pba9/irq.c +++ b/src/platform/pba9/irq.c @@ -9,7 +9,9 @@ #include INC_PLAT(irq.h) #include INC_PLAT(platform.h) 
#include INC_ARCH(exception.h) +#include #include +#include extern struct gic_data gic_data[IRQ_CHIPS_MAX]; @@ -26,7 +28,35 @@ struct irq_chip irq_chip_array[IRQ_CHIPS_MAX] = { .read_irq = gic_read_irq, .ack_and_mask = gic_ack_and_mask, .unmask = gic_unmask_irq, + .set_cpu = gic_set_target, }, }, }; +/* + * Built-in irq handlers initialised at compile time. + * Else register with register_irq() + */ +struct irq_desc irq_desc_array[IRQS_MAX] = { + [IRQ_TIMER0] = { + .name = "Timer0", + .chip = &irq_chip_array[0], + .handler = platform_timer_handler, + }, + [IRQ_TIMER1] = { + .name = "Timer1", + .chip = &irq_chip_array[0], + .handler = platform_timer_user_handler, + }, + [IRQ_KEYBOARD0] = { + .name = "Keyboard", + .chip = &irq_chip_array[0], + .handler = platform_keyboard_user_handler, + }, + [IRQ_MOUSE0] = { + .name = "Mouse", + .chip = &irq_chip_array[0], + .handler = platform_mouse_user_handler, + }, +}; + diff --git a/src/platform/pba9/platform.c b/src/platform/pba9/platform.c index a802b29..965beb1 100644 --- a/src/platform/pba9/platform.c +++ b/src/platform/pba9/platform.c @@ -19,6 +19,25 @@ #include #include +/* + * FIXME: This is not a platform specific + * call, we will move this out later + */ +void device_cap_init(struct kernel_resources *kres, int devtype, + int devnum, unsigned long base) +{ + struct capability *cap; + + cap = alloc_bootmem(sizeof(*cap), 0); + cap_set_devtype(cap, devtype); + cap_set_devnum(cap, devnum); + cap->start = __pfn(base); + cap->end = cap->start + 1; + cap->size = cap->end - cap->start; + link_init(&cap->list); + cap_list_insert(cap, &kres->devmem_free); +} + /* * The devices that are used by the kernel are mapped * independent of these capabilities, but these provide a @@ -26,17 +45,13 @@ */ int platform_setup_device_caps(struct kernel_resources *kres) { - struct capability *timer[2]; - - /* Setup timer1 capability as free */ - timer[1] = alloc_bootmem(sizeof(*timer[1]), 0); - timer[1]->start = __pfn(PLATFORM_TIMER1_BASE); - 
timer[1]->end = timer[1]->start + 1; - timer[1]->size = timer[1]->end - timer[1]->start; - cap_set_devtype(timer[1], CAP_DEVTYPE_TIMER); - cap_set_devnum(timer[1], 1); - link_init(&timer[1]->list); - cap_list_insert(timer[1], &kres->devmem_free); + device_cap_init(kres, CAP_DEVTYPE_UART, 1, PLATFORM_UART1_BASE); + device_cap_init(kres, CAP_DEVTYPE_UART, 2, PLATFORM_UART2_BASE); + device_cap_init(kres, CAP_DEVTYPE_UART, 3, PLATFORM_UART3_BASE); + device_cap_init(kres, CAP_DEVTYPE_TIMER, 1, PLATFORM_TIMER1_BASE); + device_cap_init(kres, CAP_DEVTYPE_KEYBOARD, 0, PLATFORM_KEYBOARD0_BASE); + device_cap_init(kres, CAP_DEVTYPE_MOUSE, 0, PLATFORM_MOUSE0_BASE); + device_cap_init(kres, CAP_DEVTYPE_CLCD, 0, PLATFORM_CLCD0_BASE); return 0; } @@ -54,5 +69,21 @@ void init_platform_irq_controller() void init_platform_devices() { + /* TIMER23 */ + add_boot_mapping(PLATFORM_TIMER1_BASE, PLATFORM_TIMER1_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* KEYBOARD - KMI0 */ + add_boot_mapping(PLATFORM_KEYBOARD0_BASE, PLATFORM_KEYBOARD0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* MOUSE - KMI1 */ + add_boot_mapping(PLATFORM_MOUSE0_BASE, PLATFORM_MOUSE0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + + /* CLCD */ + add_boot_mapping(PLATFORM_CLCD0_BASE, PLATFORM_CLCD0_VBASE, + PAGE_SIZE, MAP_IO_DEFAULT); + } diff --git a/src/platform/realview/irq.c b/src/platform/realview/irq.c index f47f9ed..f8733b4 100644 --- a/src/platform/realview/irq.c +++ b/src/platform/realview/irq.c @@ -7,24 +7,61 @@ #include #include INC_PLAT(offsets.h) #include INC_PLAT(irq.h) -#include +#include +#include -static int platform_timer_handler(struct irq_desc *desc) +/* + * Timer handler for userspace + */ +int platform_timer_user_handler(struct irq_desc *desc) +{ + /* Ack the device irq */ + timer_irq_clear(PLATFORM_TIMER1_VBASE); + + /* Notify the userspace */ + irq_thread_notify(desc); + + return 0; +} + +/* + * Keyboard handler for userspace + */ +#define PL050_KMICR 0x00 +#define PL050_KMI_RXINTR (1 << 0x4) +int 
platform_keyboard_user_handler(struct irq_desc *desc) +{ + /* + * Disable rx keyboard interrupt. + * User will enable this + */ + clrbit((unsigned int *)PLATFORM_KEYBOARD0_VBASE + PL050_KMICR, + PL050_KMI_RXINTR); + + irq_thread_notify(desc); + return 0; +} + +/* + * Mouse handler for userspace + */ +int platform_mouse_user_handler(struct irq_desc *desc) +{ + /* + * Disable rx mouse interrupt. + * User will enable this + */ + clrbit((unsigned int *)PLATFORM_MOUSE0_VBASE + PL050_KMICR, + PL050_KMI_RXINTR); + + irq_thread_notify(desc); + return 0; +} + +int platform_timer_handler(struct irq_desc *desc) { timer_irq_clear(PLATFORM_TIMER0_VBASE); return do_timer_irq(); } -/* - * Built-in irq handlers initialised at compile time. - * Else register with register_irq() - */ -struct irq_desc irq_desc_array[IRQS_MAX] = { - [IRQ_TIMER0] = { - .name = "Timer0", - .chip = &irq_chip_array[0], - .handler = platform_timer_handler, - }, -}; - diff --git a/src/platform/realview/perfmon.c b/src/platform/realview/perfmon.c index 76ea891..5fcbec2 100644 --- a/src/platform/realview/perfmon.c +++ b/src/platform/realview/perfmon.c @@ -5,7 +5,7 @@ * * Author: Bahadir Balban */ -#include +#include #include #include INC_PLAT(offsets.h) #include INC_SUBARCH(perfmon.h) diff --git a/src/platform/realview/platform.c b/src/platform/realview/platform.c index 5f3ee7f..2d57b65 100644 --- a/src/platform/realview/platform.c +++ b/src/platform/realview/platform.c @@ -4,13 +4,14 @@ * Copyright (C) 2009 B Labs Ltd. 
*/ #include -#include +#include #include INC_PLAT(offsets.h) #include INC_GLUE(mapping.h) #include INC_GLUE(smp.h) #include #include #include +#include #include INC_PLAT(platform.h) #include INC_ARCH(io.h) @@ -36,6 +37,9 @@ void platform_timer_start(void) /* Enable irq line for TIMER0 */ irq_enable(IRQ_TIMER0); + /* Set cpu to all cpus for timer0 */ + // irq_set_cpu(IRQ_TIMER0, cpu_all_mask()); + /* Enable timer */ timer_start(PLATFORM_TIMER0_VBASE); } diff --git a/src/platform/realview/smp.c b/src/platform/realview/smp.c index b46fb64..33e6771 100644 --- a/src/platform/realview/smp.c +++ b/src/platform/realview/smp.c @@ -19,7 +19,6 @@ #include #include - extern struct irq_desc irq_desc_array[IRQS_MAX]; /* Print some SCU information */ @@ -49,16 +48,14 @@ void scu_init(void) void platform_smp_init(int ncpus) { - unsigned int i; + /* Add GIC SoftIRQ (aka IPI) */ + for (int i = 0; i < 16; i++) { + strncpy(irq_desc_array[i].name, "SoftInt", 8); + irq_desc_array[i].chip = &irq_chip_array[0]; + irq_desc_array[i].handler = &ipi_handler; + } - /* Add GIC SoftIRQ (aka IPI) */ - for (i = 0; i <= 15; i++) { - strncpy(irq_desc_array[i].name, "SoftInt", 8); - irq_desc_array[i].chip = &irq_chip_array[0]; - irq_desc_array[i].handler = &ipi_handler; - } - - add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE, + add_boot_mapping(PLATFORM_SYSTEM_REGISTERS, PLATFORM_SYSREGS_VBASE, PAGE_SIZE, MAP_IO_DEFAULT); } @@ -74,7 +71,7 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int)) dsb(); /* Make sure the write occurs */ /* Wake up other core who is waiting on a WFI. 
*/ - gic_send_ipi(CPUID_TO_MASK(cpu), 1); + gic_send_ipi(CPUID_TO_MASK(cpu), 0); return 0; } @@ -82,13 +79,4 @@ int platform_smp_start(int cpu, void (*smp_start_func)(int)) void secondary_init_platform(void) { gic_cpu_init(0, GIC0_CPU_VBASE); - gic_ack_irq(1); - - gic_set_target(IRQ_TIMER0, 1 << smp_get_cpuid()); } - -void arch_send_ipi(u32 cpu, int cmd) -{ - gic_send_ipi(cpu, cmd); -} - diff --git a/tools/gdbinit b/tools/gdbinit index 0d4607e..f6e7d73 100644 --- a/tools/gdbinit +++ b/tools/gdbinit @@ -6,4 +6,3 @@ break break_virtual continue sym kernel.elf stepi - diff --git a/tools/rvdebug.inc b/tools/rvdebug.inc index eade2c2..5bc1923 100644 --- a/tools/rvdebug.inc +++ b/tools/rvdebug.inc @@ -1,4 +1,4 @@ load/r '/home/bahadir/codezero/build/final.elf' load/ni/np '/home/bahadir/codezero/build/kernel.elf' +bexec platform_init bexec smp_start_cores -bexec idle_task