From 792213d66bcc1aa1a562af7c79702edfdc348063 Mon Sep 17 00:00:00 2001
From: Tido Klaassen
Date: Wed, 30 Sep 2015 08:51:28 +0200
Subject: [PATCH] More work on making the port use newlib's reentrancy mechanisms

- archContextSwitch stores the address of the new thread's struct _reent in
  ctx_switch_info
- pend_sv_handler uses the address in ctx_switch_info.reent instead of a
  fixed offset into the atom_tcb
- added a header with defines for the offsets into struct task_switch_info
  used in the assembler code. Also added compile-time verification for these
  offsets
---
 kernel/atom.h                     |  5 +---
 ports/cortex-m/asm_offsets.h      | 37 ++++++++++++++++++++++++
 ports/cortex-m/atomport-asm.S     | 48 +++++++++++++++----------------
 ports/cortex-m/atomport-private.h |  3 ++
 ports/cortex-m/atomport.c         | 30 ++++++++++++++++---
 ports/cortex-m/atomport.h         |  5 ++++
 6 files changed, 96 insertions(+), 32 deletions(-)
 create mode 100644 ports/cortex-m/asm_offsets.h

diff --git a/kernel/atom.h b/kernel/atom.h
index 152f98e..3773c9b 100755
--- a/kernel/atom.h
+++ b/kernel/atom.h
@@ -59,10 +59,7 @@ typedef struct atom_tcb
      */
     POINTER sp_save_ptr;
 
-    /* Thread's port specific private data. Do not move, some poorly written
-     * thread switching code (*cough* Cortex-M *cough*) might depend on a
-     * known offset into the atom_tcb struct
-     */
+    /* Thread's port specific private data. */
     THREAD_PORT_PRIV;
 
     /* Thread priority (0-255) */
diff --git a/ports/cortex-m/asm_offsets.h b/ports/cortex-m/asm_offsets.h
new file mode 100644
index 0000000..495bbdd
--- /dev/null
+++ b/ports/cortex-m/asm_offsets.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, Tido Klaassen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. No personal names or organizations' names associated with the
+ *    Atomthreads project may be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef __ATOM_PORT_ASM_OFFSETS_H +#define __ATOM_PORT_ASM_OFFSETS_H + +#define CTX_RUN_OFF 0 +#define CTX_NEXT_OFF 4 +#define CTX_REENT_OFF 8 + +#endif /* __ATOM_PORT_ASM_OFFSETS_H */ diff --git a/ports/cortex-m/atomport-asm.S b/ports/cortex-m/atomport-asm.S index 24f1060..d1c8454 100644 --- a/ports/cortex-m/atomport-asm.S +++ b/ports/cortex-m/atomport-asm.S @@ -27,6 +27,8 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include "asm_offsets.h" + .syntax unified /** @@ -65,11 +67,11 @@ .text -.global archFirstThreadRestore -.func archFirstThreadRestore -.type archFirstThreadRestore,%function +.global _archFirstThreadRestore +.func _archFirstThreadRestore +.type _archFirstThreadRestore,%function .thumb_func -archFirstThreadRestore: +_archFirstThreadRestore: /** * Disable interrupts. They should be disabled anyway, but just * to make sure... @@ -87,24 +89,23 @@ archFirstThreadRestore: /* Update ctx_switch_info, set this thread as both running and next */ ldr r1, = CTX_SW_NFO - str r0, [r1, #0] - str r0, [r1, #4] - - /* Get thread stack pointer from tcb. Conveniently the first element */ - ldr r1, [r0, #0] - msr PSP, r1 + str r0, [r1, #CTX_RUN_OFF] + str r0, [r1, #CTX_NEXT_OFF] #if defined(__NEWLIB__) /** * Store the thread's reentry context address in _impure_ptr. This - * should be the TCB's second element + * will have been stored in ctx_switch_info.reent. */ - ldr r1, = _impure_ptr - mov r2, r0 - add r2, #4 - str r2, [r1, #0] + ldr r2, [r1, #CTX_REENT_OFF] + ldr r3, = _impure_ptr + str r2, [r3, #0] #endif + /* Get thread stack pointer from tcb. Conveniently the first element */ + ldr r1, [r0, #0] + msr PSP, r1 + /** * Set bit #1 in CONTROL. Causes switch to PSP, so we can work directly * with SP now and use pop/push. @@ -197,7 +198,7 @@ archFirstThreadRestore: pop {r2,r3,pc} nop -.size archFirstThreadRestore, . - archFirstThreadRestore +.size _archFirstThreadRestore, . - _archFirstThreadRestore .endfunc .global pend_sv_handler @@ -234,8 +235,8 @@ pend_sv_handler: * gets called. */ ldr r0, = CTX_SW_NFO - ldr r1, [r0, #0] - ldr r2, [r0, #4] + ldr r1, [r0, #CTX_RUN_OFF] + ldr r2, [r0, #CTX_NEXT_OFF] cmp r1, r2 beq no_switch @@ -271,7 +272,7 @@ pend_sv_handler: /** * Save old thread's register context on Cortex-M0. * Push running thread's remaining registers on stack. - * Thumb-1 can only use stm on low registers, so we + * Thumb-1 can use stm only on low registers, so we * have to do this in two steps. */ @@ -301,7 +302,7 @@ pend_sv_handler: #endif // !THUMB_2 /** * Address of running TCB still in r1. Store thread's current stack top - * into its sp_save_ptr, which is the struct's first element + * into its sp_save_ptr, which is the struct's first element. */ str r3, [r1, #0] @@ -309,15 +310,14 @@ pend_sv_handler: * ctx_switch_info.next_tcb is going to become ctx_switch_info.running_tcb, * so we update the pointer. */ - str r2, [r0, #0] + str r2, [r0, #CTX_RUN_OFF] #if defined(__NEWLIB__) /** * Store the thread's reentry context address in _impure_ptr. This - * should be the TCB's second element + * will have been stored in ctx_switch_info.reent. 
      */
-    mov r4, r2
-    add r4, #4
+    ldr r4, [r0, #CTX_REENT_OFF]
     ldr r3, = _impure_ptr
     str r4, [r3, #0]
 #endif
diff --git a/ports/cortex-m/atomport-private.h b/ports/cortex-m/atomport-private.h
index 6fe0e86..b670673 100644
--- a/ports/cortex-m/atomport-private.h
+++ b/ports/cortex-m/atomport-private.h
@@ -112,6 +112,9 @@ struct task_fpu_stack {
 struct task_switch_info {
     volatile struct atom_tcb *running_tcb;
     volatile struct atom_tcb *next_tcb;
+#if defined(__NEWLIB__)
+    struct _reent *reent;
+#endif
 } __attribute__((packed));
 
 #endif /* __ATOMPORT_PRIVATE_H_ */
diff --git a/ports/cortex-m/atomport.c b/ports/cortex-m/atomport.c
index 67767cf..3ec72a1 100644
--- a/ports/cortex-m/atomport.c
+++ b/ports/cortex-m/atomport.c
@@ -37,18 +37,27 @@
 
 #include "atomport.h"
 #include "atomport-private.h"
+#include "asm_offsets.h"
 
 static void thread_shell(void);
 
-/**
- *
- */
 struct task_switch_info ctx_switch_info asm("CTX_SW_NFO") = {
     .running_tcb = NULL,
     .next_tcb = NULL,
 };
 
+extern void _archFirstThreadRestore(ATOM_TCB *);
+void archFirstThreadRestore(ATOM_TCB *new_tcb_ptr)
+{
+#if defined(__NEWLIB__)
+    ctx_switch_info.reent = &(new_tcb_ptr->port_priv.reent);
+    __dmb();
+#endif
+
+    _archFirstThreadRestore(new_tcb_ptr);
+}
+
 /**
  * We do not perform the context switch directly. Instead we mark the new tcb
 * as should-be-running in ctx_switch_info and trigger a PendSv-interrupt.
@@ -70,7 +79,9 @@ archContextSwitch(ATOM_TCB *old_tcb_ptr __maybe_unused, ATOM_TCB *new_tcb_ptr)
 {
     if(likely(ctx_switch_info.running_tcb != NULL)){
         ctx_switch_info.next_tcb = new_tcb_ptr;
-
+#if defined(__NEWLIB__)
+        ctx_switch_info.reent = &(new_tcb_ptr->port_priv.reent);
+#endif
         __dmb();
 
         SCB_ICSR = SCB_ICSR_PENDSVSET;
@@ -145,6 +156,17 @@ void archThreadContextInit(ATOM_TCB *tcb_ptr, void *stack_top,
     struct isr_stack *isr_ctx;
     struct task_stack *tsk_ctx;
 
+    /**
+     * Do compile-time verification for the offsets used in _archFirstThreadRestore
+     * and pend_sv_handler. If compilation aborts here, you will have to adjust
+     * the offsets for struct task_switch_info's members in asm_offsets.h.
+     */
+    assert_static(offsetof(struct task_switch_info, running_tcb) == CTX_RUN_OFF);
+    assert_static(offsetof(struct task_switch_info, next_tcb) == CTX_NEXT_OFF);
+#if defined(__NEWLIB__)
+    assert_static(offsetof(struct task_switch_info, reent) == CTX_REENT_OFF);
+#endif
+
     /**
      * Enforce initial stack alignment
      */
diff --git a/ports/cortex-m/atomport.h b/ports/cortex-m/atomport.h
index 1f662f9..d3337ea 100644
--- a/ports/cortex-m/atomport.h
+++ b/ports/cortex-m/atomport.h
@@ -52,6 +52,11 @@
 #define unlikely(x) __builtin_expect(!!(x), 0)
 #define __maybe_unused __attribute__((unused))
 
+#define assert_static(e) \
+    do { \
+        enum { assert_static__ = 1/(e) }; \
+    } while (0)
+
 /**
  * Critical region protection: this should disable interrupts
 * to protect OS data structures during modification. It must
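
Note on the per-thread newlib state this patch relies on: atomport.c now
dereferences new_tcb_ptr->port_priv.reent, so THREAD_PORT_PRIV is expected to
embed a struct _reent in every TCB, and that structure has to be initialised
before the thread first runs. A minimal sketch of the assumed layout and
initialisation follows; the helper name port_init_reent() and the exact
THREAD_PORT_PRIV definition are illustrative assumptions, while struct _reent
and _REENT_INIT_PTR() are standard newlib facilities from <sys/reent.h>.

    /* Sketch only, not part of this patch: assumed shape of the
     * port-private TCB data that atomport.c accesses as
     * tcb_ptr->port_priv.reent.
     */
    #include <sys/reent.h>      /* struct _reent, _REENT_INIT_PTR() */
    #include "atom.h"           /* ATOM_TCB */

    /* atomport.h is assumed to declare something along the lines of:
     *
     *     #define THREAD_PORT_PRIV struct { struct _reent reent; } port_priv
     */

    /* Hypothetical helper: give a thread a freshly initialised newlib
     * reentrancy context before it is scheduled for the first time,
     * e.g. called from archThreadContextInit().
     */
    static void port_init_reent(ATOM_TCB *tcb_ptr)
    {
        _REENT_INIT_PTR(&tcb_ptr->port_priv.reent);
    }

With the reent member set up this way, the pointer published in
ctx_switch_info.reent always refers to valid per-thread state by the time the
PendSV handler copies it into _impure_ptr.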
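
For readers who prefer C to Thumb assembly, the newlib-specific step that the
patched pend_sv_handler and _archFirstThreadRestore perform boils down to the
paraphrase below. It is illustrative only; the authoritative code is the
assembly in atomport-asm.S above, and the function name used here is
hypothetical.

    #include <sys/reent.h>              /* struct _reent, _impure_ptr */
    #include "atomport-private.h"       /* struct task_switch_info */

    extern struct task_switch_info ctx_switch_info;

    static void adopt_next_thread_reent(void)
    {
    #if defined(__NEWLIB__)
        /* archContextSwitch()/archFirstThreadRestore() have already stored
         * the next thread's reentrancy context in ctx_switch_info.reent and
         * issued a __dmb(), so next_tcb and reent are seen as a consistent
         * pair here. Point newlib's global reentrancy pointer at it.
         */
        _impure_ptr = ctx_switch_info.reent;
    #endif
    }

Doing this from the switch code, instead of computing a fixed offset into the
TCB, is what allows the "do not move" layout comment in kernel/atom.h to be
dropped.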