More work on making port use newlib's reentry mechanisms

- archContextSwitch stores address of new thread's struct reent in
  ctx_switch_info
- pend_sv_handler uses the address in ctx_switch_info.reent instead of a
  fixed offset into atom_tcb
- added header with defines for offsets into struct ctx_switch_info used
  in assembler code. Also added compile time verification for these offsets
This commit is contained in:
Tido Klaassen
2015-09-30 08:51:28 +02:00
parent 0be9a35aeb
commit 792213d66b
6 changed files with 96 additions and 32 deletions

View File

@@ -59,10 +59,7 @@ typedef struct atom_tcb
*/
POINTER sp_save_ptr;
/* Thread's port specific private data. Do not move, some poorly written
* thread switching code (*cough* Cortex-M *cough*) might depend on a
* known offset into the atom_tcb struct
*/
/* Thread's port specific private data. */
THREAD_PORT_PRIV;
/* Thread priority (0-255) */

View File

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2015, Tido Klaassen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. No personal names or organizations' names associated with the
* Atomthreads project may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE ATOMTHREADS PROJECT AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ATOM_PORT_ASM_OFFSETS_H
#define ATOM_PORT_ASM_OFFSETS_H

/**
 * Byte offsets of struct task_switch_info's members, shared with the
 * assembler context switch code (which cannot use offsetof()).
 * These values are verified at compile time by the assert_static()
 * checks in archThreadContextInit(); if they fall out of sync with the
 * struct layout, compilation fails there.
 *
 * Note: the original guard name __ATOM_PORT_ASM_OFFSETS_H was renamed,
 * since identifiers starting with a double underscore are reserved for
 * the implementation by the C standard.
 */
#define CTX_RUN_OFF     0   /* offsetof(struct task_switch_info, running_tcb) */
#define CTX_NEXT_OFF    4   /* offsetof(struct task_switch_info, next_tcb)    */
#define CTX_REENT_OFF   8   /* offsetof(struct task_switch_info, reent)       */

#endif /* ATOM_PORT_ASM_OFFSETS_H */

View File

@@ -27,6 +27,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "asm_offsets.h"
.syntax unified
/**
@@ -65,11 +67,11 @@
.text
.global archFirstThreadRestore
.func archFirstThreadRestore
.type archFirstThreadRestore,%function
.global _archFirstThreadRestore
.func _archFirstThreadRestore
.type _archFirstThreadRestore,%function
.thumb_func
archFirstThreadRestore:
_archFirstThreadRestore:
/**
* Disable interrupts. They should be disabled anyway, but just
* to make sure...
@@ -87,24 +89,23 @@ archFirstThreadRestore:
/* Update ctx_switch_info, set this thread as both running and next */
ldr r1, = CTX_SW_NFO
str r0, [r1, #0]
str r0, [r1, #4]
/* Get thread stack pointer from tcb. Conveniently the first element */
ldr r1, [r0, #0]
msr PSP, r1
str r0, [r1, #CTX_RUN_OFF]
str r0, [r1, #CTX_NEXT_OFF]
#if defined(__NEWLIB__)
/**
* Store the thread's reentry context address in _impure_ptr. This
* should be the TCB's second element
* will have been stored in ctx_switch_info.reent.
*/
ldr r1, = _impure_ptr
mov r2, r0
add r2, #4
str r2, [r1, #0]
ldr r2, [r1, #CTX_REENT_OFF]
ldr r3, = _impure_ptr
str r2, [r3, #0]
#endif
/* Get thread stack pointer from tcb. Conveniently the first element */
ldr r1, [r0, #0]
msr PSP, r1
/**
* Set bit #1 in CONTROL. Causes switch to PSP, so we can work directly
* with SP now and use pop/push.
@@ -197,7 +198,7 @@ archFirstThreadRestore:
pop {r2,r3,pc}
nop
.size archFirstThreadRestore, . - archFirstThreadRestore
.size _archFirstThreadRestore, . - _archFirstThreadRestore
.endfunc
.global pend_sv_handler
@@ -234,8 +235,8 @@ pend_sv_handler:
* gets called.
*/
ldr r0, = CTX_SW_NFO
ldr r1, [r0, #0]
ldr r2, [r0, #4]
ldr r1, [r0, #CTX_RUN_OFF]
ldr r2, [r0, #CTX_NEXT_OFF]
cmp r1, r2
beq no_switch
@@ -271,7 +272,7 @@ pend_sv_handler:
/**
* Save old thread's register context on Cortex-M0.
* Push running thread's remaining registers on stack.
* Thumb-1 can only use stm on low registers, so we
* Thumb-1 can use stm only on low registers, so we
* have to do this in two steps.
*/
@@ -301,7 +302,7 @@ pend_sv_handler:
#endif // !THUMB_2
/**
* Address of running TCB still in r1. Store thread's current stack top
* into its sp_save_ptr, which is the struct's first element
* into its sp_save_ptr, which is the struct's first element.
*/
str r3, [r1, #0]
@@ -309,15 +310,14 @@ pend_sv_handler:
* ctx_switch_info.next_tcb is going to become ctx_switch_info.running_tcb,
* so we update the pointer.
*/
str r2, [r0, #0]
str r2, [r0, #CTX_RUN_OFF]
#if defined(__NEWLIB__)
/**
* Store the thread's reentry context address in _impure_ptr. This
* should be the TCB's second element
* will have been stored in ctx_switch_info.reent.
*/
mov r4, r2
add r4, #4
ldr r4, [r0, #CTX_REENT_OFF]
ldr r3, = _impure_ptr
str r4, [r3, #0]
#endif

View File

@@ -112,6 +112,9 @@ struct task_fpu_stack {
/**
 * Shared state between the C port code and the assembler context switch
 * code (exported to the assembler under the name CTX_SW_NFO).
 * pend_sv_handler compares running_tcb against next_tcb and performs a
 * switch only when they differ.
 *
 * Kept packed so the member byte offsets match the fixed CTX_*_OFF
 * values in asm_offsets.h used by the assembler; this correspondence is
 * verified by assert_static() checks in archThreadContextInit().
 */
struct task_switch_info {
    /* TCB of the thread currently executing. */
    volatile struct atom_tcb *running_tcb;
    /* TCB that should run next; set by archContextSwitch before the
     * PendSV interrupt is triggered. */
    volatile struct atom_tcb *next_tcb;
#if defined(__NEWLIB__)
    /* Address of the next thread's newlib reentry context
     * (&tcb->port_priv.reent); the assembler code installs this into
     * _impure_ptr during the switch. */
    struct _reent *reent;
#endif
} __attribute__((packed));
#endif /* __ATOMPORT_PRIVATE_H_ */

View File

@@ -37,18 +37,27 @@
#include "atomport.h"
#include "atomport-private.h"
#include "asm_offsets.h"
static void thread_shell(void);
/**
*
*/
struct task_switch_info ctx_switch_info asm("CTX_SW_NFO") =
{
.running_tcb = NULL,
.next_tcb = NULL,
};
/* Actual first-thread restore is implemented in assembler. */
extern void _archFirstThreadRestore(ATOM_TCB *);

/**
 * archFirstThreadRestore() - hand control to the very first thread.
 *
 * C-level wrapper around the assembler routine _archFirstThreadRestore.
 * When building against newlib it first publishes the new thread's
 * reentry context in ctx_switch_info.reent, where the assembler code
 * picks it up (via CTX_REENT_OFF) and stores it into _impure_ptr.
 *
 * @new_tcb_ptr: TCB of the thread to start. Never returns.
 */
void archFirstThreadRestore(ATOM_TCB *new_tcb_ptr)
{
#if defined(__NEWLIB__)
    ctx_switch_info.reent = &(new_tcb_ptr->port_priv.reent);
    /* Data memory barrier: make sure the reent store above is visible
     * before the assembler routine reads ctx_switch_info. */
    __dmb();
#endif
    _archFirstThreadRestore(new_tcb_ptr);
}
/**
* We do not perform the context switch directly. Instead we mark the new tcb
* as should-be-running in ctx_switch_info and trigger a PendSv-interrupt.
@@ -70,7 +79,9 @@ archContextSwitch(ATOM_TCB *old_tcb_ptr __maybe_unused, ATOM_TCB *new_tcb_ptr)
{
if(likely(ctx_switch_info.running_tcb != NULL)){
ctx_switch_info.next_tcb = new_tcb_ptr;
#if defined(__NEWLIB__)
ctx_switch_info.reent = &(new_tcb_ptr->port_priv.reent);
#endif
__dmb();
SCB_ICSR = SCB_ICSR_PENDSVSET;
@@ -145,6 +156,17 @@ void archThreadContextInit(ATOM_TCB *tcb_ptr, void *stack_top,
struct isr_stack *isr_ctx;
struct task_stack *tsk_ctx;
/**
* Do compile time verification for offsets used in _archFirstThreadRestore
* and pend_sv_handler. If compilation aborts here, you will have to adjust
* the offsets for struct task_switch_info's members in asm_offsets.h
*/
assert_static(offsetof(struct task_switch_info, running_tcb) == CTX_RUN_OFF);
assert_static(offsetof(struct task_switch_info, next_tcb) == CTX_NEXT_OFF);
#if defined(__NEWLIB__)
assert_static(offsetof(struct task_switch_info, reent) == CTX_REENT_OFF);
#endif
/**
* Enforce initial stack alignment
*/

View File

@@ -52,6 +52,11 @@
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((unused))
/**
 * Compile-time assertion usable as a statement inside a function body.
 * If 'e' evaluates to false (0), the enum initializer becomes a division
 * by zero in a constant expression, which the compiler must reject, so
 * the build fails at the assertion site. The do/while(0) wrapper gives
 * each use its own scope, allowing several assert_static() calls in the
 * same function.
 */
#define assert_static(e) \
do { \
enum { assert_static__ = 1/(e) }; \
} while (0)
/**
* Critical region protection: this should disable interrupts
* to protect OS data structures during modification. It must