mirror of https://github.com/kelvinlawson/atomthreads.git
synced 2026-01-11 18:33:16 +01:00
Minor refactoring of code.
Signed-off-by: Anup Patel <anup@brainfault.org>
@@ -32,32 +32,25 @@
.section .text

/**
 * Function that performs the context switch. Whether it is a voluntary
 * release of the CPU by a thread or a pre-emption, this function is
 * called under both conditions. The signature is as follows:
 *
 * archContextSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb)
 * int archSetJumpLowLevel(pt_regs_t *regs)
 */
.globl archContextSwitch
archContextSwitch:
.globl archSetJumpLowLevel
archSetJumpLowLevel:
        bx lr

/**
 * archFirstThreadRestore(ATOM_TCB *new_tcb)
 *
 * This function is responsible for restoring and starting the first
 * thread the OS runs. It expects to find the thread context exactly
 * as it would be if a context save had previously taken place on it.
 * The only real difference between this and the archContextSwitch()
 * routine is that there is no previous thread for which context must
 * be saved.
 *
 * The final action this function must do is to restore interrupts.
 * void archLongJumpLowLevel(pt_regs_t *regs)
 */
.globl archFirstThreadRestore
archFirstThreadRestore:
        ldr r0, [r0]                    /* load the saved stack pointer from the TCB */
        mov sp, r0
.globl archLongJumpLowLevel
archLongJumpLowLevel:
        bx lr

/**
 * void archFirstThreadRestoreLowLevel(pt_regs_t *regs)
 */
.globl archFirstThreadRestoreLowLevel
archFirstThreadRestoreLowLevel:
        add r0, r0, #(4 * 17)           /* point past the 17-word register frame */
        mrs r1, cpsr                    /* remember the current mode */
        SET_CURRENT_MODE CPSR_MODE_UNDEFINED
        mov sp, r0                      /* set the banked SP of undefined mode */
@@ -68,6 +61,7 @@ archFirstThreadRestore:
        SET_CURRENT_MODE CPSR_MODE_FIQ
        mov sp, r0                      /* set the banked SP of FIQ mode */
        msr cpsr, r1                    /* switch back to the original mode */
        mov sp, r0
        sub sp, sp, #(4 * 17)           /* SP now points at the saved register frame */
        ldr r0, [sp], #0x0004;          /* Get CPSR from stack */
        msr spsr_all, r0;

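The two hunks above walk a 17-word register frame (4 * 17 bytes) whose first word is the saved CPSR. Below is a minimal sketch of a pt_regs_t layout consistent with that arithmetic; it is an assumption inferred from the frame size and the "Get CPSR from stack" pop, not the port's actual definition.

#include <stdint.h>

/* Hypothetical layout: 17 words with the CPSR first, matching the
 * add/sub #(4 * 17) above. Field names are illustrative only. */
typedef struct pt_regs {
        uint32_t cpsr;          /* popped first, moved into the SPSR */
        uint32_t gpr[13];       /* r0-r12 */
        uint32_t sp;            /* r13 */
        uint32_t lr;            /* r14 */
        uint32_t pc;            /* r15, the thread's resume address */
} pt_regs_t;                    /* sizeof(pt_regs_t) == 4 * 17 == 68 bytes */

Under this assumption, regs = sp_save_ptr - sizeof(pt_regs_t) in the C code below means sp_save_ptr points just past the top of the saved frame.
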
@@ -68,3 +68,48 @@ void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top,
        regs->pc = (uint32_t)entry_point;
}

extern void archFirstThreadRestoreLowLevel(pt_regs_t *regs);

/**
 * archFirstThreadRestore(ATOM_TCB *new_tcb)
 *
 * This function is responsible for restoring and starting the first
 * thread the OS runs. It expects to find the thread context exactly
 * as it would be if a context save had previously taken place on it.
 * The only real difference between this and the archContextSwitch()
 * routine is that there is no previous thread for which context must
 * be saved.
 *
 * The final action this function must do is to restore interrupts.
 */
void archFirstThreadRestore(ATOM_TCB *new_tcb)
{
        pt_regs_t *regs = NULL;
        regs = (pt_regs_t *)((uint32_t)new_tcb->sp_save_ptr
                                - sizeof(pt_regs_t));
        archFirstThreadRestoreLowLevel(regs);
}

extern int archSetJumpLowLevel(pt_regs_t *regs);
extern void archLongJumpLowLevel(pt_regs_t *regs);

/**
 * Function that performs the context switch. Whether it is a voluntary
 * release of the CPU by a thread or a pre-emption, this function is
 * called under both conditions. The signature is as follows:
 *
 * archContextSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb)
 */
void archContextSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb)
{
        pt_regs_t *old_regs = NULL;
        pt_regs_t *new_regs = NULL;
        old_regs = (pt_regs_t *)((uint32_t)old_tcb->sp_save_ptr
                                - sizeof(pt_regs_t));
        new_regs = (pt_regs_t *)((uint32_t)new_tcb->sp_save_ptr
                                - sizeof(pt_regs_t));
        if (archSetJumpLowLevel(old_regs)) {
                archLongJumpLowLevel(new_regs);
        }
}

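The archSetJumpLowLevel()/archLongJumpLowLevel() pair follows the classic setjmp()/longjmp() pattern: save the outgoing thread's context, jump into the incoming one, and return through the saved context when the thread is later switched back in. Here is a minimal sketch of the same control flow using the C library's <setjmp.h>; it assumes archSetJumpLowLevel() returns nonzero on the initial save, which is the opposite of standard setjmp() returning 0 on the direct call.

#include <setjmp.h>

static jmp_buf old_ctx, new_ctx;        /* stand-ins for old_regs/new_regs */

/* Sketch only: new_ctx must have been filled by an earlier setjmp(). */
static void contextSwitchSketch(void)
{
        if (setjmp(old_ctx) == 0) {     /* direct call: context now saved */
                longjmp(new_ctx, 1);    /* resume the other context */
        }
        /* Control arrives here when some other context later executes
         * longjmp(old_ctx, 1), just as a thread suspended in
         * archContextSwitch() resumes when it is switched back in. */
}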