Fixed context-switch and timer-delay problems on MIPS.

Signed-off-by: Himanshu Chauhan <hschauhan@nulltrace.org>
Himanshu Chauhan
2011-07-22 23:36:17 +05:30
parent fc53574c12
commit 48fe6bc12a
7 changed files with 127 additions and 209 deletions

View File

@@ -47,7 +47,7 @@ fn:
nop; \
nop;
#define enable_global_interrupts ei $0
#define enable_global_interrupts ei
#define disable_global_interrupts di $0
#define EXCEPTION_VECTOR(_name, _offset, _where)\
@@ -63,70 +63,69 @@ _name: \
#define LOAD_REG(reg, treg) \
lw reg, ((reg ## _IDX) * 4)(treg)
#define SAVE_INT_CONTEXT \
addiu sp, sp, -((NUM_REGISTERS + 1)* 4); \
#define SAVE_INT_CONTEXT(treg) \
mfc0 k1, CP0_EPC; \
SAVE_REG(t0,sp); \
SAVE_REG(t1,sp); \
SAVE_REG(t2,sp); \
SAVE_REG(t3,sp); \
SAVE_REG(t4,sp); \
SAVE_REG(t5,sp); \
SAVE_REG(t6,sp); \
SAVE_REG(t7,sp); \
SAVE_REG(t8,sp); \
SAVE_REG(t9,sp); \
SAVE_REG(v0,sp); \
SAVE_REG(v1,sp); \
SAVE_REG(a0,sp); \
SAVE_REG(a1,sp); \
SAVE_REG(a2,sp); \
SAVE_REG(a3,sp); \
SAVE_REG(s0,sp); \
SAVE_REG(s1,sp); \
SAVE_REG(s2,sp); \
SAVE_REG(s3,sp); \
SAVE_REG(s4,sp); \
SAVE_REG(s5,sp); \
SAVE_REG(s6,sp); \
SAVE_REG(s7,sp); \
SAVE_REG(gp,sp); \
SAVE_REG(s8,sp); \
SAVE_REG(ra,sp); \
sw k0, (sp_IDX * 4)(sp); \
sw k1, (NUM_REGISTERS * 4)(sp);
SAVE_REG(t0,treg); \
SAVE_REG(t1,treg); \
SAVE_REG(t2,treg); \
SAVE_REG(t3,treg); \
SAVE_REG(t4,treg); \
SAVE_REG(t5,treg); \
SAVE_REG(t6,treg); \
SAVE_REG(t7,treg); \
SAVE_REG(t8,treg); \
SAVE_REG(t9,treg); \
SAVE_REG(v0,treg); \
SAVE_REG(v1,treg); \
SAVE_REG(a0,treg); \
SAVE_REG(a1,treg); \
SAVE_REG(a2,treg); \
SAVE_REG(a3,treg); \
SAVE_REG(s0,treg); \
SAVE_REG(s1,treg); \
SAVE_REG(s2,treg); \
SAVE_REG(s3,treg); \
SAVE_REG(s4,treg); \
SAVE_REG(s5,treg); \
SAVE_REG(s6,treg); \
SAVE_REG(s7,treg); \
SAVE_REG(gp,treg); \
SAVE_REG(s8,treg); \
SAVE_REG(ra,treg); \
sw k0, (sp_IDX * 4)(treg); \
sw k1, (cp0_epc_IDX * 4)(treg);
#define RESTORE_INT_CONTEXT \
lw k1, (NUM_REGISTERS * 4)(sp); \
#define RESTORE_INT_CONTEXT(treg) \
lw k1, (cp0_epc_IDX * 4)(treg); \
mtc0 k1, CP0_EPC; \
LOAD_REG(s0,sp); \
LOAD_REG(s1,sp); \
LOAD_REG(s2,sp); \
LOAD_REG(s3,sp); \
LOAD_REG(s4,sp); \
LOAD_REG(s5,sp); \
LOAD_REG(s6,sp); \
LOAD_REG(s7,sp); \
LOAD_REG(v0,sp); \
LOAD_REG(v1,sp); \
LOAD_REG(a0,sp); \
LOAD_REG(a1,sp); \
LOAD_REG(a2,sp); \
LOAD_REG(a3,sp); \
LOAD_REG(t0,sp); \
LOAD_REG(t1,sp); \
LOAD_REG(t2,sp); \
LOAD_REG(t3,sp); \
LOAD_REG(t4,sp); \
LOAD_REG(t5,sp); \
LOAD_REG(t6,sp); \
LOAD_REG(t7,sp); \
LOAD_REG(t8,sp); \
LOAD_REG(t9,sp); \
LOAD_REG(gp,sp); \
LOAD_REG(ra,sp); \
LOAD_REG(s8,sp); \
lw sp, (sp_IDX * 4)(sp);
LOAD_REG(s0,treg); \
LOAD_REG(s1,treg); \
LOAD_REG(s2,treg); \
LOAD_REG(s3,treg); \
LOAD_REG(s4,treg); \
LOAD_REG(s5,treg); \
LOAD_REG(s6,treg); \
LOAD_REG(s7,treg); \
LOAD_REG(v0,treg); \
LOAD_REG(v1,treg); \
LOAD_REG(a0,treg); \
LOAD_REG(a1,treg); \
LOAD_REG(a2,treg); \
LOAD_REG(a3,treg); \
LOAD_REG(t0,treg); \
LOAD_REG(t1,treg); \
LOAD_REG(t2,treg); \
LOAD_REG(t3,treg); \
LOAD_REG(t4,treg); \
LOAD_REG(t5,treg); \
LOAD_REG(t6,treg); \
LOAD_REG(t7,treg); \
LOAD_REG(t8,treg); \
LOAD_REG(t9,treg); \
LOAD_REG(gp,treg); \
LOAD_REG(ra,treg); \
LOAD_REG(s8,treg); \
lw sp, (sp_IDX * 4)(treg);
#endif /* __ASSEMBLY__ */
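
The reworked save/restore macros take the frame base register (treg) as a parameter instead of hard-wiring sp, and the EPC word now goes to the named cp0_epc_IDX slot rather than a bare NUM_REGISTERS offset. The offset arithmetic itself is unchanged: SAVE_REG/LOAD_REG paste the register name onto _IDX and scale by the word size. A minimal host-side C analogue of that token pasting; only cp0_epc_IDX below is taken from regs.h, s0_IDX is a placeholder for illustration:

#include <stdio.h>

#define WORD_SIZE   4
#define s0_IDX      16   /* placeholder; the real value comes from regs.h */
#define cp0_epc_IDX 32   /* as defined in regs.h in this commit */

/* Same trick the assembler macros use: "reg ## _IDX" turns a register
 * name into its slot index; the byte offset is index * WORD_SIZE. */
#define CTX_OFFSET(reg)  ((reg ## _IDX) * WORD_SIZE)

int main(void)
{
        printf("s0  slot at frame offset %d\n", CTX_OFFSET(s0));
        printf("EPC slot at frame offset %d\n", CTX_OFFSET(cp0_epc));
        return 0;
}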

View File

@@ -31,9 +31,6 @@
.section .text
.extern atomCurrentContext
.extern at_preempt_count
/**
* Function that performs the context switch. Whether it is a voluntary release
* of the CPU by a thread or a pre-emption, under both conditions this function is
@@ -43,121 +40,49 @@
*/
.globl archContextSwitch
archContextSwitch:
/*
* Check if we are being called in interrupt
* context. If so, we need to restore the complete
* context and return directly from here.
*/
move k0, ra
bal atomCurrentContext
nop
beq v0, zero, __in_int_context
nop
move ra, k0
move v0, a0 /* return old tcb when we return from here */
lw k0, 0(a0) /* assume that sp_save_ptr is always at base of ATOM_TCB */
sw s0, (s0_IDX * 4)(k0)
sw s1, (s1_IDX * 4)(k0)
sw s2, (s2_IDX * 4)(k0)
sw s3, (s3_IDX * 4)(k0)
sw s4, (s4_IDX * 4)(k0)
sw s5, (s5_IDX * 4)(k0)
sw s6, (s6_IDX * 4)(k0)
sw s7, (s7_IDX * 4)(k0)
sw s8, (s8_IDX * 4)(k0)
sw sp, (sp_IDX * 4)(k0)
sw gp, (gp_IDX * 4)(k0)
sw ra, (ra_IDX * 4)(k0)
/*
* We are saving registers in non-interrupt context because
* a thread is voluntarily yielding the CPU. Storing zero in
* the EPC slot marks this case: when restoring the context,
* a zero EPC slot means only the partial (callee-saved)
* context is restored here; the rest is recovered by the
* compiler-generated code as the call unwinds.
*/
sw zero, (cp0_epc_IDX * 4)(k0)
SAVE_REG(s0, k0)
SAVE_REG(s1, k0)
SAVE_REG(s2, k0)
SAVE_REG(s3, k0)
SAVE_REG(s4, k0)
SAVE_REG(s5, k0)
SAVE_REG(s6, k0)
SAVE_REG(s7, k0)
SAVE_REG(s8, k0)
SAVE_REG(sp, k0)
SAVE_REG(gp, k0)
SAVE_REG(ra, k0)
lw k1, 0(a1)
LOAD_REG(s0, k1)
LOAD_REG(s1, k1)
LOAD_REG(s2, k1)
LOAD_REG(s3, k1)
LOAD_REG(s4, k1)
LOAD_REG(s5, k1)
LOAD_REG(s6, k1)
LOAD_REG(s7, k1)
LOAD_REG(s8, k1)
LOAD_REG(sp, k1)
LOAD_REG(gp, k1)
LOAD_REG(ra, k1)
lw k0, (cp0_epc_IDX * 4)(k1)
bnez k0, __unwind_int_context
bnez k0, 1f
nop
lw s0, (s0_IDX * 4)(k1)
lw s1, (s1_IDX * 4)(k1)
lw s2, (s2_IDX * 4)(k1)
lw s3, (s3_IDX * 4)(k1)
lw s4, (s4_IDX * 4)(k1)
lw s5, (s5_IDX * 4)(k1)
lw s6, (s6_IDX * 4)(k1)
lw s7, (s7_IDX * 4)(k1)
lw s8, (s8_IDX * 4)(k1)
lw sp, (sp_IDX * 4)(k1)
lw gp, (gp_IDX * 4)(k1)
lw ra, (ra_IDX * 4)(k1)
li k0, 0x00000001
sw k0, (cp0_epc_IDX * 4)(k1)
LOAD_REG(a0, k1)
LOAD_REG(a1, k1)
LOAD_REG(a2, k1)
LOAD_REG(a3, k1)
enable_global_interrupts
1:
jr ra
nop
__in_int_context:
move ra, k0
/*
* In interrupt context, the interrupt handler has
* already saved the full context for us, so we do
* not need to save it again here.
*
* We only need to figure out whether the task being
* switched in had its context saved in interrupt
* context or not.
*/
lw k0, (cp0_epc_IDX * 4)(k1)
bnez k0, __unwind_int_context
nop
/*
* Unwinding a task that was switched out in non-interrupt
* context, so restore only the partial context. But since
* we are in interrupt mode, we put ra into EPC and do an
* eret so that we leave interrupt mode and switch to the
* new task.
*/
__unwind_non_int_context:
lw s0, (s0_IDX * 4)(k1)
lw s1, (s1_IDX * 4)(k1)
lw s2, (s2_IDX * 4)(k1)
lw s3, (s3_IDX * 4)(k1)
lw s4, (s4_IDX * 4)(k1)
lw s5, (s5_IDX * 4)(k1)
lw s6, (s6_IDX * 4)(k1)
lw s7, (s7_IDX * 4)(k1)
lw s8, (s8_IDX * 4)(k1)
lw sp, (sp_IDX * 4)(k1)
lw gp, (gp_IDX * 4)(k1)
lw ra, (ra_IDX * 4)(k1)
mtc0 ra, CP0_EPC
nop
nop
nop
j __ret_from_switch
nop
__unwind_int_context:
move sp, k1
RESTORE_INT_CONTEXT
__ret_from_switch:
la k0, at_preempt_count
lw k1, (k0)
addi k1, k1, -1
sw k1, (k0)
bnez k1, __return_from_int
nop
enable_global_interrupts
ehb
__return_from_int:
eret
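
The whole flow hinges on the saved EPC slot: a thread that yielded voluntarily stored zero there, so only s0-s8, sp, gp and ra need to come back, while a thread whose frame was built by the interrupt handler needs RESTORE_INT_CONTEXT to unwind everything. A rough C sketch of that decision, assuming a frame laid out as above; restore_partial() and restore_full() are hypothetical stand-ins for the two assembly paths, not functions in the port:

#include <stdint.h>
#include <stdio.h>

#define cp0_epc_IDX  32
#define NUM_CTX_REGS 35

/* Hypothetical stand-ins for the two assembly restore paths above. */
static void restore_partial(uint32_t *frame) { (void)frame; puts("partial restore (cooperative switch)"); }
static void restore_full(uint32_t *frame)    { (void)frame; puts("full restore (saved by interrupt handler)"); }

/* frame points at the context area left on the incoming thread's stack.
 * Zero in the EPC slot marks a cooperative save, which is exactly the
 * "lw k0, (cp0_epc_IDX * 4)(k1); bnez k0, ..." test above. */
static void unwind(uint32_t *frame)
{
        if (frame[cp0_epc_IDX] == 0)
                restore_partial(frame);
        else
                restore_full(frame);
}

int main(void)
{
        uint32_t frame[NUM_CTX_REGS] = { 0 };
        unwind(frame);                      /* cooperative: EPC slot is zero */
        frame[cp0_epc_IDX] = 0x80001000u;   /* pretend it was saved by the IRQ path */
        unwind(frame);
        return 0;
}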
/**
* archFirstThreadRestore(ATOM_TCB *new_tcb)
*
@@ -182,5 +107,13 @@ archFirstThreadRestore:
nop
nop
nop
ehb
li k0, 0x00000001
sw k0, (cp0_epc_IDX * 4)(k1)
nop
ehb
enable_global_interrupts
ehb
nop
nop
eret

View File

@@ -90,32 +90,16 @@ LEAF(_handle_interrupt)
beq k0, zero, 1f
nop
move k0, ra
move k1, v0
bal atomCurrentContext
nop
beq v0, zero, 2f /* v0 should be current context */
nop
move ra, k0
lw k0, 0(v0)
move v0, k1
move k1, k0
/*
* Note that we aren't loading any new SP. The context
* will be saved on the interrupted thread's stack.
*/
move k0, sp
move sp, k1
SAVE_INT_CONTEXT
/* Calculate interrupt context base */
addi sp, sp, -(NUM_CTX_REGS * WORD_SIZE)
SAVE_INT_CONTEXT(sp)
bal handle_mips_systick
nop
RESTORE_INT_CONTEXT
1:
RESTORE_INT_CONTEXT(sp)
1:
enable_global_interrupts
eret
2: b 2b
END(_handle_interrupt)
LEAF(_handle_cache_error)
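
Compared to the old path, the handler no longer looks up the current TCB to find a save area; it simply reserves NUM_CTX_REGS words on the interrupted thread's own stack and saves into that. A small illustration of the stack arithmetic (the starting sp value is made up):

#include <stdint.h>
#include <stdio.h>

#define WORD_SIZE    4
#define NUM_CTX_REGS 35

int main(void)
{
        uint32_t sp = 0x80010000u;       /* hypothetical sp of the interrupted thread */
        uint32_t frame_base = sp - NUM_CTX_REGS * WORD_SIZE;   /* addi sp, sp, -(...) */

        printf("sp before interrupt : 0x%08x\n", (unsigned)sp);
        printf("context frame base  : 0x%08x (%u bytes below sp)\n",
               (unsigned)frame_base, (unsigned)(NUM_CTX_REGS * WORD_SIZE));
        return 0;
}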

View File

@@ -54,14 +54,19 @@ void mips_cpu_timer_enable(void)
void handle_mips_systick(void)
{
/* clear EXL from status */
uint32_t sr = read_c0_status();
sr &= ~0x00000002;
write_c0_status(sr);
/* Call the interrupt entry routine */
atomIntEnter();
/* Call the OS system tick handler */
atomTimerTick();
write_c0_compare(read_c0_count() + COUNTER_TICK_COUNT);
/* Call the interrupt exit routine */
atomIntExit(TRUE);
write_c0_compare(read_c0_count() + COUNTER_TICK_COUNT);
}
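
handle_mips_systick now clears EXL before doing anything else, brackets the OS tick with atomIntEnter()/atomIntExit(), and re-arms the CP0 compare register relative to the current count. The tick period comes from COUNTER_TICK_COUNT; a hedged sketch of how such a value is typically derived (the 100 MHz core clock, the divide-by-two Count rate and the 100 Hz system tick are assumptions for illustration, not values from this port):

#include <stdio.h>

/* Assumed figures for illustration only. On many MIPS cores the CP0 Count
 * register ticks at half the core clock. */
#define CPU_CLOCK_HZ         100000000u
#define COUNT_TICKS_PER_SEC  (CPU_CLOCK_HZ / 2u)
#define SYSTEM_TICKS_PER_SEC 100u

#define COUNTER_TICK_COUNT   (COUNT_TICKS_PER_SEC / SYSTEM_TICKS_PER_SEC)

int main(void)
{
        printf("re-arm compare every %u count ticks (%u Hz system tick)\n",
               (unsigned)COUNTER_TICK_COUNT, (unsigned)SYSTEM_TICKS_PER_SEC);
        return 0;
}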

View File

@@ -31,7 +31,7 @@
#include <atom.h>
#include <atomport-private.h>
#include <atomport.h>
#include "regs.h"
#include <atomport-asm-macros.h>
#include <string.h>
@@ -58,11 +58,11 @@ void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top,
void (*entry_point)(UINT32),
UINT32 entry_param)
{
#define STORE_VAL(base, reg, val) \
*((uint32_t *)(base + ((reg ## _IDX) * WORD_SIZE))) = (uint32_t)val
uint32_t stack_start = (uint32_t)(stack_top - (WORD_SIZE * (NUM_REGISTERS + 1)));
/* Make space for context saving */
uint32_t stack_start = (uint32_t)(stack_top - (WORD_SIZE * NUM_CTX_REGS));
tcb_ptr->sp_save_ptr = (void *)stack_start;
@@ -75,7 +75,7 @@ void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top,
STORE_VAL(stack_start, s5, 0);
STORE_VAL(stack_start, s6, 0);
STORE_VAL(stack_start, s7, 0);
STORE_VAL(stack_start, cp0_epc, entry_point);
STORE_VAL(stack_start, cp0_epc, 0);
STORE_VAL(stack_start, ra, entry_point);
STORE_VAL(stack_start, a0, entry_param);
}
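
With cp0_epc set to zero, the freshly built frame looks exactly like a cooperative save, so a later switch into the thread takes the partial-restore path and enters entry_point through ra, with entry_param already sitting in the a0 slot. A host-side illustration of the frame setup; the ra_IDX and a0_IDX values are placeholders (the real ones live in regs.h) and the STORE_VAL cast is widened so the sketch compiles on a 64-bit host:

#include <stdint.h>
#include <stdio.h>

#define WORD_SIZE    4
#define NUM_CTX_REGS 35
#define cp0_epc_IDX  32
#define ra_IDX       26      /* placeholder index for illustration */
#define a0_IDX       12      /* placeholder index for illustration */

#define STORE_VAL(base, reg, val) \
        *((uint32_t *)((base) + ((reg ## _IDX) * WORD_SIZE))) = (uint32_t)(uintptr_t)(val)

static void thread_entry(uint32_t param) { (void)param; }

int main(void)
{
        static uint32_t stack_mem[256];                      /* fake thread stack */
        uintptr_t stack_top   = (uintptr_t)&stack_mem[256];
        uintptr_t stack_start = stack_top - WORD_SIZE * NUM_CTX_REGS;

        STORE_VAL(stack_start, cp0_epc, 0);             /* cooperative-style frame   */
        STORE_VAL(stack_start, ra, thread_entry);       /* partial restore jumps here */
        STORE_VAL(stack_start, a0, 42);                 /* entry_param ends up in a0  */

        printf("sp_save_ptr would be %p\n", (void *)stack_start);
        return 0;
}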

View File

@@ -72,21 +72,14 @@ extern uint32_t at_preempt_count;
__asm__ __volatile__("di %0\t\n" \
"ehb\t\n" \
:"=r"(status_reg)); \
at_preempt_count++; \
}while(0);
#define CRITICAL_END() \
do { \
at_preempt_count--; \
\
if (at_preempt_count == 0) { \
if (atomCurrentContext()) { \
__asm__ __volatile__("ei %0\t\n" \
"ehb\t\n" \
::"r"(status_reg));\
} \
} \
\
__asm__ __volatile__("mtc0 %0, $12\t\n" \
"nop\t\n" \
"ehb\t\n" \
::"r"(status_reg)); \
}while(0);
/* Uncomment to enable stack-checking */
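
The new CRITICAL_END() writes the saved status word back into CP0 Status with mtc0 instead of counting preemption depth and conditionally re-executing ei, so nesting falls out of the saved status_reg value alone. A hedged usage sketch; the macro bodies below are host-side stand-ins so the example runs anywhere (on the target they are the di/ehb and mtc0 sequences above), and CRITICAL_STORE declaring status_reg is assumed to match the port's convention:

#include <stdint.h>
#include <stdio.h>

/* Host-side stand-ins for the real atomport.h macros shown above. */
#define CRITICAL_STORE      uint32_t status_reg
#define CRITICAL_START()    do { status_reg = 1; /* di; ehb: mask interrupts */ } while (0)
#define CRITICAL_END()      do { (void)status_reg; /* mtc0: old status back  */ } while (0)

static volatile uint32_t shared_counter;    /* illustrative shared data */

static void bump_counter(void)
{
        CRITICAL_STORE;

        CRITICAL_START();       /* remember old status, interrupts off       */
        shared_counter++;       /* safe against the system-tick interrupt    */
        CRITICAL_END();         /* restore exactly the previous status word  */
}

int main(void)
{
        bump_counter();
        printf("counter = %u\n", (unsigned)shared_counter);
        return 0;
}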

View File

@@ -101,6 +101,10 @@
#define at_IDX 30
#define zero_IDX 31
#define cp0_epc_IDX 32
#define cp0_status_IDX 33
#define cp_cause_IDX 34
#define NUM_CTX_REGS 35
#define CP0_INDEX $0
#define CP0_RANDOM $1
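
The three new slots extend the 0-31 GPR index table with EPC, STATUS and CAUSE. Assuming NUM_REGISTERS is 32, as the GPR indices above suggest, cp0_epc_IDX lands on exactly the word the old "sw k1, (NUM_REGISTERS * 4)(sp)" used, so the rename only makes the EPC slot explicit while NUM_CTX_REGS grows the frame to 35 words. A quick check in C:

#include <stdio.h>

#define NUM_REGISTERS   32      /* assumed: the GPR indices above run 0..31 */
#define WORD_SIZE       4
#define cp0_epc_IDX     32
#define NUM_CTX_REGS    35

int main(void)
{
        printf("old EPC offset : %d bytes\n", NUM_REGISTERS * WORD_SIZE);
        printf("new EPC offset : %d bytes\n", cp0_epc_IDX * WORD_SIZE);
        printf("context frame  : %d bytes\n", NUM_CTX_REGS * WORD_SIZE);
        return 0;
}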