Atomthreads working on MIPS (QEMU MIPS machine).

Signed-off-by: Himanshu Chauhan <hschauhan@nulltrace.org>
Author: Himanshu Chauhan
Date:   2011-05-25 22:31:13 +05:30
parent 83841d2673
commit 4e2b83c36f
7 changed files with 230 additions and 128 deletions

ports/mips/.gdbinit (new file)

@@ -0,0 +1 @@
target remote localhost:1234
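(For context: "target remote localhost:1234" matches QEMU's built-in gdbstub, which listens on TCP port 1234 when the emulator is started with -s; adding -S additionally halts the CPU at reset. gdb typically sources a .gdbinit found in the directory it is started from, so this attaches the debugger straight to the emulated MIPS machine.)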


@@ -63,17 +63,9 @@ _name: \
#define LOAD_REG(reg, treg) \
lw reg, ((reg ## _IDX) * 4)(treg)
#define SAVE_INT_CONTEXT(_int_sp) \
move k0, sp; \
la sp, _int_sp; \
addiu sp, sp, -((CPU_USER_REG_COUNT + 1)* 4); \
#define SAVE_INT_CONTEXT \
addiu sp, sp, -((NUM_REGISTERS + 1)* 4); \
mfc0 k1, CP0_EPC; \
SAVE_REG(v0,sp); \
SAVE_REG(v1,sp); \
SAVE_REG(a0,sp); \
SAVE_REG(a1,sp); \
SAVE_REG(a2,sp); \
SAVE_REG(a3,sp); \
SAVE_REG(t0,sp); \
SAVE_REG(t1,sp); \
SAVE_REG(t2,sp); \
@@ -82,6 +74,14 @@ _name: \
SAVE_REG(t5,sp); \
SAVE_REG(t6,sp); \
SAVE_REG(t7,sp); \
SAVE_REG(t8,sp); \
SAVE_REG(t9,sp); \
SAVE_REG(v0,sp); \
SAVE_REG(v1,sp); \
SAVE_REG(a0,sp); \
SAVE_REG(a1,sp); \
SAVE_REG(a2,sp); \
SAVE_REG(a3,sp); \
SAVE_REG(s0,sp); \
SAVE_REG(s1,sp); \
SAVE_REG(s2,sp); \
@@ -90,45 +90,43 @@ _name: \
SAVE_REG(s5,sp); \
SAVE_REG(s6,sp); \
SAVE_REG(s7,sp); \
SAVE_REG(t8,sp); \
SAVE_REG(t9,sp); \
SAVE_REG(gp,sp); \
SAVE_REG(s8,sp); \
SAVE_REG(ra,sp); \
sw k0, (sp_IDX * 4)(sp); \
sw k1, (CPU_USER_REG_COUNT * 4)(sp);
sw k1, (NUM_REGISTERS * 4)(sp);
#define RESTORE_INT_CONTEXT(treg) \
lw k1, (CPU_USER_REG_COUNT * 4)(treg); \
#define RESTORE_INT_CONTEXT \
lw k1, (NUM_REGISTERS * 4)(sp); \
mtc0 k1, CP0_EPC; \
LOAD_REG(v0,treg); \
LOAD_REG(v1,treg); \
LOAD_REG(a0,treg); \
LOAD_REG(a1,treg); \
LOAD_REG(a2,treg); \
LOAD_REG(a3,treg); \
LOAD_REG(t0,treg); \
LOAD_REG(t1,treg); \
LOAD_REG(t2,treg); \
LOAD_REG(t3,treg); \
LOAD_REG(t4,treg); \
LOAD_REG(t5,treg); \
LOAD_REG(t6,treg); \
LOAD_REG(t7,treg); \
LOAD_REG(s0,treg); \
LOAD_REG(s1,treg); \
LOAD_REG(s2,treg); \
LOAD_REG(s3,treg); \
LOAD_REG(s4,treg); \
LOAD_REG(s5,treg); \
LOAD_REG(s6,treg); \
LOAD_REG(s7,treg); \
LOAD_REG(t8,treg); \
LOAD_REG(t9,treg); \
LOAD_REG(gp,treg); \
LOAD_REG(ra,treg); \
LOAD_REG(s8,treg); \
lw sp, (sp_IDX * 4)(treg);
LOAD_REG(s0,sp); \
LOAD_REG(s1,sp); \
LOAD_REG(s2,sp); \
LOAD_REG(s3,sp); \
LOAD_REG(s4,sp); \
LOAD_REG(s5,sp); \
LOAD_REG(s6,sp); \
LOAD_REG(s7,sp); \
LOAD_REG(v0,sp); \
LOAD_REG(v1,sp); \
LOAD_REG(a0,sp); \
LOAD_REG(a1,sp); \
LOAD_REG(a2,sp); \
LOAD_REG(a3,sp); \
LOAD_REG(t0,sp); \
LOAD_REG(t1,sp); \
LOAD_REG(t2,sp); \
LOAD_REG(t3,sp); \
LOAD_REG(t4,sp); \
LOAD_REG(t5,sp); \
LOAD_REG(t6,sp); \
LOAD_REG(t7,sp); \
LOAD_REG(t8,sp); \
LOAD_REG(t9,sp); \
LOAD_REG(gp,sp); \
LOAD_REG(ra,sp); \
LOAD_REG(s8,sp); \
lw sp, (sp_IDX * 4)(sp);
#endif /* __ASSEMBLY__ */
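As a reading aid: the token pasting in these macros means that, for example, LOAD_REG(t0, sp) expands to lw t0, ((t0_IDX) * 4)(sp). Below is a minimal C-level sketch of the frame that SAVE_INT_CONTEXT now builds on the interrupted thread's stack; the struct name is hypothetical and for illustration only.

    #include <stdint.h>

    #define NUM_REGISTERS 32            /* as in the port's private header */

    /* Sketch of the interrupt frame laid out by SAVE_INT_CONTEXT:
     * NUM_REGISTERS GPR slots addressed as (reg_IDX * 4)(sp), plus one
     * extra word for CP0 EPC, which is the "+ 1" reserved by the addiu
     * adjustment and filled by "sw k1, (NUM_REGISTERS * 4)(sp)". */
    struct mips_int_frame {
        uint32_t gpr[NUM_REGISTERS];    /* gpr[v0_IDX] ... gpr[zero_IDX] */
        uint32_t cp0_epc;               /* saved EPC, slot index 32 */
    };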


@@ -31,6 +31,7 @@
.section .text
.extern atomCurrentContext
/**
* Function that performs the context switch. Whether it's a voluntary release
* of the CPU by a thread or a pre-emption, under both conditions this function is
@@ -40,6 +41,18 @@
*/
.globl archContextSwitch
archContextSwitch:
/*
* Check whether we are being called in interrupt
* context. If so, we need to restore the complete
* context and return directly from here.
*/
move k0, ra
bal atomCurrentContext
nop
beq v0, zero, __in_int_context
nop
move ra, k0
move v0, a0 /* return old tcb when we return from here */
lw k0, 0(a0) /* assume that sp_save_ptr is always at base of ATOM_TCB */
sw s0, (s0_IDX * 4)(k0)
@@ -53,13 +66,22 @@ archContextSwitch:
sw s8, (s8_IDX * 4)(k0)
sw sp, (sp_IDX * 4)(k0)
sw gp, (gp_IDX * 4)(k0)
mfc0 k1, CP0_EPC
nop
nop
nop
sw k1, (ra_IDX * 4)(k0)
sw ra, (ra_IDX * 4)(k0)
/*
* We are saving registers in non-interrupt context because
* the thread is voluntarily yielding the CPU. Storing zero
* at the EPC offset marks this case: when restoring the
* context, if the EPC slot holds zero we restore only the
* partial (callee-saved) context; the rest is handled by
* GCC as the call unwinds.
*/
sw zero, (cp0_epc_IDX * 4)(k0)
lw k1, 0(a1)
lw k0, (cp0_epc_IDX * 4)(k1)
bnez k0, __unwind_int_context
nop
lw s0, (s0_IDX * 4)(k1)
lw s1, (s1_IDX * 4)(k1)
lw s2, (s2_IDX * 4)(k1)
@@ -71,15 +93,61 @@ archContextSwitch:
lw s8, (s8_IDX * 4)(k1)
lw sp, (sp_IDX * 4)(k1)
lw gp, (gp_IDX * 4)(k1)
lw k0, (ra_IDX * 4)(k1)
mtc0 k0, CP0_EPC
nop
nop
nop
lw ra, (ra_IDX * 4)(k1)
jr ra
nop
__in_int_context:
move ra, k0
/*
* In interrupt context, the interrupt handler has already
* saved the context for us, so we don't need to do it
* again.
*
* We now figure out whether the task we are switching in
* was saved in interrupt context or not.
*/
lw k0, (cp0_epc_IDX * 4)(k1)
bnez k0, __unwind_int_context
nop
/*
* Unwinding a task that was switched out in non-interrupt
* context, so restore only the partial context. But since
* we are in interrupt mode, we put ra into EPC and do an
* eret so that we leave interrupt mode and switch to the
* new task.
*/
__unwind_non_int_context:
lw s0, (s0_IDX * 4)(k1)
lw s1, (s1_IDX * 4)(k1)
lw s2, (s2_IDX * 4)(k1)
lw s3, (s3_IDX * 4)(k1)
lw s4, (s4_IDX * 4)(k1)
lw s5, (s5_IDX * 4)(k1)
lw s6, (s6_IDX * 4)(k1)
lw s7, (s7_IDX * 4)(k1)
lw s8, (s8_IDX * 4)(k1)
lw sp, (sp_IDX * 4)(k1)
lw gp, (gp_IDX * 4)(k1)
lw ra, (ra_IDX * 4)(k1)
mtc0 ra, CP0_EPC
nop
nop
nop
j __ret_from_switch
nop
__unwind_int_context:
move sp, k1
RESTORE_INT_CONTEXT
__ret_from_switch:
enable_global_interrupts
eret
/**
* archFirstThreadRestore(ATOM_TCB *new_tcb)
*
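To make the branches above easier to follow, here is a C-level sketch of the same decision logic. It is a sketch only: the helper functions are hypothetical stand-ins for the register load/store sequences, and the assembly above is authoritative. atomCurrentContext() is the atomthreads kernel call that returns the running thread's TCB, or NULL when executing in interrupt context.

    #include <stdint.h>

    typedef struct atom_tcb {
        void *sp_save_ptr;      /* must stay at offset 0: the asm relies on it */
        /* ... */
    } ATOM_TCB;

    extern ATOM_TCB *atomCurrentContext(void);

    /* Hypothetical helpers standing in for the asm sequences. */
    extern void save_callee_saved(uint32_t *frame);           /* sw s0-s8, sp, gp, ra */
    extern void restore_callee_saved(uint32_t *frame);        /* lw s0-s8, sp, gp, ra */
    extern void restore_full_frame_and_eret(uint32_t *frame); /* RESTORE_INT_CONTEXT; eret */

    #define cp0_epc_IDX 32

    void archContextSwitch(ATOM_TCB *old_tcb, ATOM_TCB *new_tcb)
    {
        uint32_t *new_frame = new_tcb->sp_save_ptr;

        if (atomCurrentContext() != NULL) {
            /* Thread context: a voluntary yield. Save only the
             * callee-saved set and mark the frame as partial by
             * storing zero in the EPC slot. */
            uint32_t *old_frame = old_tcb->sp_save_ptr;
            save_callee_saved(old_frame);
            old_frame[cp0_epc_IDX] = 0;
        }
        /* else: interrupt context -- the interrupt handler already
         * saved the full frame, so there is nothing more to save. */

        if (new_frame[cp0_epc_IDX] != 0) {
            /* Incoming task was saved by the interrupt handler:
             * full restore plus eret (__unwind_int_context). */
            restore_full_frame_and_eret(new_frame);
        } else {
            /* Incoming task yielded voluntarily: partial restore,
             * returning via jr ra, or via EPC and eret when we got
             * here in interrupt context (__unwind_non_int_context). */
            restore_callee_saved(new_frame);
        }
    }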


@@ -89,13 +89,33 @@ LEAF(_handle_interrupt)
and k0, k1, k0
beq k0, zero, 1f
nop
move k0, ra
move k1, v0
bal atomCurrentContext
nop
beq v0, zero, 2f /* v0 should be current context */
nop
move ra, k0
lw k0, 0(v0)
move v0, k1
move k1, k0
/*
* Note that we aren't loading any new SP; the context
* will be saved on the interrupted thread's stack.
*/
move k0, sp
la sp, _int_stack
move sp, k1
SAVE_INT_CONTEXT
bal handle_mips_systick
nop
RESTORE_INT_CONTEXT
1:
enable_global_interrupts
eret
2: b 2b
END(_handle_interrupt)
LEAF(_handle_cache_error)
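Two details in this handler are worth noting. First, k0 and k1 are the MIPS kernel-reserved scratch registers, which is why they carry ra and v0 across the bal atomCurrentContext call: bal clobbers ra, and the function's result comes back in v0. Second, the "2: b 2b" at the end is a deliberate infinite loop: if atomCurrentContext() returns no context there is no thread stack to save state on, so the handler parks the CPU rather than continue.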


@@ -67,76 +67,77 @@
#define NUM_REGISTERS 32
#define WORD_SIZE 4
#define v0_IDX 0
#define v1_IDX 1
#define a0_IDX 2
#define a1_IDX 3
#define a2_IDX 4
#define a3_IDX 5
#define t0_IDX 6
#define t1_IDX 7
#define t2_IDX 8
#define t3_IDX 9
#define t4_IDX 10
#define t5_IDX 11
#define t6_IDX 12
#define t7_IDX 13
#define s0_IDX 14
#define s1_IDX 15
#define s2_IDX 16
#define s3_IDX 17
#define s4_IDX 18
#define s5_IDX 19
#define s6_IDX 20
#define s7_IDX 21
#define t8_IDX 22
#define t9_IDX 23
#define sp_IDX 24
#define gp_IDX 25
#define s8_IDX 26
#define ra_IDX 27
#define k0_IDX 28
#define k1_IDX 29
#define at_IDX 30
#define zero_IDX 31
#define cp0_epc_IDX 32
#define CP0_INDEX $0
#define CP0_RANDOM $1
#define CP0_ENTRYLO0 $2
#define CP0_ENTRYLO1 $3
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
#define CP0_WIRED $6
#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_COUNT $9
#define CP0_ENTRYHI $10
#define CP0_COMPARE $11
#define CP0_STATUS $12
#define CP0_INTCTL $12,1
#define CP0_SRSCTL $12,2
#define CP0_SRSMAP $12,3
#define CP0_CAUSE $13
#define CP0_EPC $14
#define CP0_PRID $15
#define CP0_EBASE $15,1
#define CP0_CONFIG $16
#define CP0_CONFIG1 $16,1
#define CP0_CONFIG2 $16,2
#define CP0_CONFIG3 $16,3
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
#define CP0_DEBUG $23
#define CP0_DEPC $24
#define CP0_PERFCTL $25,0
#define CP0_PERFCNT $25,1
#define CP0_ECC $26
#define CP0_CACHEERR $27
#define CP0_TAGLO $28
#define CP0_DATALO $28,1
#define CP0_TAGHI $29
#define CP0_DATAHI $29,1
#define CP0_ERRORPC $30
#endif /* __ATOMPORT_PRIVATE_H_ */
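The new cp0_epc_IDX slot is the key addition in this header: index 32 is the extra word that SAVE_INT_CONTEXT fills via sw k1, (NUM_REGISTERS * 4)(sp), that archThreadContextInit (below) seeds with the thread's entry point, and that archContextSwitch tests, zero versus non-zero, to tell a partial voluntary-yield save from a full interrupt-time save.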


@@ -57,7 +57,7 @@ void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top,
#define STORE_VAL(base, reg, val) \
*((uint32_t *)(base + ((reg ## _IDX) * WORD_SIZE))) = (uint32_t)val
uint32_t stack_start = (uint32_t)(stack_top - (WORD_SIZE * NUM_REGISTERS));
uint32_t stack_start = (uint32_t)(stack_top - (WORD_SIZE * (NUM_REGISTERS + 1)));
tcb_ptr->sp_save_ptr = (void *)stack_start;
@@ -70,6 +70,7 @@ void archThreadContextInit (ATOM_TCB *tcb_ptr, void *stack_top,
STORE_VAL(stack_start, s5, 0);
STORE_VAL(stack_start, s6, 0);
STORE_VAL(stack_start, s7, 0);
STORE_VAL(stack_start, cp0_epc, entry_point);
STORE_VAL(stack_start, ra, entry_point);
STORE_VAL(stack_start, a0, entry_param);
}
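Expanding the macro by hand, the new STORE_VAL(stack_start, cp0_epc, entry_point) line is equivalent to:

    *((uint32_t *)(stack_start + (cp0_epc_IDX * WORD_SIZE))) = (uint32_t)entry_point;

Because that slot is non-zero, the first switch into a freshly created thread should take the full-restore path, loading EPC with the entry point and entering the thread through eret with a0 already holding entry_param.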


@@ -41,9 +41,22 @@
#define POINTER void *
/* Critical region protection */
#define CRITICAL_STORE
#define CRITICAL_START() __asm__ __volatile__("di $0\n\t")
#define CRITICAL_END() __asm__ __volatile__("ei $0\n\t");
#define CRITICAL_STORE unsigned int status_reg
#define CRITICAL_START() \
__asm__ __volatile__("di %0\t\n" \
"ssnop\t\n" \
"ssnop\t\n" \
"ssnop\t\n" \
"ehb\t\n" \
:"=r"(status_reg));
#define CRITICAL_END() \
__asm__ __volatile__("ei %0\t\n" \
"ssnop\t\n" \
"ssnop\t\n" \
"ssnop\t\n" \
"ehb\t\n" \
::"r"(status_reg));
/* Uncomment to enable stack-checking */
/* #define ATOM_STACK_CHECKING */
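For completeness, a small sketch of how these macros are used; this is the standard atomthreads critical-section idiom, with a hypothetical function for illustration:

    static void touch_shared_state(void)
    {
        CRITICAL_STORE;     /* expands to: unsigned int status_reg */

        CRITICAL_START();   /* di: clear Status.IE, old Status -> status_reg */
        /* ... modify data shared with interrupt handlers ... */
        CRITICAL_END();     /* ei: set Status.IE again */
    }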