B.Robinson MIPS patch

Greg Menke
2006-06-08 18:03:55 +00:00
parent 549e88f623
commit 7c99007641
11 changed files with 252 additions and 189 deletions

View File

@@ -1,7 +1,13 @@
2006-06-08 Bruce Robinson <brucer@pmccorp.com>
* Makefile.am: Add interruptmask.c.
* shared/interrupts/interruptmask.c: TX49 conditional mask computation.
* shared/interrupts/vectorexceptions.c: Corrections to exception codes
& descriptions.
2006-05-16 Ralf Corsépius <ralf.corsepius@rtems.org>
* configure.ac: Use RTEMS_AMPOLISH3.
2006-04-02 Ralf Corsépius <ralf.corsepius@rtems.org>
* Makefile.am: Remove bogus CFLAGS_OPTIMIZE_V.

View File

@@ -41,7 +41,8 @@ endif
noinst_PROGRAMS += shared/interrupts.rel
shared_interrupts_rel_SOURCES = shared/interrupts/installisrentries.c \
shared/interrupts/vectorexceptions.c shared/interrupts/isr_entries.S
shared/interrupts/vectorexceptions.c shared/interrupts/interruptmask.c \
shared/interrupts/isr_entries.S
shared_interrupts_rel_CPPFLAGS = $(AM_CPPFLAGS) $(interrupts_CPPFLAGS)
shared_interrupts_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)

View File

@@ -0,0 +1,24 @@
/*
* $Id: interruptmask.c,v 1.0 2006/04/04 05:18:49
*/
#include <rtems.h>
/*
* This function returns a mask value which is used to select the bits
* in the processor status register that can be set to enable interrupts.
* The mask value should not include the 2 software interrupt enable bits.
*/
uint32_t mips_interrupt_mask( void )
{
uint32_t interrupt_mask;
#ifdef TX49
interrupt_mask = 0x00000400; /* Toshiba TX49 processors have a non-standard interrupt mask */
#else
interrupt_mask = 0x0000fc00;
#endif
return(interrupt_mask);
}
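
For context only (not part of the patch): a minimal sketch of how the mask returned above is meant to be combined with the global interrupt-enable bit when building a Status-register value, assuming SR_IE is the standard MIPS Status IE bit (bit 0). The patch itself performs this combination inside _CPU_ISR_Set_level() in cpu.c and _CPU_Context_Initialize() in cpu.h.

#include <stdint.h>

#define SR_IE 0x00000001                /* assumed: MIPS Status register IE bit */

uint32_t mips_interrupt_mask( void );   /* provided by interruptmask.c above    */

/* Status-register bits that enable every hardware interrupt the CPU model
 * supports: 0xfc01 on a generic MIPS, 0x0401 on a Toshiba TX49.             */
static uint32_t sr_enable_all_hw_ints( void )
{
  return mips_interrupt_mask() | SR_IE;
}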

View File

@@ -24,17 +24,18 @@ static const char *cause_strings[32] =
/* 5 */ "Address Store",
/* 6 */ "Instruction Bus Error",
/* 7 */ "Data Bus Error",
/* 9 */ "Syscall",
/* 10 */ "Breakpoint",
/* 11 */ "Reserved Instruction",
/* 12 */ "Coprocessor Unuseable",
/* 13 */ "Overflow",
/* 14 */ "Trap",
/* 15 */ "Instruction Virtual Coherency Error",
/* 16 */ "FP Exception",
/* 8 */ "Syscall",
/* 9 */ "Breakpoint",
/* 10 */ "Reserved Instruction",
/* 11 */ "Coprocessor Unuseable",
/* 12 */ "Overflow",
/* 13 */ "Trap",
/* 14 */ "Instruction Virtual Coherency Error",
/* 15 */ "FP Exception",
/* 16 */ "Reserved 16",
/* 17 */ "Reserved 17",
/* 18 */ "Reserved 17",
/* 19 */ "Reserved 17",
/* 18 */ "Reserved 18",
/* 19 */ "Reserved 19",
/* 20 */ "Reserved 20",
/* 21 */ "Reserved 21",
/* 22 */ "Reserved 22",

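For readers unfamiliar with the table above: it is indexed by the ExcCode field of the CP0 Cause register, which occupies bits 2..6. A hedged sketch of the lookup (illustration only; exception_name is not a function in the patch, and cause_strings is the array above):

/* Map a raw CP0 Cause value to its description.  ExcCode = Cause[6:2]. */
static const char *exception_name( uint32_t cause )
{
  return cause_strings[ (cause >> 2) & 0x1f ];
}
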
View File

@@ -1,3 +1,11 @@
2006-06-08 Bruce Robinson <brucer@pmccorp.com>
* cpu.c: Add int64 types for __mips==3 cpus, incorporate
mips_interrupt_mask() into mask computations.
* cpu_asm.S: Add int64 register save/restores for __mips==3 cpus. Adjustment
of mips1 vs mips3 macros.
* cpu.h: Add int64 types for __mips==3 cpus.
2006-03-17 Ralf Corsepius <ralf.corsepius@rtems.org>
* cpu.c (_CPU_Initialize): Add fpu initialization.

View File

@@ -50,11 +50,18 @@
/*
** local dword used in cpu_asm to pass the exception stack frame to the
** context switch code.
** Exception stack frame pointer used in cpu_asm to pass the exception stack frame
** address to the context switch code.
*/
unsigned __exceptionStackFrame = 0;
#if (__mips == 1)
typedef uint32_t ESF_PTR_TYPE;
#elif (__mips == 3)
typedef uint64_t ESF_PTR_TYPE;
#else
#error "unknown MIPS ISA"
#endif
ESF_PTR_TYPE __exceptionStackFrame = 0;
@@ -107,11 +114,11 @@ uint32_t _CPU_ISR_Get_level( void )
#if (__mips == 3) || (__mips == 32)
/* IE bit and shift down hardware ints into bits 1 thru 6 */
sr = (sr & SR_IE) | ((sr & 0xfc00) >> 9);
sr = (sr & SR_IE) | ((sr & mips_interrupt_mask()) >> 9);
#elif __mips == 1
/* IEC bit and shift down hardware ints into bits 1 thru 6 */
sr = (sr & SR_IEC) | ((sr & 0xfc00) >> 9);
sr = (sr & SR_IEC) | ((sr & mips_interrupt_mask()) >> 9);
#else
#error "CPU ISR level: unknown MIPS level for SR handling"
@@ -142,8 +149,8 @@ void _CPU_ISR_Set_level( uint32_t new_level )
srbits = sr & ~(0xfc00 | SR_IE);
sr = srbits | ((new_level==0)? (0xfc00 | SR_IE): \
(((new_level<<9) & 0xfc00) | \
sr = srbits | ((new_level==0)? (mips_interrupt_mask() | SR_IE): \
(((new_level<<9) & mips_interrupt_mask()) | \
((new_level & 1)?SR_IE:0)));
/*
if ( (new_level & SR_EXL) == (sr & SR_EXL) )

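Illustration only (not part of the patch): the two hunks above replace the hard-coded 0xfc00 with mips_interrupt_mask(). Written as plain C, and assuming SR_IE is Status bit 0, the encoding they implement packs the hardware-interrupt enables into bits 1..6 of the level and the global enable into bit 0:

#include <stdint.h>

#define SR_IE 0x00000001                /* assumed: MIPS Status register IE bit */

uint32_t mips_interrupt_mask( void );

/* Level-to-SR bits, mirroring the new _CPU_ISR_Set_level() expression.
 * Level 0 means "everything enabled"; the real code also preserves the
 * unrelated SR bits it masked off into srbits beforehand.               */
static uint32_t level_to_sr_bits( uint32_t new_level )
{
  if ( new_level == 0 )
    return mips_interrupt_mask() | SR_IE;
  return ((new_level << 9) & mips_interrupt_mask())
         | ((new_level & 1) ? SR_IE : 0);
}

/* SR-to-level bits, mirroring the new _CPU_ISR_Get_level() line: keep IE
 * in bit 0 and shift the IM field down into bits 1..6.                   */
static uint32_t sr_to_level_bits( uint32_t sr )
{
  return (sr & SR_IE) | ((sr & mips_interrupt_mask()) >> 9);
}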
View File

@@ -92,18 +92,20 @@
#if __mips == 3
/* 64 bit register operations */
#define NOP nop
/*
#define ADD dadd
#define MFCO dmfc0
#define MTCO dmtc0
*/
#define ADD add
#define MFCO mfc0
#define MTCO mtc0
#define STREG sd
#define LDREG ld
#define MFCO dmfc0 /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define MTCO dmtc0 /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define ADDU addu
#define ADDIU addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64) /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ 8
#define F_SZ 8
#define SZ_INT 8
@@ -121,6 +123,8 @@
#define MTCO mtc0
#define ADDU add
#define ADDIU addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ 4
#define F_SZ 4
#define SZ_INT 4
@@ -223,20 +227,20 @@ FRAME(_CPU_Context_save_fp,sp,0,ra)
** is here because the FPU context switch might occur when an
** integer task is switching out with a FP task switching in.
*/
MFC0 t0,C0_SR
mfc0 t0,C0_SR
li t2,SR_CU1
move t1,t0
or t0,t2 /* turn on the fpu */
#if __mips == 3
li t2,SR_EXL | SR_IE
#if (__mips == 3) || (__mips == 32)
li t2,SR_IE
#elif __mips == 1
li t2,SR_IEC
#endif
not t2
and t0,t2 /* turn off interrupts */
MTC0 t0,C0_SR
mtc0 t0,C0_SR
ld a1,(a0)
lw a1,(a0) /* get address of context storage area */
move t0,ra
jal _CPU_Context_save_fp_from_exception
NOP
@@ -244,44 +248,44 @@ FRAME(_CPU_Context_save_fp,sp,0,ra)
/*
** Reassert the task's state because we've not saved it yet.
*/
MTC0 t1,C0_SR
mtc0 t1,C0_SR
j t0
NOP
.globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
swc1 $f0,FP0_OFFSET*F_SZ(a1)
swc1 $f1,FP1_OFFSET*F_SZ(a1)
swc1 $f2,FP2_OFFSET*F_SZ(a1)
swc1 $f3,FP3_OFFSET*F_SZ(a1)
swc1 $f4,FP4_OFFSET*F_SZ(a1)
swc1 $f5,FP5_OFFSET*F_SZ(a1)
swc1 $f6,FP6_OFFSET*F_SZ(a1)
swc1 $f7,FP7_OFFSET*F_SZ(a1)
swc1 $f8,FP8_OFFSET*F_SZ(a1)
swc1 $f9,FP9_OFFSET*F_SZ(a1)
swc1 $f10,FP10_OFFSET*F_SZ(a1)
swc1 $f11,FP11_OFFSET*F_SZ(a1)
swc1 $f12,FP12_OFFSET*F_SZ(a1)
swc1 $f13,FP13_OFFSET*F_SZ(a1)
swc1 $f14,FP14_OFFSET*F_SZ(a1)
swc1 $f15,FP15_OFFSET*F_SZ(a1)
swc1 $f16,FP16_OFFSET*F_SZ(a1)
swc1 $f17,FP17_OFFSET*F_SZ(a1)
swc1 $f18,FP18_OFFSET*F_SZ(a1)
swc1 $f19,FP19_OFFSET*F_SZ(a1)
swc1 $f20,FP20_OFFSET*F_SZ(a1)
swc1 $f21,FP21_OFFSET*F_SZ(a1)
swc1 $f22,FP22_OFFSET*F_SZ(a1)
swc1 $f23,FP23_OFFSET*F_SZ(a1)
swc1 $f24,FP24_OFFSET*F_SZ(a1)
swc1 $f25,FP25_OFFSET*F_SZ(a1)
swc1 $f26,FP26_OFFSET*F_SZ(a1)
swc1 $f27,FP27_OFFSET*F_SZ(a1)
swc1 $f28,FP28_OFFSET*F_SZ(a1)
swc1 $f29,FP29_OFFSET*F_SZ(a1)
swc1 $f30,FP30_OFFSET*F_SZ(a1)
swc1 $f31,FP31_OFFSET*F_SZ(a1)
STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
cfc1 a0,$31 /* Read FP status/control reg */
cfc1 a0,$31 /* Two reads clear pipeline */
NOP
@@ -322,20 +326,20 @@ FRAME(_CPU_Context_restore_fp,sp,0,ra)
** is here because the FPU context switch might occur when an
** integer task is switching out with a FP task switching in.
*/
MFC0 t0,C0_SR
mfc0 t0,C0_SR
li t2,SR_CU1
move t1,t0
or t0,t2 /* turn on the fpu */
#if __mips == 3
li t2,SR_EXL | SR_IE
#if (__mips == 3) || (__mips == 32)
li t2,SR_IE
#elif __mips == 1
li t2,SR_IEC
#endif
not t2
and t0,t2 /* turn off interrupts */
MTC0 t0,C0_SR
mtc0 t0,C0_SR
ld a1,(a0)
lw a1,(a0) /* get address of context storage area */
move t0,ra
jal _CPU_Context_restore_fp_from_exception
NOP
@@ -344,49 +348,49 @@ FRAME(_CPU_Context_restore_fp,sp,0,ra)
** Reassert the old task's state because we've not restored the
** new one yet.
*/
MTC0 t1,C0_SR
mtc0 t1,C0_SR
j t0
NOP
.globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
lwc1 $f0,FP0_OFFSET*4(a1)
lwc1 $f1,FP1_OFFSET*4(a1)
lwc1 $f2,FP2_OFFSET*4(a1)
lwc1 $f3,FP3_OFFSET*4(a1)
lwc1 $f4,FP4_OFFSET*4(a1)
lwc1 $f5,FP5_OFFSET*4(a1)
lwc1 $f6,FP6_OFFSET*4(a1)
lwc1 $f7,FP7_OFFSET*4(a1)
lwc1 $f8,FP8_OFFSET*4(a1)
lwc1 $f9,FP9_OFFSET*4(a1)
lwc1 $f10,FP10_OFFSET*4(a1)
lwc1 $f11,FP11_OFFSET*4(a1)
lwc1 $f12,FP12_OFFSET*4(a1)
lwc1 $f13,FP13_OFFSET*4(a1)
lwc1 $f14,FP14_OFFSET*4(a1)
lwc1 $f15,FP15_OFFSET*4(a1)
lwc1 $f16,FP16_OFFSET*4(a1)
lwc1 $f17,FP17_OFFSET*4(a1)
lwc1 $f18,FP18_OFFSET*4(a1)
lwc1 $f19,FP19_OFFSET*4(a1)
lwc1 $f20,FP20_OFFSET*4(a1)
lwc1 $f21,FP21_OFFSET*4(a1)
lwc1 $f22,FP22_OFFSET*4(a1)
lwc1 $f23,FP23_OFFSET*4(a1)
lwc1 $f24,FP24_OFFSET*4(a1)
lwc1 $f25,FP25_OFFSET*4(a1)
lwc1 $f26,FP26_OFFSET*4(a1)
lwc1 $f27,FP27_OFFSET*4(a1)
lwc1 $f28,FP28_OFFSET*4(a1)
lwc1 $f29,FP29_OFFSET*4(a1)
lwc1 $f30,FP30_OFFSET*4(a1)
lwc1 $f31,FP31_OFFSET*4(a1)
LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
cfc1 a0,$31 /* Read from FP status/control reg */
cfc1 a0,$31 /* Two reads clear pipeline */
NOP /* NOPs ensure execution */
NOP
lw a0,FPCS_OFFSET*4(a1) /* Load saved FPCS value */
lw a0,FPCS_OFFSET*F_SZ(a1) /* Load saved FPCS value */
NOP
ctc1 a0,$31 /* Restore FPCS register */
NOP
@@ -410,7 +414,7 @@ ENDFRAME(_CPU_Context_restore_fp)
FRAME(_CPU_Context_switch,sp,0,ra)
.set noreorder
MFC0 t0,C0_SR
mfc0 t0,C0_SR
#if (__mips == 3) || (__mips == 32)
li t1,SR_IE
#elif __mips == 1
@@ -419,7 +423,7 @@ FRAME(_CPU_Context_switch,sp,0,ra)
STREG t0,C0_SR_OFFSET*R_SZ(a0) /* save the task's SR */
not t1
and t0,t1 /* mask off interrupts while we context switch */
MTC0 t0,C0_SR
mtc0 t0,C0_SR
NOP
STREG ra,RA_OFFSET*R_SZ(a0) /* save current context */
@@ -455,7 +459,7 @@ FRAME(_CPU_Context_switch,sp,0,ra)
NOP
LDREG t0,R_EPC*R_SZ(t1) /* get the userspace EPC from the frame */
b 2f
nop
NOP
1: la t0,_Thread_Dispatch /* if ==0, we're switched out */
@@ -478,7 +482,7 @@ _CPU_Context_switch_restore:
LDREG t0, C0_SR_OFFSET*R_SZ(a1)
/* NOP */
/*#if __mips == 3 */
/*#if (__mips == 3) || (__mips == 32) */
/* andi t0,SR_EXL */
/* bnez t0,_CPU_Context_1 */ /* set exception level from restore context */
/* li t0,~SR_EXL */
@@ -530,7 +534,7 @@ _CPU_Context_switch_restore:
/*
** Save IE
*/
or t2, SR_IE
or t2,SR_IE
#elif __mips == 1
/*
** Save current, previous & old int enables. This is key because
@@ -543,11 +547,11 @@ _CPU_Context_switch_restore:
#endif
and t0,t2 /* keep only the per-task bits */
MFC0 t1,C0_SR /* grab the current SR */
mfc0 t1,C0_SR /* grab the current SR */
not t2
and t1,t2 /* mask off the old task's per-task bits */
or t1,t0 /* or in the new task's bits */
MTC0 t1,C0_SR /* and load the new SR */
mtc0 t1,C0_SR /* and load the new SR */
NOP
/* _CPU_Context_1: */
@@ -578,11 +582,11 @@ FRAME(_CPU_Context_restore,sp,0,ra)
ENDFRAME(_CPU_Context_restore)
ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)
ASM_EXTERN(_ISR_Nest_level,4)
ASM_EXTERN(_Thread_Dispatch_disable_level,4)
ASM_EXTERN(_Context_Switch_necessary,4)
ASM_EXTERN(_ISR_Signals_to_thread_executing,4)
ASM_EXTERN(_Thread_Executing,4)
.extern _Thread_Dispatch
.extern _ISR_Vector_table
@@ -678,8 +682,8 @@ FRAME(_ISR_Handler,sp,0,ra)
STREG AT, R_AT*R_SZ(sp)
.set at
MFC0 t0,C0_SR
MFC0 t1,C0_EPC
mfc0 t0,C0_SR
MFCO t1,C0_EPC
STREG t0,R_SR*R_SZ(sp)
STREG t1,R_EPC*R_SZ(sp)
@@ -692,7 +696,7 @@ FRAME(_ISR_Handler,sp,0,ra)
/* determine if an interrupt generated this exception */
MFC0 t0,C0_CAUSE
mfc0 t0,C0_CAUSE
NOP
and t1,t0,CAUSE_EXCMASK
@@ -725,9 +729,9 @@ _ISR_Handler_Exception:
/* CP0 special registers */
#if __mips == 1
MFC0 t0,C0_TAR
mfc0 t0,C0_TAR
#endif
MFC0 t1,C0_BADVADDR
MFCO t1,C0_BADVADDR
#if __mips == 1
STREG t0,R_TAR*R_SZ(sp)
@@ -737,7 +741,7 @@ _ISR_Handler_Exception:
STREG t1,R_BADVADDR*R_SZ(sp)
#if ( CPU_HARDWARE_FP == TRUE )
MFC0 t0,C0_SR /* we have a FPU, save state if enabled */
mfc0 t0,C0_SR /* FPU is enabled, save state */
NOP
srl t0,t0,16
andi t0,t0,(SR_CU1 >> 16)
@@ -747,8 +751,8 @@ _ISR_Handler_Exception:
la a1,R_F0*R_SZ(sp)
jal _CPU_Context_save_fp_from_exception
NOP
MFC1 t0,C1_REVISION
MFC1 t1,C1_STATUS
mfc1 t0,C1_REVISION
mfc1 t1,C1_STATUS
STREG t0,R_FEIR*R_SZ(sp)
STREG t1,R_FCSR*R_SZ(sp)
@@ -829,7 +833,7 @@ excreturn:
#if ( CPU_HARDWARE_FP == TRUE )
MFC0 t0,C0_SR /* FPU is present, restore state if enabled */
mfc0 t0,C0_SR /* FPU is enabled, restore state */
NOP
srl t0,t0,16
andi t0,t0,(SR_CU1 >> 16)
@@ -841,8 +845,8 @@ excreturn:
NOP
LDREG t0,R_FEIR*R_SZ(sp)
LDREG t1,R_FCSR*R_SZ(sp)
MTC1 t0,C1_REVISION
MTC1 t1,C1_STATUS
mtc1 t0,C1_REVISION
mtc1 t1,C1_STATUS
2:
#endif
LDREG s0,R_S0*R_SZ(sp) /* restore s0 - s7 */
@@ -870,7 +874,7 @@ excreturn:
_ISR_Handler_1:
MFC0 t1,C0_SR
mfc0 t1,C0_SR
and t0,CAUSE_IPMASK
and t0,t1
@@ -879,8 +883,7 @@ _ISR_Handler_1:
/* Then where did it come from??? */
beq t0,zero,_ISR_Handler_exit
nop
NOP
/*
@@ -893,20 +896,21 @@ _ISR_Handler_1:
* #endif
*/
/*
* _ISR_Nest_level++;
*/
LDREG t0,_ISR_Nest_level
lw t0,_ISR_Nest_level
NOP
ADD t0,t0,1
STREG t0,_ISR_Nest_level
add t0,t0,1
sw t0,_ISR_Nest_level
/*
* _Thread_Dispatch_disable_level++;
*/
LDREG t1,_Thread_Dispatch_disable_level
lw t1,_Thread_Dispatch_disable_level
NOP
ADD t1,t1,1
STREG t1,_Thread_Dispatch_disable_level
add t1,t1,1
sw t1,_Thread_Dispatch_disable_level
/*
* Call the CPU model or BSP specific routine to decode the
@@ -932,17 +936,17 @@ _ISR_Handler_1:
/*
* --_ISR_Nest_level;
*/
LDREG t2,_ISR_Nest_level
lw t2,_ISR_Nest_level
NOP
ADD t2,t2,-1
STREG t2,_ISR_Nest_level
add t2,t2,-1
sw t2,_ISR_Nest_level
/*
* --_Thread_Dispatch_disable_level;
*/
LDREG t1,_Thread_Dispatch_disable_level
lw t1,_Thread_Dispatch_disable_level
NOP
ADD t1,t1,-1
STREG t1,_Thread_Dispatch_disable_level
add t1,t1,-1
sw t1,_Thread_Dispatch_disable_level
/*
* if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
* goto the label "exit interrupt (simple case)"
@@ -952,8 +956,6 @@ _ISR_Handler_1:
NOP
/*
* #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
* restore stack
@@ -962,8 +964,8 @@ _ISR_Handler_1:
* if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
* goto the label "exit interrupt (simple case)"
*/
LDREG t0,_Context_Switch_necessary
LDREG t1,_ISR_Signals_to_thread_executing
lw t0,_Context_Switch_necessary
lw t1,_ISR_Signals_to_thread_executing
NOP
or t0,t0,t1
beq t0,zero,_ISR_Handler_exit
@@ -984,7 +986,7 @@ _ISR_Handler_1:
** that prevents recursive entry into Thread_Dispatch.
*/
MFC0 t0, C0_SR
mfc0 t0, C0_SR
#if __mips == 1
li t1,SR_IEC
@@ -1001,7 +1003,7 @@ _ISR_Handler_1:
or t0, SR_IE
#endif
MTC0 t0, C0_SR
mtc0 t0, C0_SR
NOP
/* save off our stack frame so the context switcher can get to it */
@@ -1023,7 +1025,7 @@ _ISR_Handler_1:
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
MFC0 t0, C0_SR
mfc0 t0, C0_SR
#if __mips == 1
@@ -1031,30 +1033,29 @@ _ISR_Handler_1:
li t1,SR_IEC | SR_KUP | SR_KUC
not t1
and t0, t1
MTC0 t0, C0_SR
mtc0 t0, C0_SR
NOP
#elif (__mips == 3) || (__mips == 32)
#elif (__mips == 3) || (__mips == 32)
move t2, t0
/* make sure XL & IE are clear so ints are disabled & we can update EPC for the return */
li t1,SR_EXL | SR_IE
/* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
li t1,SR_IE /* Clear IE first (recommended) */
not t1
and t0,t1
MTC0 t0,C0_SR
mtc0 t0,C0_SR
NOP
/* store new EPC value, which we can do since XL=0 */
/* apply task's SR with EXL set so the eret will return properly */
or t0, SR_EXL | SR_IE
mtc0 t0, C0_SR
NOP
/* store new EPC value, which we can do since EXL=0 */
LDREG t0, R_EPC*R_SZ(sp)
NOP
MTC0 t0, C0_EPC
MTCO t0, C0_EPC
NOP
/* apply task's SR with XL set so the eret will return properly */
or t2, SR_EXL
MTC0 t2, C0_SR
NOP
#endif

View File

@@ -442,7 +442,7 @@ typedef struct {
__MIPS_FPU_REGISTER_TYPE fp29;
__MIPS_FPU_REGISTER_TYPE fp30;
__MIPS_FPU_REGISTER_TYPE fp31;
__MIPS_FPU_REGISTER_TYPE fpcs;
uint32_t fpcs;
#endif
} Context_Control_fp;
@@ -766,6 +766,13 @@ extern unsigned int mips_interrupt_number_of_vectors;
#define _CPU_Initialize_vectors()
/*
* Declare the function, present in the shared libcpu directory, that
* returns the processor-dependent interrupt mask.
*/
uint32_t mips_interrupt_mask( void );
/*
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in _level.
@@ -872,12 +879,16 @@ void _CPU_ISR_Set_level( uint32_t ); /* in cpu.c */
#if (__mips == 3) || (__mips == 32)
#define _INTON SR_IE
#if __mips_fpr==64
#define _EXTRABITS SR_FR
#else
#define _EXTRABITS 0
#endif
#endif /* __mips_fpr==64 */
#endif /* __mips == 3 */
#if __mips == 1
#define _INTON SR_IEC
#define _EXTRABITS 0 /* make sure we're in user mode on MIPS1 processors */
#endif
#endif /* __mips == 1 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, _isr, _entry_point, _is_fp ) \
{ \
@@ -888,9 +899,8 @@ void _CPU_ISR_Set_level( uint32_t ); /* in cpu.c */
(_the_context)->sp = _stack_tmp; \
(_the_context)->fp = _stack_tmp; \
(_the_context)->ra = (__MIPS_REGISTER_TYPE)_entry_point; \
(_the_context)->c0_sr = ((_intlvl==0)?(0xFF00 | _INTON):( ((_intlvl<<9) & 0xfc00) | \
0x300 | \
((_intlvl & 1)?_INTON:0)) ) | \
(_the_context)->c0_sr = ((_intlvl==0)?(mips_interrupt_mask() | 0x300 | _INTON): \
( ((_intlvl<<9) & mips_interrupt_mask()) | 0x300 | ((_intlvl & 1)?_INTON:0)) ) | \
SR_CU0 | ((_is_fp)?SR_CU1:0) | _EXTRABITS; \
}
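
A worked example of the new c0_sr expression (illustration only, assuming the usual Status bit values SR_IE = 0x1 and SR_CU0 = 0x10000000, with _intlvl == 0, a non-FP task, and __mips_fpr == 32 so _EXTRABITS == 0):

/*
 *   generic MIPS:  (0x0000fc00 | 0x300 | SR_IE) | SR_CU0 = 0x1000ff01
 *   Toshiba TX49:  (0x00000400 | 0x300 | SR_IE) | SR_CU0 = 0x10000701
 *
 * The 0x300 term keeps the two software-interrupt mask bits (IM0/IM1) set,
 * which is why mips_interrupt_mask() deliberately excludes them.
 */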

View File

@@ -1,3 +1,8 @@
2006-06-08 Bruce Robinson <brucer@pmccorp.com>
* custom/rbtx4925.cfg: Fix CPU_CFLAGS defines.
* custom/rbtx4938.cfg: Fix CPU_CFLAGS defines.
2005-04-10 Eric Norum <norume@aps.anl.gov>
* custom/uC5282.cfg: Keep frame pointer -- gdb is much more useful.

View File

@@ -13,7 +13,7 @@ RTEMS_BSP_FAMILY=rbtx4925
# This contains the compiler options necessary to select the CPU model
# and (hopefully) optimize for it.
CPU_CFLAGS = -mips3 -G0 -EL -DCPU_TX49
CPU_CFLAGS = -mips3 -G0 -EL
# optimize flag: typically -O, could use -O4 or -fast
# -O4 is ok for RTEMS

View File

@@ -13,7 +13,7 @@ RTEMS_BSP_FAMILY=rbtx4938
# This contains the compiler options necessary to select the CPU model
# and (hopefully) optimize for it.
CPU_CFLAGS = -mips3 -G0 -EL -DCPU_TX49
CPU_CFLAGS = -mips3 -G0 -EL
# optimize flag: typically -O, could use -O4 or -fast
# -O4 is ok for RTEMS