2001-02-01	Greg Menke <gregory.menke@gsfc.nasa.gov>

	* cpu.c: Enhancements and fixes for modifying the SR when changing
	the interrupt level.
	* cpu_asm.S: Fixed handling of FP enable bit so it is properly
	managed on a per-task basis, improved handling of interrupt levels,
	and made deferred FP contexts work on the MIPS.
	* rtems/score/cpu.h: Modified to support above changes.
ChangeLog

@@ -1,3 +1,12 @@
+2001-02-01	Greg Menke <gregory.menke@gsfc.nasa.gov>
+
+	* cpu.c: Enhancements and fixes for modifying the SR when changing
+	the interrupt level.
+	* cpu_asm.S: Fixed handling of FP enable bit so it is properly
+	managed on a per-task basis, improved handling of interrupt levels,
+	and made deferred FP contexts work on the MIPS.
+	* rtems/score/cpu.h: Modified to support above changes.
+
 2002-01-28	Ralf Corsepius <corsepiu@faw.uni-ulm.de>
 
 	* rtems/Makefile.am: Removed.
cpu.c

@@ -82,51 +82,66 @@ unsigned32 _CPU_ISR_Get_level( void )
   mips_get_sr(sr);
 
 #if __mips == 3
-  return ((sr & SR_EXL) >> 1);
+  /* EXL bit and shift down hardware ints into bits 1 thru 6 */
+  return ((sr & SR_EXL) >> 1) + ((sr & 0xfc00) >> 9);
+
 #elif __mips == 1
-  return ((sr & SR_IEC) ? 0 : 1);
+  /* IEC bit and shift down hardware ints into bits 1 thru 6 */
+  return (sr & SR_IEC) + ((sr & 0xfc00) >> 9);
+
 #else
 #error "CPU ISR level: unknown MIPS level for SR handling"
 #endif
 }
 
 
 void _CPU_ISR_Set_level( unsigned32 new_level )
 {
-  unsigned int sr;
+  unsigned int sr, srbits;
+
+  /*
+  ** mask off the int level bits only so we can
+  ** preserve software int settings and FP enable
+  ** for this thread.  Note we don't force software ints
+  ** enabled when changing level, they were turned on
+  ** when this task was created, but may have been turned
+  ** off since, so we'll just leave them alone.
+  */
 
   mips_get_sr(sr);
 
 #if __mips == 3
+  mips_set_sr(sr & ~SR_IE);          /* first disable ie bit (recommended) */
+
+  srbits = sr & ~(0xfc00 | SR_EXL | SR_IE);
+
+  sr = srbits | ((new_level==0)? (0xfc00 | SR_EXL | SR_IE): \
+                 (((new_level<<9) & 0xfc000) | \
+                  (new_level & 1)?(SR_EXL | SR_IE):0));
+/*
   if ( (new_level & SR_EXL) == (sr & SR_EXL) )
     return;
 
   if ( (new_level & SR_EXL) == 0 ) {
-    sr &= ~SR_EXL;                   /* clear the EXL bit */
+    sr &= ~SR_EXL;                   * clear the EXL bit *
     mips_set_sr(sr);
   } else {
-    sr &= ~SR_IE;
-    mips_set_sr(sr);                 /* first disable ie bit (recommended) */
 
-    sr |= SR_EXL|SR_IE;              /* enable exception level */
-    mips_set_sr(sr);                 /* first disable ie bit (recommended) */
+    sr |= SR_EXL|SR_IE;              * enable exception level *
+    mips_set_sr(sr);                 * first disable ie bit (recommended) *
   }
+*/
 
 #elif __mips == 1
-  if ( (new_level & SR_IEC) == (sr & SR_IEC) )
-    return;
-
-  sr &= ~SR_IEC;                     /* clear the IEC bit */
-  if ( !new_level )
-    sr |= SR_IEC;                    /* enable interrupts */
-
-  mips_set_sr(sr);
+  mips_set_sr( (sr & ~SR_IEC) );
 
+  srbits = sr & ~(0xfc00 | SR_IEC);
+  sr = srbits | ((new_level==0)?0xfc01:( ((new_level<<9) & 0xfc000) | (new_level & 1)));
 #else
 #error "CPU ISR level: unknown MIPS level for SR handling"
 #endif
+  mips_set_sr( sr );
 }
 
 /*PAGE
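[Note: the following standalone C sketch is not part of the commit; it only illustrates the interrupt-level encoding the new _CPU_ISR_Get_level()/_CPU_ISR_Set_level() use on __mips == 1.  Bit 0 of the level mirrors the SR IEC bit and bits 1 thru 6 mirror the six hardware interrupt mask bits SR[15:10] (the 0xfc00 mask in the diff).  The SR constants are restated locally with their conventional MIPS1 values so the example compiles on its own.]

#include <assert.h>
#include <stdio.h>

#define SR_IEC     0x0001u   /* mips1 "interrupts enabled, current" bit        */
#define SR_HW_MASK 0xfc00u   /* SR[15:10], the six hardware interrupt enables  */

/* SR -> level, mirroring the new _CPU_ISR_Get_level() for __mips == 1 */
static unsigned int sr_to_level(unsigned int sr)
{
  return (sr & SR_IEC) + ((sr & SR_HW_MASK) >> 9);
}

/* level -> SR interrupt bits, the idealized inverse of the above:
 * level 0 turns everything on, otherwise bit 0 drives IEC and
 * bits 1..6 drive SR[10..15].                                      */
static unsigned int level_to_sr_bits(unsigned int level)
{
  if (level == 0)
    return SR_HW_MASK | SR_IEC;
  return ((level << 9) & SR_HW_MASK) | (level & SR_IEC);
}

int main(void)
{
  unsigned int sr = SR_HW_MASK | SR_IEC;        /* everything enabled          */
  assert(sr_to_level(sr) == 0x7f);              /* IEC bit + six mask bits     */
  printf("level for fully-enabled SR: 0x%x\n", sr_to_level(sr));
  printf("SR bits for level 0x41:     0x%x\n", level_to_sr_bits(0x41));
  return 0;
}

Level 0 therefore means "everything enabled", which matches the 0xfc01 value written by the new __mips == 1 branch of _CPU_ISR_Set_level().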
cpu_asm.S

@@ -23,6 +23,13 @@
  *  2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
  *        rewriting as much as possible in C and added the JMR3904 BSP
  *        so testing could be performed on a simulator.
+ *  2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
+ *        performance, tweaking this code and the isr vectoring routines
+ *        to reduce overhead & latencies.  Added optional
+ *        instrumentation as well.
+ *  2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
+ *        cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
+ *        and deferred FP contexts.
  *
  *  COPYRIGHT (c) 1989-2000.
  *  On-Line Applications Research Corporation (OAR).
@@ -42,8 +49,9 @@
  *  Mongoose-ism debug tool- but may be handy in the future so we
  *  left it in...
  */
-/* #define INSTRUMENT */
 
+#define INSTRUMENT_ISR_VECTORING
+//#define INSTRUMENT_EXECUTING_THREAD
 
 
 
@@ -165,9 +173,27 @@
  *   );
  */
 
-#if ( CPU_HARDWARE_FP == TRUE )
+#if ( CPU_HARDWARE_FP == FALSE )
 FRAME(_CPU_Context_save_fp,sp,0,ra)
         .set noat
+
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+	/*
+	** Make sure the FPU is on before we save state.  This code is here
+	** because the FPU context switch might occur when an integer
+	** task is switching out w/ an FP task switching in, but the current
+	** FPU state was left by a sometime previously scheduled FP task.
+	**
+	** In non-deferred FP context switch, if the exiting task is FP, then
+	** the FPU is already on so we don't need to do this.
+	*/
+
+	MFC0	t0,C0_SR
+	li	k0,SR_CU1
+	or	t0,k0
+	MTC0	t0,C0_SR
+#endif
+
         ld a1,(a0)
         NOP
         swc1 $f0,FP0_OFFSET*F_SZ(a1)
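[Note: illustration only, not part of the commit.  The MFC0/li/or/MTC0 sequence added above simply forces the CU1 (FPU usable) bit on before the FP registers are touched, because with deferred FP switching the live SR may still belong to an integer-only task.  The host-runnable C model below restates that idea; the mips_get_sr()/mips_set_sr() stand-ins and the SR_CU1 value (the conventional coprocessor-1 enable bit, also used by the old _CPU_Context_Initialize) are assumptions made only so the sketch runs by itself.]

#include <stdio.h>

#define SR_CU1 0x20000000u            /* coprocessor-1 (FPU) usable bit        */

/* Stand-ins for the port's mips_get_sr()/mips_set_sr() macros so the
 * sketch runs on a host; on the target these read/write CP0 SR.               */
static unsigned int fake_sr = 0x0000ff01u;   /* int-only task: CU1 is clear    */
#define mips_get_sr(x) ((x) = fake_sr)
#define mips_set_sr(x) (fake_sr = (x))

int main(void)
{
  unsigned int sr;

  mips_get_sr(sr);
  sr |= SR_CU1;                 /* FPU on before saving/restoring FP registers */
  mips_set_sr(sr);

  printf("SR now 0x%08x (CU1 %s)\n", fake_sr,
         (fake_sr & SR_CU1) ? "set" : "clear");
  return 0;
}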
@@ -226,9 +252,21 @@ ENDFRAME(_CPU_Context_save_fp)
  *    )
  */
 
-#if ( CPU_HARDWARE_FP == TRUE )
+#if ( CPU_HARDWARE_FP == FALSE )
 FRAME(_CPU_Context_restore_fp,sp,0,ra)
         .set noat
+
+	/*
+	** Make sure the FPU is on before we retrieve state.  This code
+	** is here because the FPU context switch might occur when an
+	** integer task is switching out with a FP task switching in.
+	*/
+
+	MFC0	t0,C0_SR
+	li	k0,SR_CU1
+	or	t0,k0
+	MTC0	t0,C0_SR
+
         ld a1,(a0)
         NOP
         lwc1 $f0,FP0_OFFSET*4(a1)
@@ -284,13 +322,12 @@ FRAME(_CPU_Context_switch,sp,0,ra)
 
         MFC0    t0,C0_SR
         li      t1,~(SR_INTERRUPT_ENABLE_BITS)
-        STREG   t0,C0_SR_OFFSET*4(a0)        /* save status register */
+        STREG   t0,C0_SR_OFFSET*R_SZ(a0)
         and     t0,t1
-        MTC0    t0,C0_SR        /* first disable ie bit (recommended) */
 #if __mips == 3
-        ori     t0,SR_EXL|SR_IE /* enable exception level to disable interrupts */
-        MTC0    t0,C0_SR
+        ori     t0,(SR_EXL|SR_IE)      /* enable exception level to disable interrupts */
 #endif
+        MTC0    t0,C0_SR
 
         STREG   ra,RA_OFFSET*R_SZ(a0)        /* save current context */
         STREG   sp,SP_OFFSET*R_SZ(a0)
@@ -304,7 +341,7 @@ FRAME(_CPU_Context_switch,sp,0,ra)
         STREG   s6,S6_OFFSET*R_SZ(a0)
         STREG   s7,S7_OFFSET*R_SZ(a0)
 
-/*
+/*  EPC is readonly...
         MFC0    t0,C0_EPC
         NOP
         STREG   t0,C0_EPC_OFFSET*R_SZ(a0)
@@ -323,35 +360,64 @@ _CPU_Context_switch_restore:
         LDREG   s6,S6_OFFSET*R_SZ(a1)
         LDREG   s7,S7_OFFSET*R_SZ(a1)
 
-/*
+/*  EPC is readonly...
         LDREG   t0,C0_EPC_OFFSET*R_SZ(a1)
         NOP
         MTC0    t0,C0_EPC
 */
 
         LDREG   t0, C0_SR_OFFSET*R_SZ(a1)
-        NOP
+
+//      NOP
+//#if __mips == 3
+//        andi    t0,SR_EXL
+//        bnez    t0,_CPU_Context_1   /* set exception level from restore context */
+//        li      t0,~SR_EXL
+//        MFC0    t1,C0_SR
+//        NOP
+//        and     t1,t0
+//        MTC0    t1,C0_SR
+//
+//#elif __mips == 1
+//
+//        andi    t0,(SR_INTERRUPT_ENABLE_BITS)  /* we know 0 disabled */
+//        beq     t0,$0,_CPU_Context_1           /* set level from restore context */
+//        MFC0    t0,C0_SR
+//        NOP
+//        or      t0,(SR_INTERRUPT_ENABLE_BITS)  /* new_sr = old sr with enabled */
+//        MTC0    t0,C0_SR                       /* set with enabled */
+//        NOP
+
+
+/*
+** Incorporate the new thread's FP coprocessor state and interrupt mask/enable
+** into the status register.  We jump thru the requisite hoops to ensure we
+** maintain all other SR bits as global values.
+**
+** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
+** software int enables on a per-task basis, the rtems_task_create
+** Interrupt Level & int level manipulation functions cannot enable/disable them,
+** so they are automatically enabled for all tasks.  To turn them off, a thread
+** must itself manipulate the SR register.
+*/
+
 #if __mips == 3
-        andi    t0,SR_EXL
-        bnez    t0,_CPU_Context_1    /* set exception level from restore context */
-        li      t0,~SR_EXL
-        MFC0    t1,C0_SR
-        NOP
-        and     t1,t0
-        MTC0    t1,C0_SR
+        li      k0,(SR_CU1 | SR_IMASK | SR_EXL | SR_IE)
 
 #elif __mips == 1
-        andi    t0,(SR_INTERRUPT_ENABLE_BITS)  /* we know 0 disabled */
-        beq     t0,$0,_CPU_Context_1           /* set level from restore context */
-        MFC0    t0,C0_SR
-        NOP
-        or      t0,(SR_INTERRUPT_ENABLE_BITS)  /* new_sr = old sr with enabled */
-        MTC0    t0,C0_SR                       /* set with enabled */
+        li      k0,(SR_CU1 | SR_IMASK | SR_IEC)
 #endif
+        and     t0,k0
 
-_CPU_Context_1:
+        MFC0    t1,C0_SR        /* grab the current SR */
+        not     k0              /* invert k0 so we can clear out the SR bits */
+        and     t1,k0
+
+        or      t0,t1           /* setup the new task's SR value */
+
+        MTC0    t0,C0_SR        /* and load the new SR */
+        NOP
+
+        /* _CPU_Context_1: */
         j       ra
         NOP
 ENDFRAME(_CPU_Context_switch)
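[Note: illustration only, not part of the commit.  The new restore path builds the incoming task's SR in k0/t0/t1: every global SR bit is kept, while the FP enable and the interrupt mask/enable bits are taken from the saved task context.  The C model below restates that merge; the SR_* values are the conventional MIPS1 ones, supplied here only so the sketch compiles on its own.]

#include <stdio.h>

/* Per-thread SR bits named in the new code (mips1 values for the sketch) */
#define SR_CU1    0x20000000u
#define SR_IMASK  0x0000ff00u
#define SR_IEC    0x00000001u
#define PER_THREAD_SR_BITS (SR_CU1 | SR_IMASK | SR_IEC)

/* Model of what the restore path now does with k0/t0/t1:
 * keep every global SR bit, but take FP enable and the interrupt
 * mask/enable from the task being switched in.                      */
static unsigned int merge_task_sr(unsigned int global_sr, unsigned int task_sr)
{
  return (global_sr & ~PER_THREAD_SR_BITS)      /* global bits preserved  */
       | (task_sr   &  PER_THREAD_SR_BITS);     /* thread's FP/int state  */
}

int main(void)
{
  unsigned int global_sr = 0x1000ff00u;   /* example current SR           */
  unsigned int task_sr   = 0x2000fc01u;   /* example saved task SR        */
  printf("restored SR: 0x%08x\n", merge_task_sr(global_sr, task_sr));
  return 0;
}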
@@ -452,37 +518,10 @@ FRAME(_ISR_Handler,sp,0,ra)
         STREG t1,R_EPC*R_SZ(sp)
 
 
-#ifdef INSTRUMENT
+#ifdef INSTRUMENT_EXECUTING_THREAD
         lw t2, _Thread_Executing
         nop
         sw t2, 0x8001FFF0
-
-        sw t0, 0x8001F050
-        sw t1, 0x8001F054
-
-        li t0, 0xdeadbeef
-        li t1, 0xdeadbeef
-        li t2, 0xdeadbeef
-
-        sw ra, 0x8001F000
-        sw v0, 0x8001F004
-        sw v1, 0x8001F008
-        sw a0, 0x8001F00c
-        sw a1, 0x8001F010
-        sw a2, 0x8001F014
-        sw a3, 0x8001F018
-        sw t0, 0x8001F01c
-        sw t1, 0x8001F020
-        sw t2, 0x8001F024
-        sw t3, 0x8001F028
-        sw t4, 0x8001F02c
-        sw t5, 0x8001F030
-        sw t6, 0x8001F034
-        sw t7, 0x8001F038
-        sw t8, 0x8001F03c
-        sw t9, 0x8001F040
-        sw gp, 0x8001F044
-        sw fp, 0x8001F048
 #endif
 
         /* determine if an interrupt generated this exception */
@@ -516,7 +555,9 @@ _ISR_Handler_1:
 
         beq     k0,zero,_ISR_Handler_exit
-        li      t2,1         /* set a flag so we process interrupts */
+
+
+
 
 /*
  *  save some or all context on stack
@@ -547,10 +588,23 @@ _ISR_Handler_1:
  *  Call the CPU model or BSP specific routine to decode the
  *  interrupt source and actually vector to device ISR handlers.
  */
 
+#ifdef INSTRUMENT_ISR_VECTORING
+        nop
+        li      t1, 1
+        sw      t1, 0x8001e000
+#endif
+
         move    a0,sp
         jal     mips_vector_isr_handlers
         nop
 
+#ifdef INSTRUMENT_ISR_VECTORING
+        li      t1, 0
+        sw      t1, 0x8001e000
+        nop
+#endif
+
 /*
  *  --_ISR_Nest_level;
  */
@@ -572,6 +626,14 @@ _ISR_Handler_1:
         or      t0,t2,t1
         bne     t0,zero,_ISR_Handler_exit
         nop
+
+
+
+
+
+
+
+
 /*
  *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
  *    restore stack
@@ -589,32 +651,35 @@ _ISR_Handler_1:
 
 
 
-#ifdef INSTRUMENT
-        li      t0,0x11111111
-        sw      t0,0x8001F104
+#ifdef INSTRUMENT_EXECUTING_THREAD
+        lw      t0,_Thread_Executing
+        nop
+        sw      t0,0x8001FFF4
 #endif
 
-	/* restore interrupt state from the saved status register,
-	 * if the isr vectoring didn't so we allow nested interrupts to
-	 * occur */
+	/*
+	** Turn on interrupts before entering Thread_Dispatch which
+	** will run for a while, thus allowing new interrupts to
+	** be serviced.  Observe the Thread_Dispatch_disable_level interlock
+	** that prevents recursive entry into Thread_Dispatch.
+	*/
 
-        LDREG   t0,R_SR*R_SZ(sp)
+        MFC0    t0, C0_SR
         NOP
+        or      t0, SR_INTERRUPT_ENABLE_BITS
         MTC0    t0, C0_SR
-        rfe
+        NOP
 
         jal     _Thread_Dispatch
+        NOP
+
+#ifdef INSTRUMENT_EXECUTING_THREAD
+        lw      t0,_Thread_Executing
         nop
-#ifdef INSTRUMENT
-        li      t0,0x22222222
-        sw      t0,0x8001F100
+        sw      t0,0x8001FFF8
 #endif
 
 
 
 /*
  *  prepare to get out of interrupt
  *  return from interrupt  (maybe to _ISR_Dispatch)
@@ -625,16 +690,19 @@ _ISR_Handler_1:
  */
 
 _ISR_Handler_exit:
-        LDREG   t0, R_SR*R_SZ(sp)
-        NOP
-        MTC0    t0, C0_SR
+	/*
+	** Skip the SR restore because its a global register.  _CPU_Context_switch_restore
+	** adjusts it according to each task's configuration.  If we didn't dispatch, the
+	** SR value isn't changing, so all we need to do is return.
+	**
+	*/
+
         /* restore context from stack */
 
-#ifdef INSTRUMENT
+#ifdef INSTRUMENT_EXECUTING_THREAD
         lw	t0,_Thread_Executing
         nop
-        sw	t0, 0x8001FFF4
+        sw	t0, 0x8001FFFC
 #endif
 
         LDREG k0, R_MDLO*R_SZ(sp)
@@ -661,28 +729,6 @@ _ISR_Handler_exit:
         LDREG v1, R_V1*R_SZ(sp)
         LDREG v0, R_V0*R_SZ(sp)
 
-#ifdef INSTRUMENT
-        sw ra, 0x8001F000
-        sw v0, 0x8001F004
-        sw v1, 0x8001F008
-        sw a0, 0x8001F00c
-        sw a1, 0x8001F010
-        sw a2, 0x8001F014
-        sw a3, 0x8001F018
-        sw t0, 0x8001F01c
-        sw t1, 0x8001F020
-        sw t2, 0x8001F024
-        sw t3, 0x8001F028
-        sw t4, 0x8001F02c
-        sw t5, 0x8001F030
-        sw t6, 0x8001F034
-        sw t7, 0x8001F038
-        sw t8, 0x8001F03c
-        sw t9, 0x8001F040
-        sw gp, 0x8001F044
-        sw fp, 0x8001F048
-#endif
-
         LDREG k0, R_EPC*R_SZ(sp)
 
         .set noat
@@ -697,6 +743,7 @@ _ISR_Handler_exit:
         .set    reorder
 ENDFRAME(_ISR_Handler)
 
+
 FRAME(mips_break,sp,0,ra)
 #if 1
         break   0x0
rtems/score/cpu.h

@@ -67,7 +67,7 @@ extern "C" {
  *       one subroutine call is avoided entirely.]
  */
 
-#define CPU_INLINE_ENABLE_DISPATCH       TRUE
+#define CPU_INLINE_ENABLE_DISPATCH       FALSE
 
 /*
  *  Should the body of the search loops in _Thread_queue_Enqueue_priority
@@ -207,7 +207,7 @@ extern "C" {
  *         must be saved as part of the preemption.
  */
 
-#define CPU_IDLE_TASK_IS_FP      FALSE
+#define CPU_IDLE_TASK_IS_FP      TRUE
 
 /*
  *  Should the saving of the floating point registers be deferred
@@ -554,13 +554,6 @@ extern unsigned int mips_interrupt_number_of_vectors;
 #define CPU_INTERRUPT_NUMBER_OF_VECTORS      (mips_interrupt_number_of_vectors)
 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
 
-/*
- *  This is defined if the port has a special way to report the ISR nesting
- *  level.  Most ports maintain the variable _ISR_Nest_level.
- */
-
-#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
-
 /*
  *  Should be large enough to run all RTEMS tests.  This insures
  *  that a "reasonable" small application should not have any problems.
@@ -632,7 +625,8 @@ extern unsigned int mips_interrupt_number_of_vectors;
 #define _CPU_ISR_Disable( _level ) \
   do { \
     mips_get_sr( _level ); \
-    mips_set_sr( (_level) & ~SR_INTERRUPT_ENABLE_BITS ); \
+    mips_set_sr( _level & ~SR_INTERRUPT_ENABLE_BITS ); \
+    _level &= SR_INTERRUPT_ENABLE_BITS; \
   } while(0)
 
 /*
@@ -643,7 +637,9 @@ extern unsigned int mips_interrupt_number_of_vectors;
 
 #define _CPU_ISR_Enable( _level )  \
   do { \
-    mips_set_sr(_level); \
+    unsigned int _scratch; \
+    mips_get_sr( _scratch ); \
+    mips_set_sr( (_scratch & ~SR_INTERRUPT_ENABLE_BITS) | (_level & SR_INTERRUPT_ENABLE_BITS) ); \
   } while(0)
 
 /*
@@ -655,9 +651,8 @@ extern unsigned int mips_interrupt_number_of_vectors;
 
 #define _CPU_ISR_Flash( _xlevel ) \
   do { \
-    unsigned int _scratch; \
     _CPU_ISR_Enable( _xlevel ); \
-    _CPU_ISR_Disable( _scratch ); \
+    _CPU_ISR_Disable( _xlevel ); \
   } while(0)
 
 /*
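[Note: illustration only, not part of the commit.  The reworked macros change the _CPU_ISR_Disable/_CPU_ISR_Enable contract: _level now carries only the interrupt-enable bits, and enable merges them back into a freshly read SR instead of overwriting the whole register.  The host-runnable sketch below mimics that shape; the SR_INTERRUPT_ENABLE_BITS value and the mips_get_sr()/mips_set_sr() stand-ins are assumptions made only so the example runs by itself.]

#include <stdio.h>

#define SR_INTERRUPT_ENABLE_BITS 0x0000ff01u  /* stand-in value for the sketch */

/* Host-side stand-ins for mips_get_sr()/mips_set_sr() so this runs anywhere   */
static unsigned int fake_sr = 0x2000fc01u;
#define mips_get_sr(x) ((x) = fake_sr)
#define mips_set_sr(x) (fake_sr = (x))

/* Same shape as the revised macros: Disable keeps only the enable bits in
 * _level; Enable re-reads SR and ORs those bits back in instead of
 * overwriting the whole register.                                              */
#define ISR_DISABLE(_level) \
  do { \
    mips_get_sr( _level ); \
    mips_set_sr( _level & ~SR_INTERRUPT_ENABLE_BITS ); \
    _level &= SR_INTERRUPT_ENABLE_BITS; \
  } while (0)

#define ISR_ENABLE(_level) \
  do { \
    unsigned int _scratch; \
    mips_get_sr( _scratch ); \
    mips_set_sr( (_scratch & ~SR_INTERRUPT_ENABLE_BITS) | \
                 ((_level) & SR_INTERRUPT_ENABLE_BITS) ); \
  } while (0)

int main(void)
{
  unsigned int level;

  ISR_DISABLE(level);                 /* critical section entered              */
  printf("saved enable bits: 0x%04x, SR: 0x%08x\n", level, fake_sr);
  ISR_ENABLE(level);                  /* prior enable state reinstated         */
  printf("SR after enable:   0x%08x\n", fake_sr);
  return 0;
}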
@@ -701,22 +696,52 @@ void _CPU_ISR_Set_level( unsigned32 );  /* in cpu.c */
  *     point thread.  This is typically only used on CPUs where the
  *     FPU may be easily disabled by software such as on the SPARC
  *     where the PSR contains an enable FPU bit.
+ *
+ * The per-thread status register holds the interrupt enable, FP enable
+ * and global interrupt enable for that thread.  It means each thread can
+ * enable its own set of interrupts.  If interrupts are disabled, RTEMS
+ * can still dispatch via blocking calls.  This is the function of the
+ * "Interrupt Level", and on the MIPS, it controls the IEC bit and all
+ * the hardware interrupts as defined in the SR.  Software ints
+ * are automatically enabled for all threads, as they will only occur under
+ * program control anyhow.  Besides, the interrupt level parm is only 8 bits,
+ * and controlling the software ints plus the others would require 9.
+ *
+ * If the Interrupt Level is 0, all ints are on.  Otherwise, the
+ * Interrupt Level should supply a bit pattern to impose on the SR
+ * interrupt bits; bit 0 applies to the mips1 IEC bit/mips3 EXL&IE, bits 1 thru 6
+ * apply to the SR register Intr bits from bit 10 thru bit 15.  Bit 7 of
+ * the Interrupt Level parameter is unused at this time.
+ *
+ * These are the only per-thread SR bits, the others are maintained
+ * globally & explicitly preserved by the Context Switch code in cpu_asm.s
  */
 
-#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
-                                  _isr, _entry_point, _is_fp ) \
+#if __mips == 3
+#define _INTON	(SR_EXL | SR_IE)
+#endif
+#if __mips == 1
+#define _INTON	SR_IEC
+#endif
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, _isr, _entry_point, _is_fp ) \
 { \
 	unsigned32 _stack_tmp = \
 	  (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
+	unsigned32 _intlvl = _isr & 0xff; \
 	_stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
 	(_the_context)->sp = _stack_tmp; \
 	(_the_context)->fp = _stack_tmp; \
 	(_the_context)->ra = (unsigned64)_entry_point; \
-	(_the_context)->c0_sr = ((_the_context)->c0_sr & 0x0fff0000) | \
-				((_isr)?0xff00:0xff01) | \
-				((_is_fp)?0x20000000:0x10000000); \
+	(_the_context)->c0_sr = ((_intlvl==0)?(0xFF00 | _INTON):( ((_intlvl<<9) & 0xfc00) | \
+				  0x300 | \
+				  ((_intlvl & 1)?_INTON:0)) ) | \
+				SR_CU0 | ((_is_fp)?SR_CU1:0); \
 }
 
 
 
 /*
  *  This routine is responsible for somehow restarting the currently
  *  executing task.  If you are lucky, then all that is necessary
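[Note: illustration only, not part of the commit.  The new _CPU_Context_Initialize() computes the task's initial c0_sr from the low 8 bits of the interrupt level and the FP flag.  The function below restates that computation for the __mips == 1 case; the SR_IEC/SR_CU0/SR_CU1 values and the _INTON expansion are filled in with their conventional MIPS1 values so the sketch compiles on its own.]

#include <stdio.h>

/* mips1 values, restated locally so the sketch compiles on its own */
#define SR_IEC  0x00000001u                /* current interrupt enable      */
#define SR_CU0  0x10000000u                /* coprocessor 0 usable          */
#define SR_CU1  0x20000000u                /* coprocessor 1 (FPU) usable    */
#define INTON   SR_IEC                     /* what _INTON expands to, mips1 */

/* Mirrors the new _CPU_Context_Initialize() SR computation: level 0 means
 * all interrupts (and both software ints, 0x300) enabled; otherwise bits
 * 1..6 of the level select the hardware mask and bit 0 the enable bit.    */
static unsigned int initial_sr(unsigned int isr_level, int is_fp)
{
  unsigned int intlvl = isr_level & 0xff;
  unsigned int sr;

  if (intlvl == 0)
    sr = 0xff00u | INTON;                          /* everything on        */
  else
    sr = ((intlvl << 9) & 0xfc00u)                 /* hardware int mask    */
       | 0x300u                                    /* software ints always */
       | ((intlvl & 1) ? INTON : 0);               /* global enable        */

  return sr | SR_CU0 | (is_fp ? SR_CU1 : 0);       /* coprocessor enables  */
}

int main(void)
{
  printf("FP task, level 0:       0x%08x\n", initial_sr(0, 1));
  printf("int-only task, level 3: 0x%08x\n", initial_sr(3, 0));
  return 0;
}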