forked from Imagelibrary/rtems
2011-07-28 Jennifer Averett <Jennifer.Averett@OARcorp.com>
PR 1801 * shared/irq_asm.S: Modifications to synch the sparc with the smp working tree.
ChangeLog:

@@ -1,3 +1,9 @@
+2011-07-28	Jennifer Averett <Jennifer.Averett@OARcorp.com>
+
+	PR 1801
+	* shared/irq_asm.S: Modifications to synch the sparc with the smp
+	working tree.
+
 2011-06-28	Joel Sherrill <joel.sherrill@oarcorp.com>
 	Jennifer Averett <jennifer.averett@OARcorp.com>
 
shared/irq_asm.S:

@@ -163,69 +163,6 @@ save_isf:
 
         mov      %sp, %o1                ! 2nd arg to ISR Handler
 
-        /*
-         *  Increment ISR nest level and Thread dispatch disable level.
-         *
-         *  Register usage for this section:
-         *
-         *    l4 = _Thread_Dispatch_disable_level pointer
-         *    l5 = _ISR_Nest_level pointer
-         *    l6 = _Thread_Dispatch_disable_level value
-         *    l7 = _ISR_Nest_level value
-         *
-         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
-         *        nest and thread dispatch disable levels are unnested.
-         */
-
-        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
-        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
-
-        PUBLIC(_ISR_PER_CPU)
-SYM(_ISR_PER_CPU):
-#if defined(RTEMS_SMP)
-        sethi    %hi(_Per_CPU_Information_p), %l5
-        add      %l5, %lo(_Per_CPU_Information_p), %l5
-#if BSP_LEON3_SMP
-        /* LEON3 SMP support */
-        rd       %asr17, %l7
-        srl      %l7, 28, %l7        /* CPU number is upper 4 bits so shift */
-        sll      %l7, 2, %l7         /* l7 = offset */
-        add      %l5, %l7, %l5
-#endif
-        ld       [%l5], %l5          /* l5 = pointer to per CPU */
-        nop
-        nop
-#else
-        sethi    %hi(_Per_CPU_Information), %l5
-        add      %l5, %lo(_Per_CPU_Information), %l5
-#endif
-        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
-
-        add      %l6, 1, %l6
-        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
-
-        add      %l7, 1, %l7
-        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
-
-        /*
-         *  If ISR nest level was zero (now 1), then switch stack.
-         */
-
-        mov      %sp, %fp
-        subcc    %l7, 1, %l7             ! outermost interrupt handler?
-        bnz      dont_switch_stacks      ! No, then do not switch stacks
-
-        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
-
-dont_switch_stacks:
-        /*
-         *  Make sure we have a place on the stack for the window overflow
-         *  trap handler to write into.  At this point it is safe to
-         *  enable traps again.
-         */
-
-        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
-
         /*
          *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
          *  set the PIL in the %psr to mask off interrupts with lower priority.
@@ -316,6 +253,86 @@ pil_fixed:
         wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
 dont_fix_pil2:
 
+        PUBLIC(_ISR_PER_CPU)
+SYM(_ISR_PER_CPU):
+
+#if defined(RTEMS_SMP)
+        sethi    %hi(_Per_CPU_Information_p), %l5
+        add      %l5, %lo(_Per_CPU_Information_p), %l5
+#if BSP_LEON3_SMP
+        /* LEON3 SMP support */
+        rd       %asr17, %l7
+        srl      %l7, 28, %l7        /* CPU number is upper 4 bits so shift */
+        sll      %l7, 2, %l7         /* l7 = offset */
+        add      %l5, %l7, %l5
+#endif
+        ld       [%l5], %l5          /* l5 = pointer to per CPU */
+        nop
+        nop
+
+        /*
+         *  On multi-core system, we need to use SMP safe versions
+         *  of ISR and Thread Dispatch critical sections.
+         *
+         *  _ISR_SMP_Enter returns the interrupt nest level.  If we are
+         *  outermost interrupt, then we need to switch stacks.
+         */
+        mov      %sp, %fp
+        call     SYM(_ISR_SMP_Enter), 0
+        nop                              ! delay slot
+        cmp      %o0, 0
+#else
+        /*
+         *  On single core system, we can directly use variables.
+         *
+         *  Increment ISR nest level and Thread dispatch disable level.
+         *
+         *  Register usage for this section:
+         *
+         *    l4 = _Thread_Dispatch_disable_level pointer
+         *    l5 = _ISR_Nest_level pointer
+         *    l6 = _Thread_Dispatch_disable_level value
+         *    l7 = _ISR_Nest_level value
+         *
+         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
+         *        nest and thread dispatch disable levels are unnested.
+         */
+        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
+        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
+
+        sethi    %hi(_Per_CPU_Information), %l5
+        add      %l5, %lo(_Per_CPU_Information), %l5
+
+        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
+
+        add      %l6, 1, %l6
+        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
+
+        add      %l7, 1, %l7
+        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
+
+        /*
+         *  If ISR nest level was zero (now 1), then switch stack.
+         */
+        mov      %sp, %fp
+        subcc    %l7, 1, %l7             ! outermost interrupt handler?
+#endif
+
+        /*
+         *  Do we need to switch to the interrupt stack?
+         */
+        bnz      dont_switch_stacks      ! No, then do not switch stacks
+        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
+
+dont_switch_stacks:
+        /*
+         *  Make sure we have a place on the stack for the window overflow
+         *  trap handler to write into.  At this point it is safe to
+         *  enable traps again.
+         */
+
+        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
         /*
          *  Vector to user's handler.
          *
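As a review aid, here is a rough C sketch of what the assembly in the hunk above assumes. It is inferred only from the instructions and comments in this hunk, not from the actual score implementation: the real _ISR_SMP_Enter is called without arguments and must also deal with the SMP interlock, and every type and field name below (Per_CPU_Sketch, isr_nest_level, interrupt_stack_high, the lookup helper) is a hypothetical stand-in for the PER_CPU_* offsets used by the assembly.

#include <stdint.h>

/* Hypothetical per-CPU layout; stands in for the PER_CPU_* offsets above. */
typedef struct {
  uint32_t isr_nest_level;        /* PER_CPU_ISR_NEST_LEVEL          */
  void    *interrupt_stack_high;  /* PER_CPU_INTERRUPT_STACK_HIGH    */
} Per_CPU_Sketch;

extern volatile uint32_t _Thread_Dispatch_disable_level;
extern Per_CPU_Sketch   *_Per_CPU_Information_p[];  /* one pointer per CPU */

/*
 * Per-CPU lookup done by the BSP_LEON3_SMP block above: %asr17 carries the
 * CPU number in its upper four bits, which selects the per-CPU pointer.
 */
static Per_CPU_Sketch *Per_CPU_lookup_sketch( uint32_t asr17_value )
{
  uint32_t cpu_index = asr17_value >> 28;

  return _Per_CPU_Information_p[ cpu_index ];
}

/*
 * Contract the assembly expects from _ISR_SMP_Enter: bump the per-CPU ISR
 * nest level and the thread dispatch disable level, and return the previous
 * nest level so the caller switches to the interrupt stack only when this
 * is the outermost interrupt (return value 0, i.e. %o0 == 0 after the call).
 * Locking is omitted; the cpu argument exists only to keep the sketch
 * self-contained.
 */
static uint32_t ISR_SMP_Enter_sketch( Per_CPU_Sketch *cpu )
{
  uint32_t previous_nest_level = cpu->isr_nest_level;

  cpu->isr_nest_level = previous_nest_level + 1;
  _Thread_Dispatch_disable_level += 1;

  return previous_nest_level;
}

With that contract, "cmp %o0, 0" followed by "bnz dont_switch_stacks" reads naturally: a nonzero previous nest level means the CPU is already running on its interrupt stack.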
@@ -336,6 +353,17 @@ dont_fix_pil2:
         call     %g4, 0
         nop                              ! delay slot
 
+#if defined(RTEMS_SMP)
+        call     SYM(_ISR_SMP_Exit), 0
+        nop                              ! delay slot
+        cmp      %o0, 0
+        bz       simple_return
+#else
+        !sethi   %hi(SYM(_Thread_Dispatch_disable_level)), %l4
+        !ld      [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
+        !ld      [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
+#endif
+
         /*
          *  Redisable traps so we can finish up the interrupt processing.
          *  This is a VERY conservative place to do this.
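The exit side mirrors the entry: after the user's handler returns, the SMP build calls _ISR_SMP_Exit and branches to simple_return when it reports zero. Below is a similarly hedged sketch of the implied contract, reusing the illustrative names from the sketch above; the real routine is called without arguments, reads the per-CPU state itself, and handles the SMP interlock, and the dispatch_requested flag here merely stands in for whatever state it consults.

/*
 * Contract the assembly expects from _ISR_SMP_Exit: unnest the ISR and
 * thread dispatch disable levels and report whether the interrupted context
 * must run the thread dispatcher (nonzero) or may simply return from the
 * trap (zero, the "bz simple_return" case).  Locking is omitted.
 */
static int ISR_SMP_Exit_sketch( Per_CPU_Sketch *cpu, int dispatch_requested )
{
  cpu->isr_nest_level            -= 1;
  _Thread_Dispatch_disable_level -= 1;

  /* Only the outermost interrupt with dispatching re-enabled may dispatch. */
  if ( cpu->isr_nest_level == 0 &&
       _Thread_Dispatch_disable_level == 0 &&
       dispatch_requested ) {
    return 1;
  }

  return 0;
}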
@@ -346,6 +374,7 @@ dont_fix_pil2:
         mov      %l0, %psr               ! **** DISABLE TRAPS ****
         nop; nop; nop
 
+#if !defined(RTEMS_SMP)
         /*
          *  Decrement ISR nest level and Thread dispatch disable level.
          *
@@ -389,7 +418,7 @@ dont_fix_pil2:
 
         orcc     %l5, %g0, %g0           ! Is thread switch necessary?
         bz       simple_return           ! No, then return
-
+#endif
         /*
          *  Invoke interrupt dispatcher.
          */