bsps/sparc: Revert most SMP related changes
As a side-effect the PR2082 is fixed with this and later changes.

The commit restores the _ISR_Handler code to the original version in
"cpukit/score/sparc/cpu_asm.S" in commit 6d42b4c60a.

A list of reverted changes follows.

commit c236082873
Author: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date:   Tue Jul 30 15:54:53 2013 +0200

    smp: Provide cache optimized Per_CPU_Control

    Delete _Per_CPU_Information_p.

This commit was completely reverted.

commit e517714b7c
Author: Jennifer Averett <jennifer.averett@oarcorp.com>
Date:   Tue Feb 26 12:31:23 2013 -0600

    sparc: Remove dead code that was leftover from SMP development.

This commit was completely reverted.

commit 47a61aa16f
Author: Joel Sherrill <joel.sherrill@OARcorp.com>
Date:   Fri Oct 7 14:35:03 2011 +0000

    2011-10-07  Daniel Hellstrom <daniel@gaisler.com>

    PR 1933/cpukit
    * shared/irq_asm.S: From code inspection I have found the following
      issues (most SMP), and some improvements in irq_asm.S. I would need
      a long test with interrupts to verify the interrupt handler better,
      however I can not see that these patches hurt. Please see comment
      per hunk below. One should go through the file to indent delay-slots
      correctly, I have fixed some in the patch areas. An extra space is
      added in front of delay slots to indicate a delay slot.

This commit was completely reverted.

commit 0bd3f7e5d1
Author: Jennifer Averett <Jennifer.Averett@OARcorp.com>
Date:   Thu Jul 28 17:33:07 2011 +0000

    2011-07-28  Jennifer Averett <Jennifer.Averett@OARcorp.com>

    PR 1801
    * shared/irq_asm.S: Modifications to synch the sparc with the smp
      working tree.

This commit was completely reverted.

commit 5d69cd33e9
Author: Joel Sherrill <joel.sherrill@OARcorp.com>
Date:   Wed Mar 16 20:05:30 2011 +0000

    2011-03-16  Jennifer Averett <jennifer.averett@oarcorp.com>

    PR 1729/cpukit
    * shared/irq_asm.S: New file.

The parts modifying the original code of _ISR_Handler were reverted.
Only the content move remains.
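For orientation only, the uniprocessor bookkeeping that the restored _ISR_Handler prologue performs (see the hunks below) can be sketched roughly in C. This sketch is not part of the change; the stand-in variables and the helper name are hypothetical, and the authoritative code is the SPARC assembly in the diff.

    #include <stdint.h>

    /* Hypothetical stand-ins mirroring the symbols used by the assembly below. */
    static volatile uint32_t _Thread_Dispatch_disable_level;
    static volatile uint32_t _ISR_Nest_level;
    static void *interrupt_stack_high;
    static void *stack_pointer;

    /* Rough equivalent of the restored prologue: raise both levels and
     * switch to the interrupt stack only for the outermost interrupt. */
    static void isr_prologue_sketch(void)
    {
      ++_Thread_Dispatch_disable_level;
      if (_ISR_Nest_level++ == 0) {
        stack_pointer = interrupt_stack_high;
      }
    }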
@@ -21,7 +21,6 @@

#include <rtems/asm.h>
#include <rtems/system.h>
#include <bspopts.h>

/*
 * void _ISR_Handler()
@@ -161,6 +160,54 @@ save_isf:

        mov %sp, %o1 ! 2nd arg to ISR Handler

        /*
         * Increment ISR nest level and Thread dispatch disable level.
         *
         * Register usage for this section:
         *
         * l4 = _Thread_Dispatch_disable_level pointer
         * l5 = per cpu info pointer
         * l6 = _Thread_Dispatch_disable_level value
         * l7 = _ISR_Nest_level value
         *
         * NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *       nest and thread dispatch disable levels are unnested.
         */

        sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6

        sethi %hi(_Per_CPU_Information), %l5
        add %l5, %lo(_Per_CPU_Information), %l5

        ld [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7

        add %l6, 1, %l6
        st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add %l7, 1, %l7
        st %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         * If ISR nest level was zero (now 1), then switch stack.
         */

        mov %sp, %fp
        subcc %l7, 1, %l7 ! outermost interrupt handler?
        bnz dont_switch_stacks ! No, then do not switch stacks
        nop
        ld [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         * Make sure we have a place on the stack for the window overflow
         * trap handler to write into. At this point it is safe to
         * enable traps again.
         */

        sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         * Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         * set the PIL in the %psr to mask off interrupts with lower priority.
@@ -251,85 +298,6 @@ pil_fixed:
        wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

PUBLIC(_ISR_PER_CPU)
SYM(_ISR_PER_CPU):

#if defined(RTEMS_SMP)
        sethi %hi(_Per_CPU_Information), %l5
        add %l5, %lo(_Per_CPU_Information), %l5
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd %asr17, %l7
        srl %l7, 28, %l7 /* CPU number is upper 4 bits so shift */
#else
        mov 0, %l7
        nop
#endif
        sll %l7, PER_CPU_CONTROL_SIZE_LOG2, %l7 /* l7 = offset */
        add %l5, %l7, %l5 /* l5 = pointer to per CPU */

        /*
         * On multi-core system, we need to use SMP safe versions
         * of ISR and Thread Dispatch critical sections.
         *
         * _ISR_SMP_Enter returns the interrupt nest level. If we are
         * outermost interrupt, then we need to switch stacks.
         */
        call SYM(_ISR_SMP_Enter), 0
        mov %sp, %fp ! delay slot
        cmp %o0, 0
#else
        /*
         * On single core system, we can directly use variables.
         *
         * Increment ISR nest level and Thread dispatch disable level.
         *
         * Register usage for this section:
         *
         * l4 = _Thread_Dispatch_disable_level pointer
         * l5 = _ISR_Nest_level pointer
         * l6 = _Thread_Dispatch_disable_level value
         * l7 = _ISR_Nest_level value
         *
         * NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *       nest and thread dispatch disable levels are unnested.
         */
        sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6

        sethi %hi(_Per_CPU_Information), %l5
        add %l5, %lo(_Per_CPU_Information), %l5

        ld [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7

        add %l6, 1, %l6
        st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add %l7, 1, %l7
        st %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         * If ISR nest level was zero (now 1), then switch stack.
         */
        mov %sp, %fp
        subcc %l7, 1, %l7 ! outermost interrupt handler?
#endif

        /*
         * Do we need to switch to the interrupt stack?
         */
        beq,a dont_switch_stacks ! No, then do not switch stacks
        ld [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         * Make sure we have a place on the stack for the window overflow
         * trap handler to write into. At this point it is safe to
         * enable traps again.
         */

        sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         * Vector to user's handler.
         *
@@ -350,14 +318,6 @@ dont_switch_stacks:
        call %g4, 0
        nop ! delay slot

#if defined(RTEMS_SMP)
        call SYM(_ISR_SMP_Exit), 0
        nop ! delay slot
        cmp %o0, 0
        bz simple_return
        nop
#endif

        /*
         * Redisable traps so we can finish up the interrupt processing.
         * This is a VERY conservative place to do this.
@@ -368,7 +328,6 @@ dont_switch_stacks:
        mov %l0, %psr ! **** DISABLE TRAPS ****
        nop; nop; nop

#if !defined(RTEMS_SMP)
        /*
         * Decrement ISR nest level and Thread dispatch disable level.
         *
@@ -401,16 +360,18 @@ dont_switch_stacks:
        bnz simple_return ! Yes, then do a "simple" exit
        nop

        /*
         * If a context switch is necessary, then do fudge stack to
         * return to the interrupt dispatcher.
         */

        ldub [%l5 + PER_CPU_DISPATCH_NEEDED], %l5

        orcc %l5, %g0, %g0 ! Is thread switch necessary?
        bz simple_return ! No, then return
        bz simple_return ! no, then do a simple return
        nop
#endif

        /*
         * Invoke interrupt dispatcher.
         */
@@ -457,26 +418,13 @@ isr_dispatch:
         * _Thread_Dispatch before leaving this ISR Dispatch context.
         */

#if defined(RTEMS_SMP)
        sethi %hi(_Per_CPU_Information), %l5
        add %l5, %lo(_Per_CPU_Information), %l5
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd %asr17, %l7
        srl %l7, 28, %l7 /* CPU number is upper 4 bits so shift */
#else
        mov 0, %l7
        nop
#endif
        sll %l7, PER_CPU_CONTROL_SIZE_LOG2, %l7 /* l7 = offset */
        add %l5, %l7, %l5 /* l5 = pointer to per CPU */
#else
        sethi %hi(_Per_CPU_Information), %l5
        add %l5, %lo(_Per_CPU_Information), %l5
#endif
        ldub [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
        orcc %l5, %g0, %g0 ! Is thread switch necessary?
        bz allow_nest_again

        ldub [%l5 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc %l7, %g0, %g0 ! Is thread switch necesary?
        bz allow_nest_again ! No, then clear out and return
        nop

        ! Yes, then invoke the dispatcher