bsps/sparc: Revert most SMP related changes

As a side-effect, PR 2082 is fixed by this and later changes.

This commit restores the _ISR_Handler code to the original version of
"cpukit/score/sparc/cpu_asm.S" as of commit 6d42b4c60a.  A list of the
reverted changes follows after the sketch below.
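
The restored prologue increments the thread dispatch disable level and
the per-CPU ISR nest level before traps are re-enabled, and switches to
the dedicated interrupt stack only for the outermost interrupt.  In
rough C this amounts to the following (a sketch only: the structure
layout and sketch names are invented for illustration, the real code is
the SPARC assembly in the diff below):

    #include <stdint.h>

    /* Illustrative layout of the per-CPU fields touched by the prologue */
    typedef struct {
      uint32_t isr_nest_level;       /* PER_CPU_ISR_NEST_LEVEL */
      void    *interrupt_stack_high; /* PER_CPU_INTERRUPT_STACK_HIGH */
      uint8_t  dispatch_needed;      /* PER_CPU_DISPATCH_NEEDED */
    } Per_CPU_Control;

    extern volatile uint32_t _Thread_Dispatch_disable_level;
    extern Per_CPU_Control _Per_CPU_Information;

    static void isr_prologue_sketch( void )
    {
      /* Both levels are incremented while traps are still disabled */
      ++_Thread_Dispatch_disable_level;

      if ( ++_Per_CPU_Information.isr_nest_level == 1 ) {
        /*
         * Outermost interrupt: the assembly loads %sp from
         * PER_CPU_INTERRUPT_STACK_HIGH to switch stacks.
         */
      }
    }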

commit c236082873
Author: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date:   Tue Jul 30 15:54:53 2013 +0200

    smp: Provide cache optimized Per_CPU_Control

    Delete _Per_CPU_Information_p.

This commit was completely reverted.

commit e517714b7c
Author: Jennifer Averett <jennifer.averett@oarcorp.com>
Date:   Tue Feb 26 12:31:23 2013 -0600

    sparc: Remove dead code that was leftover from SMP development.

This commit was completely reverted.

commit 47a61aa16f
Author: Joel Sherrill <joel.sherrill@OARcorp.com>
Date:   Fri Oct 7 14:35:03 2011 +0000

    2011-10-07	Daniel Hellstrom <daniel@gaisler.com>

    	PR 1933/cpukit
    	* shared/irq_asm.S: From code inspection I have found the following
    	issues (most SMP), and some improvements in irq_asm.S. I would need a
    	long test with interrupts to verify the interrupt handler better,
    	however I can not see that these patches hurt. Please see comment per
    	hunk below, One should go through the file to indent delay-slots
    	correctly, I have fixed some in the patch areas. An extra space is
    	added in front of delay slots to indicate a delay slot.

This commit was completely reverted.

commit 0bd3f7e5d1
Author: Jennifer Averett <Jennifer.Averett@OARcorp.com>
Date:   Thu Jul 28 17:33:07 2011 +0000

    2011-07-28	Jennifer Averett <Jennifer.Averett@OARcorp.com>

    	PR 1801
    	* shared/irq_asm.S: Modifications to synch the sparc with the smp
    	working tree.

This commit was completely reverted.

commit 5d69cd33e9
Author: Joel Sherrill <joel.sherrill@OARcorp.com>
Date:   Wed Mar 16 20:05:30 2011 +0000

    2011-03-16	Jennifer Averett <jennifer.averett@OARcorp.com>

    	PR 1729/cpukit
    	* shared/irq_asm.S: New file.

Only the parts of this commit that modified the original _ISR_Handler
code were reverted; the move of the code into shared/irq_asm.S remains.
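
The diff below also removes the LEON3-specific per-CPU lookup, which
derived the processor index from the upper four bits of %asr17 and
scaled it by the per-CPU control block size.  In rough C (a sketch; the
PER_CPU_CONTROL_SIZE_LOG2 value and the byte-array view of
_Per_CPU_Information are assumptions for illustration):

    #include <stdint.h>

    #define PER_CPU_CONTROL_SIZE_LOG2 7 /* assumed value for illustration */

    /* Table of per-CPU control blocks, viewed as raw bytes */
    extern uint8_t _Per_CPU_Information[];

    static void *leon3_per_cpu_sketch( void )
    {
      uint32_t asr17;

      /* rd %asr17, %l7 */
      __asm__ volatile ( "rd %%asr17, %0" : "=r" ( asr17 ) );

      /* srl %l7, 28, %l7: the CPU number is in the upper 4 bits */
      uint32_t cpu_index = asr17 >> 28;

      /* sll + add: scale the index and offset into the table */
      return &_Per_CPU_Information[ cpu_index << PER_CPU_CONTROL_SIZE_LOG2 ];
    }
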
commit 849bb7a332
parent 07567903d2
Author: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date:   2013-08-01 11:35:16 +02:00

Diff of shared/irq_asm.S:
@@ -21,7 +21,6 @@
 #include <rtems/asm.h>
 #include <rtems/system.h>
-#include <bspopts.h>
 
 /*
  * void _ISR_Handler()
@@ -161,6 +160,54 @@ save_isf:
         mov      %sp, %o1               ! 2nd arg to ISR Handler
 
+        /*
+         *  Increment ISR nest level and Thread dispatch disable level.
+         *
+         *  Register usage for this section:
+         *
+         *    l4 = _Thread_Dispatch_disable_level pointer
+         *    l5 = per cpu info pointer
+         *    l6 = _Thread_Dispatch_disable_level value
+         *    l7 = _ISR_Nest_level value
+         *
+         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
+         *        nest and thread dispatch disable levels are unnested.
+         */
+        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
+        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
+        sethi    %hi(_Per_CPU_Information), %l5
+        add      %l5, %lo(_Per_CPU_Information), %l5
+        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
+        add      %l6, 1, %l6
+        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
+        add      %l7, 1, %l7
+        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
+
+        /*
+         *  If ISR nest level was zero (now 1), then switch stack.
+         */
+        mov      %sp, %fp
+        subcc    %l7, 1, %l7             ! outermost interrupt handler?
+        bnz      dont_switch_stacks      ! No, then do not switch stacks
+        nop
+
+        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
+
+dont_switch_stacks:
+        /*
+         *  Make sure we have a place on the stack for the window overflow
+         *  trap handler to write into.  At this point it is safe to
+         *  enable traps again.
+         */
+        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
 /*
  *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
  *  set the PIL in the %psr to mask off interrupts with lower priority.
@@ -251,85 +298,6 @@ pil_fixed:
         wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
 dont_fix_pil2:
 
-        PUBLIC(_ISR_PER_CPU)
-SYM(_ISR_PER_CPU):
-
-#if defined(RTEMS_SMP)
-        sethi    %hi(_Per_CPU_Information), %l5
-        add      %l5, %lo(_Per_CPU_Information), %l5
-#if BSP_LEON3_SMP
-        /* LEON3 SMP support */
-        rd       %asr17, %l7
-        srl      %l7, 28, %l7      /* CPU number is upper 4 bits so shift */
-#else
-        mov      0, %l7
-        nop
-#endif
-        sll      %l7, PER_CPU_CONTROL_SIZE_LOG2, %l7    /* l7 = offset */
-        add      %l5, %l7, %l5     /* l5 = pointer to per CPU */
-
-        /*
-         *  On multi-core system, we need to use SMP safe versions
-         *  of ISR and Thread Dispatch critical sections.
-         *
-         *  _ISR_SMP_Enter returns the interrupt nest level.  If we are
-         *  outermost interrupt, then we need to switch stacks.
-         */
-        call     SYM(_ISR_SMP_Enter), 0
-        mov      %sp, %fp                ! delay slot
-        cmp      %o0, 0
-#else
-        /*
-         *  On single core system, we can directly use variables.
-         *
-         *  Increment ISR nest level and Thread dispatch disable level.
-         *
-         *  Register usage for this section:
-         *
-         *    l4 = _Thread_Dispatch_disable_level pointer
-         *    l5 = _ISR_Nest_level pointer
-         *    l6 = _Thread_Dispatch_disable_level value
-         *    l7 = _ISR_Nest_level value
-         *
-         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
-         *        nest and thread dispatch disable levels are unnested.
-         */
-        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
-        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
-        sethi    %hi(_Per_CPU_Information), %l5
-        add      %l5, %lo(_Per_CPU_Information), %l5
-        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
-        add      %l6, 1, %l6
-        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
-        add      %l7, 1, %l7
-        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
-
-        /*
-         *  If ISR nest level was zero (now 1), then switch stack.
-         */
-        mov      %sp, %fp
-        subcc    %l7, 1, %l7             ! outermost interrupt handler?
-#endif
-
-        /*
-         *  Do we need to switch to the interrupt stack?
-         */
-        beq,a    dont_switch_stacks      ! No, then do not switch stacks
-        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
-
-dont_switch_stacks:
-        /*
-         *  Make sure we have a place on the stack for the window overflow
-         *  trap handler to write into.  At this point it is safe to
-         *  enable traps again.
-         */
-        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
-
 /*
  *  Vector to user's handler.
  *
@@ -350,14 +318,6 @@ dont_switch_stacks:
         call     %g4, 0
         nop                              ! delay slot
 
-#if defined(RTEMS_SMP)
-        call     SYM(_ISR_SMP_Exit), 0
-        nop                              ! delay slot
-        cmp      %o0, 0
-        bz       simple_return
-        nop
-#endif
-
 /*
  *  Redisable traps so we can finish up the interrupt processing.
  *  This is a VERY conservative place to do this.
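
The two hunks above drop the _ISR_SMP_Enter/_ISR_SMP_Exit pair that
wrapped the nest and dispatch disable level updates in SMP-safe
critical sections.  Their contract, inferred purely from the call sites
in this diff (a sketch, not the score implementation):

    #include <stdint.h>

    /* Returns the previous ISR nest level; 0 means outermost interrupt */
    extern uint32_t _ISR_SMP_Enter( void );

    /* Returns 0 if a "simple" return suffices (no thread dispatch) */
    extern uint32_t _ISR_SMP_Exit( void );

    static void isr_smp_sketch( void ( *handler )( void ) )
    {
      if ( _ISR_SMP_Enter() == 0 ) {
        /* Outermost interrupt: the assembly switches to the interrupt stack */
      }

      ( *handler )();

      if ( _ISR_SMP_Exit() == 0 ) {
        return; /* simple return: restore registers and leave the trap */
      }

      /* Otherwise fall through to the interrupt dispatcher */
    }
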
@@ -368,7 +328,6 @@ dont_switch_stacks:
         mov      %l0, %psr               ! **** DISABLE TRAPS ****
         nop; nop; nop
 
-#if !defined(RTEMS_SMP)
         /*
          *  Decrement ISR nest level and Thread dispatch disable level.
          *
@@ -401,16 +360,18 @@ dont_switch_stacks:
         bnz      simple_return           ! Yes, then do a "simple" exit
         nop
 
         /*
          *  If a context switch is necessary, then do fudge stack to
          *  return to the interrupt dispatcher.
          */
         ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
         orcc     %l5, %g0, %g0           ! Is thread switch necessary?
-        bz       simple_return           ! No, then return
+        bz       simple_return           ! no, then do a simple return
         nop
-#endif
 
         /*
          *  Invoke interrupt dispatcher.
          */
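
The hunk above reinstates the uniprocessor epilogue: both levels are
unnested, and the interrupt dispatcher is entered only when the handler
returned to nest level zero, dispatching is enabled again, and a
dispatch is actually pending.  In rough C (a sketch with illustrative
names; the real check reads PER_CPU_DISPATCH_NEEDED relative to %l5):

    #include <stdint.h>

    extern volatile uint32_t _Thread_Dispatch_disable_level;
    extern volatile uint32_t isr_nest_level;  /* PER_CPU_ISR_NEST_LEVEL */
    extern volatile uint8_t  dispatch_needed; /* PER_CPU_DISPATCH_NEEDED */

    static int isr_epilogue_needs_dispatch( void )
    {
      /* Both levels are decremented unconditionally, as in the assembly */
      uint32_t nest_level = --isr_nest_level;
      uint32_t disable_level = --_Thread_Dispatch_disable_level;

      if ( nest_level != 0 || disable_level != 0 ) {
        return 0; /* still nested or dispatch disabled: simple return */
      }

      /* Is a thread switch necessary?  If not, do a simple return. */
      return dispatch_needed != 0;
    }
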
@@ -457,26 +418,13 @@ isr_dispatch:
          *    _Thread_Dispatch before leaving this ISR Dispatch context.
          */
 
-#if defined(RTEMS_SMP)
         sethi    %hi(_Per_CPU_Information), %l5
         add      %l5, %lo(_Per_CPU_Information), %l5
-#if BSP_LEON3_SMP
-        /* LEON3 SMP support */
-        rd       %asr17, %l7
-        srl      %l7, 28, %l7      /* CPU number is upper 4 bits so shift */
-#else
-        mov      0, %l7
-        nop
-#endif
-        sll      %l7, PER_CPU_CONTROL_SIZE_LOG2, %l7    /* l7 = offset */
-        add      %l5, %l7, %l5     /* l5 = pointer to per CPU */
-#else
-        sethi    %hi(_Per_CPU_Information), %l5
-        add      %l5, %lo(_Per_CPU_Information), %l5
-#endif
-        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
-        orcc     %l5, %g0, %g0     ! Is thread switch necessary?
-        bz       allow_nest_again
+        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l7
+        orcc     %l7, %g0, %g0     ! Is thread switch necesary?
+        bz       allow_nest_again  ! No, then clear out and return
         nop
 
         ! Yes, then invoke the dispatcher