sparc: Make _CPU_ISR_Dispatch_disable per-CPU
This variable must be available for each processor in the system.
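In effect, the commit moves the flag out of a single global variable and into the CPU-specific part of the per-CPU control structure, so that every processor owns an independent copy. A minimal C sketch of the resulting layout, using only names visible in the hunks below; the array size and the extra placeholder field are illustrative, not the real Per_CPU_Control definition:

#include <stdint.h>

/* CPU-specific per-CPU state, as defined in the header hunk below. */
typedef struct {
  uint32_t isr_dispatch_disable;
} CPU_Per_CPU_control;

/* Simplified stand-in for the score's Per_CPU_Control.  The static assert
 * added below pins cpu_per_cpu.isr_dispatch_disable to byte offset 0, which
 * is what SPARC_PER_CPU_ISR_DISPATCH_DISABLE encodes for the assembly code. */
typedef struct {
  CPU_Per_CPU_control cpu_per_cpu;
  uint32_t            other_score_fields;   /* illustrative placeholder */
} Per_CPU_Control_sketch;

/* One entry per processor replaces the former shared global flag. */
static Per_CPU_Control_sketch per_cpu_table[ 4 ];

uint32_t isr_dispatch_disable_of( uint32_t cpu_index )
{
  return per_cpu_table[ cpu_index ].cpu_per_cpu.isr_dispatch_disable;
}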
@@ -20,7 +20,7 @@
  */

 #include <rtems/asm.h>
-#include <rtems/system.h>
+#include <rtems/score/percpu.h>
 #include <bspopts.h>

 .macro GET_SELF_CPU_CONTROL REG, TMP
@@ -59,19 +59,11 @@ SYM(_CPU_Context_switch):
         std     %g4, [%o0 + G4_OFFSET]
         std     %g6, [%o0 + G6_OFFSET]

-        ! load the address of the ISR stack nesting prevention flag
-        sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %g2
-        ld      [%g2 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %g2
-        ! save it a bit later so we do not waste a couple of cycles
-
         std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
         std     %l2, [%o0 + L2_OFFSET]
         std     %l4, [%o0 + L4_OFFSET]
         std     %l6, [%o0 + L6_OFFSET]

-        ! Now actually save ISR stack nesting prevention flag
-        st      %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]
-
         std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
         std     %i2, [%o0 + I2_OFFSET]
         std     %i4, [%o0 + I4_OFFSET]
@@ -82,13 +74,24 @@ SYM(_CPU_Context_switch):
         std     %o4, [%o0 + O4_OFFSET]
         std     %o6, [%o0 + O6_SP_OFFSET]

+        ! o3 = self per-CPU control
+        GET_SELF_CPU_CONTROL %o3, %o4
+
+        ! load the ISR stack nesting prevention flag
+        ld      [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %o4
+        ! save it a bit later so we do not waste a couple of cycles
+
         rd      %psr, %o2
         st      %o2, [%o0 + PSR_OFFSET]      ! save status register

+        ! Now actually save ISR stack nesting prevention flag
+        st      %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]
+
        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
+        *    o3 = self per-CPU control
         */

        PUBLIC(_CPU_Context_restore_heir)
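During a context switch the flag now travels with the thread: the outgoing thread's context records the executing processor's copy, and the restore path writes the heir's saved value back into the per-CPU field. A hedged C rendering of what the assembly above and the restore hunks below do; the type and function names are illustrative, not the RTEMS API:

#include <stdint.h>

typedef struct {
  uint32_t isr_dispatch_disable;   /* ISR_DISPATCH_DISABLE_STACK_OFFSET slot */
  /* ... saved registers ... */
} Context_Control_sketch;

typedef struct {
  uint32_t isr_dispatch_disable;   /* SPARC_PER_CPU_ISR_DISPATCH_DISABLE slot */
} Per_CPU_sketch;

void context_switch_sketch(
  Context_Control_sketch *running,
  Context_Control_sketch *heir,
  Per_CPU_sketch         *self_cpu
)
{
  /* _CPU_Context_switch: snapshot the live flag into the outgoing context. */
  running->isr_dispatch_disable = self_cpu->isr_dispatch_disable;

  /* _CPU_Context_restore_heir: reload the incoming thread's saved flag. */
  self_cpu->isr_dispatch_disable = heir->isr_dispatch_disable;
}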
@@ -196,7 +199,6 @@ done_flushing:

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
-        sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %o3
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
@@ -205,7 +207,7 @@ done_flushing:
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
-        st      %o2, [%o3 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
+        st      %o2, [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the output registers
        ldd     [%o1 + I2_OFFSET], %i2
@@ -235,6 +237,7 @@ done_flushing:
 SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
+        GET_SELF_CPU_CONTROL %o3, %o4
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot
        .align 4
@@ -591,11 +594,10 @@ dont_fix_pil2:

        orcc    %l6, %g0, %g0    ! Is dispatching disabled?
        bnz     simple_return    ! Yes, then do a "simple" exit
-                                 ! NOTE: Use the delay slot
-        sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6
+        nop

        ! Are we dispatching from a previous ISR in the interrupted thread?
-        ld      [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
+        ld      [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %l7
        orcc    %l7, %g0, %g0    ! Is this thread already doing an ISR?
        bnz     simple_return    ! Yes, then do a "simple" exit
        nop
@@ -606,9 +608,9 @@ dont_fix_pil2:
         *  return to the interrupt dispatcher.
         */

-        ldub    [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
+        ldub    [%l5 + PER_CPU_DISPATCH_NEEDED], %l6

-        orcc    %l5, %g0, %g0    ! Is thread switch necessary?
+        orcc    %l6, %g0, %g0    ! Is thread switch necessary?
        bz      simple_return    ! no, then do a simple return
        nop

@@ -616,12 +618,9 @@ dont_fix_pil2:
         *  Invoke interrupt dispatcher.
         */

-        PUBLIC(_ISR_Dispatch)
-SYM(_ISR_Dispatch):
        ! Set ISR dispatch nesting prevention flag
        mov     1, %l6
-        sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
-        st      %l6, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
+        st      %l6, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The following subtract should get us back on the interrupted
@@ -676,8 +675,7 @@ dispatchAgain:
 allow_nest_again:

        ! Zero out ISR stack nesting prevention flag
-        sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
-        st      %g0, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
+        st      %g0, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
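Taken together, the interrupt-exit hunks above implement a nesting guard around thread dispatching: a thread that already has an _ISR_Dispatch frame on its stack (flag set) is not dispatched again, and the flag is raised before the dispatcher runs and cleared afterwards. A rough C outline of that control flow, with placeholder names rather than the real entry points:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
  uint32_t isr_dispatch_disable;   /* per-CPU copy of the nesting flag */
  bool     dispatch_needed;        /* PER_CPU_DISPATCH_NEEDED in the assembly */
} Per_CPU_sketch;

/* Stub standing in for the thread dispatcher called from the ISR epilogue. */
static void thread_dispatch_stub( void )
{
}

void isr_exit_sketch( Per_CPU_sketch *self_cpu, bool dispatching_disabled )
{
  if ( dispatching_disabled )
    return;                                  /* "simple" exit */

  if ( self_cpu->isr_dispatch_disable != 0 )
    return;                                  /* already dispatching in this thread */

  if ( !self_cpu->dispatch_needed )
    return;                                  /* no thread switch necessary */

  self_cpu->isr_dispatch_disable = 1;        /* set nesting prevention flag */
  thread_dispatch_stub();
  self_cpu->isr_dispatch_disable = 0;        /* allow_nest_again */
}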
@@ -19,8 +19,15 @@

 #include <rtems/system.h>
 #include <rtems/score/isr.h>
+#include <rtems/score/percpu.h>
 #include <rtems/rtems/cache.h>

+RTEMS_STATIC_ASSERT(
+  offsetof( Per_CPU_Control, cpu_per_cpu.isr_dispatch_disable)
+    == SPARC_PER_CPU_ISR_DISPATCH_DISABLE,
+  SPARC_PER_CPU_ISR_DISPATCH_DISABLE
+);
+
 /*
  *  This initializes the set of opcodes placed in each trap
  *  table entry.  The routine which installs a handler is responsible
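The new RTEMS_STATIC_ASSERT keeps the hand-maintained offset constant used by the assembly in sync with the actual structure layout: if the field ever moves, the build fails instead of the interrupt path silently touching the wrong memory. The same idea in plain C11, independent of the RTEMS wrapper; all names here are examples:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_ISR_DISPATCH_DISABLE_OFFSET 0

typedef struct {
  uint32_t isr_dispatch_disable;
} cpu_per_cpu_example;

typedef struct {
  cpu_per_cpu_example cpu_per_cpu;
  uint32_t            other_field;
} per_cpu_example;

/* Compilation fails if the structure is reordered without updating the
 * offset constant that assembly code depends on. */
static_assert(
  offsetof( per_cpu_example, cpu_per_cpu.isr_dispatch_disable )
    == EXAMPLE_ISR_DISPATCH_DISABLE_OFFSET,
  "offset constant out of sync with structure layout"
);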
@@ -65,13 +72,6 @@ void _CPU_Initialize(void)
   pointer = &_CPU_Null_fp_context;
   _CPU_Context_save_fp( &pointer );
 #endif
-
-  /*
-   *  Since no tasks have been created yet and no interrupts have occurred,
-   *  there is no way that the currently executing thread can have an
-   *  _ISR_Dispatch stack frame on its stack.
-   */
-  _CPU_ISR_Dispatch_disable = 0;
 }

 uint32_t   _CPU_ISR_Get_level( void )
@@ -357,7 +357,13 @@ typedef struct {
 /** This defines the size of the minimum stack frame. */
 #define CPU_MINIMUM_STACK_FRAME_SIZE          0x60

-#define CPU_PER_CPU_CONTROL_SIZE 0
+#define CPU_PER_CPU_CONTROL_SIZE 4

+/**
+ * @brief Offset of the CPU_Per_CPU_control::isr_dispatch_disable field
+ * relative to the Per_CPU_Control begin.
+ */
+#define SPARC_PER_CPU_ISR_DISPATCH_DISABLE 0
+
 /**
  * @defgroup Contexts SPARC Context Structures
@@ -383,7 +389,13 @@ typedef struct {
 #ifndef ASM

 typedef struct {
-  /* There is no CPU specific per-CPU state */
+  /**
+   * This flag is context switched with each thread.  It indicates
+   * that THIS thread has an _ISR_Dispatch stack frame on its stack.
+   * By using this flag, we can avoid nesting more interrupt dispatching
+   * attempts on a previously interrupted thread's stack.
+   */
+  uint32_t isr_dispatch_disable;
 } CPU_Per_CPU_control;

 /**
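With the uint32_t field added, CPU_PER_CPU_CONTROL_SIZE grows from 0 to 4; the constant presumably has to track sizeof(CPU_Per_CPU_control), since the score derives the per-CPU layout from it. A minimal check spelling out that invariant, with example names; whether RTEMS enforces this elsewhere is not shown in this diff:

#include <stdint.h>

#define EXAMPLE_CPU_PER_CPU_CONTROL_SIZE 4

typedef struct {
  uint32_t isr_dispatch_disable;
} cpu_per_cpu_control_example;

/* The byte count consumed by assembly and the score must match the
 * C structure definition. */
_Static_assert(
  sizeof( cpu_per_cpu_control_example ) == EXAMPLE_CPU_PER_CPU_CONTROL_SIZE,
  "size constant out of sync with CPU_Per_CPU_control"
);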
@@ -768,14 +780,6 @@ typedef struct {
  */
 SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;

-/**
- * This flag is context switched with each thread.  It indicates
- * that THIS thread has an _ISR_Dispatch stack frame on its stack.
- * By using this flag, we can avoid nesting more interrupt dispatching
- * attempts on a previously interrupted thread's stack.
- */
-SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;
-
 /**
  * The following type defines an entry in the SPARC's trap table.
  *