score: Allow interrupts during thread dispatch
Use a processor-specific interrupt frame during context switches for the period in which the executing thread no longer executes on the processor and the heir thread is about to start execution. During this period we must not use a thread stack for interrupt processing.

Update #2809.
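To make the intent of the architecture-specific changes below easier to follow, here is a rough C sketch of the SMP hand-off that the SPARC, PowerPC, and ARM _CPU_Context_switch assembly implements: switch to the per-processor interrupt frame stack, clear the is-executing indicator of the outgoing context, and spin until the is-executing indicator of the (possibly changing) heir context can be acquired atomically. All type and function names in the sketch (per_cpu_t, cpu_context_t, switch_to_heir, switch_stack_to, load_context) are hypothetical illustrations, not RTEMS APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
  atomic_bool is_executing; /* true while some processor runs on this context */
  /* saved registers would follow here */
} cpu_context_t;

typedef struct {
  cpu_context_t *_Atomic heir;              /* may be updated by other processors */
  cpu_context_t          *executing;
  unsigned char           interrupt_frame_area[ 512 ]; /* temporary stack */
} per_cpu_t;

/* Hypothetical low-level helpers that are really provided in assembly. */
static void switch_stack_to( void *stack_top ) { (void) stack_top; }
static void load_context( cpu_context_t *context ) { (void) context; }

static void switch_to_heir( per_cpu_t *cpu, cpu_context_t *executing )
{
  cpu_context_t *heir;

  /*
   * Run on the per-processor interrupt frame: the outgoing thread's stack may
   * already be in use on another processor, so interrupts must not use it.
   */
  switch_stack_to(
    cpu->interrupt_frame_area + sizeof( cpu->interrupt_frame_area )
  );

  /* The executing context no longer executes on this processor. */
  atomic_store( &executing->is_executing, false );

  heir = atomic_load( &cpu->heir );

  for ( ;; ) {
    bool            expected = false;
    cpu_context_t *potential_new_heir;

    /* Try to update the is-executing indicator of the heir context. */
    if ( atomic_compare_exchange_weak( &heir->is_executing, &expected, true ) ) {
      break;
    }

    /* We may have a new heir while we wait. */
    potential_new_heir = atomic_load( &cpu->heir );

    /* Update the executing only if necessary to avoid cache line traffic. */
    if ( potential_new_heir != heir ) {
      heir = potential_new_heir;
      cpu->executing = heir;
    }
  }

  load_context( heir );
}

In the actual assembly below the indicator lives in the architecture-specific context control, and the wait loop polls with plain loads (ld/lwarx) before retrying the atomic update; the sketch only illustrates the ordering of the steps.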
@@ -178,17 +178,22 @@ done_flushing:
         mov     %g4, %wim
 
 #if defined(RTEMS_SMP)
-        ! The executing context no longer executes on this processor
+        /*
+         * The executing thread no longer executes on this processor.  Switch
+         * the stack to the temporary interrupt stack of this processor.  Mark
+         * the context of the executing thread as not executing.
+         */
+        add     %g6, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE, %sp
         st      %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
 
         ! Try to update the is executing indicator of the heir context
         mov     1, %g1
 
-try_update_is_executing:
+.Ltry_update_is_executing:
 
         swap    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
         cmp     %g1, 0
-        bne     check_is_executing
+        bne     .Lcheck_is_executing
 
         ! The next load is in a delay slot, which is all right
 #endif
@@ -225,12 +230,12 @@ try_update_is_executing:
         nop     ! delay slot
 
 #if defined(RTEMS_SMP)
-check_is_executing:
+.Lcheck_is_executing:
 
         ! Check the is executing indicator of the heir context
         ld      [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
         cmp     %g1, 0
-        beq     try_update_is_executing
+        beq     .Ltry_update_is_executing
         mov     1, %g1
 
         ! We may have a new heir
@@ -242,7 +247,7 @@ check_is_executing:
         ! Update the executing only if necessary to avoid cache line
         ! monopolization.
         cmp     %g2, %g4
-        beq     try_update_is_executing
+        beq     .Ltry_update_is_executing
         mov     1, %g1
 
         ! Calculate the heir context pointer
@@ -252,7 +257,7 @@ check_is_executing:
         ! Update the executing
         st      %g4, [%g6 + PER_CPU_OFFSET_EXECUTING]
 
-        ba      try_update_is_executing
+        ba      .Ltry_update_is_executing
         mov     1, %g1
 #endif
 

@@ -402,23 +402,30 @@ PROC (_CPU_Context_switch):
 #endif
 
 #ifdef RTEMS_SMP
-        /* The executing context no longer executes on this processor */
+        /*
+         * The executing thread no longer executes on this processor.  Switch
+         * the stack to the temporary interrupt stack of this processor.  Mark
+         * the context of the executing thread as not executing.
+         */
         msync
 
+        GET_SELF_CPU_CONTROL    r12
+        addi    r1, r12, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE
+
         li      r6, 0
         stw     r6, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
 
-check_is_executing:
+.Lcheck_is_executing:
 
         /* Check the is executing indicator of the heir context */
         addi    r6, r5, PPC_CONTEXT_OFFSET_IS_EXECUTING
         lwarx   r7, r0, r6
         cmpwi   r7, 0
-        bne     get_potential_new_heir
+        bne     .Lget_potential_new_heir
 
         /* Try to update the is executing indicator of the heir context */
         li      r7, 1
         stwcx.  r7, r0, r6
-        bne     get_potential_new_heir
+        bne     .Lget_potential_new_heir
         isync
 #endif
@@ -537,22 +544,20 @@ PROC (_CPU_Context_restore):
         b       restore_context
 
 #ifdef RTEMS_SMP
-get_potential_new_heir:
+.Lget_potential_new_heir:
 
-        GET_SELF_CPU_CONTROL    r6
-
         /* We may have a new heir */
 
         /* Read the executing and heir */
-        lwz     r7, PER_CPU_OFFSET_EXECUTING(r6)
-        lwz     r8, PER_CPU_OFFSET_HEIR(r6)
+        lwz     r7, PER_CPU_OFFSET_EXECUTING(r12)
+        lwz     r8, PER_CPU_OFFSET_HEIR(r12)
 
         /*
          * Update the executing only if necessary to avoid cache line
          * monopolization.
         */
         cmpw    r7, r8
-        beq     check_is_executing
+        beq     .Lcheck_is_executing
 
         /* Calculate the heir context pointer */
         sub     r7, r4, r7
@@ -560,7 +565,7 @@ get_potential_new_heir:
         clrrwi  r5, r4, PPC_DEFAULT_CACHE_LINE_POWER
 
         /* Update the executing */
-        stw     r8, PER_CPU_OFFSET_EXECUTING(r6)
+        stw     r8, PER_CPU_OFFSET_EXECUTING(r12)
 
-        b       check_is_executing
+        b       .Lcheck_is_executing
 #endif

@@ -69,8 +69,14 @@ DEFINE_FUNCTION_ARM(_CPU_Context_switch)
 #endif
 
 #ifdef RTEMS_SMP
-        /* The executing context no longer executes on this processor */
+        /*
+         * The executing thread no longer executes on this processor.  Switch
+         * the stack to the temporary interrupt stack of this processor.  Mark
+         * the context of the executing thread as not executing.
+         */
         dmb
+        GET_SELF_CPU_CONTROL    r2
+        add     sp, r2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
         mov     r3, #0
         strb    r3, [r0, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
 
@@ -128,8 +134,6 @@ DEFINE_FUNCTION_ARM(_CPU_Context_restore)
 #ifdef RTEMS_SMP
 .L_get_potential_new_heir:
 
-        GET_SELF_CPU_CONTROL    r2
-
         /* We may have a new heir */
 
         /* Read the executing and heir */

@@ -261,8 +261,16 @@ static inline void _User_extensions_Thread_switch(
   const Chain_Node *node = _Chain_Immutable_first( chain );
 
   if ( node != tail ) {
-    Per_CPU_Control *cpu_self = _Per_CPU_Get();
+    Per_CPU_Control *cpu_self;
+#if defined(RTEMS_SMP)
+    ISR_Level level;
+#endif
+
+    cpu_self = _Per_CPU_Get();
 
+#if defined(RTEMS_SMP)
+    _ISR_Local_disable( level );
+#endif
     _Per_CPU_Acquire( cpu_self );
 
     while ( node != tail ) {
@@ -275,6 +283,9 @@ static inline void _User_extensions_Thread_switch(
     }
 
     _Per_CPU_Release( cpu_self );
+#if defined(RTEMS_SMP)
+    _ISR_Local_enable( level );
+#endif
   }
 }
 

@@ -174,14 +174,7 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
     if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
       heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();
 
-    /*
-     * On SMP the complete context switch must be atomic with respect to one
-     * processor.  See also _Thread_Handler() since _Context_switch() may branch
-     * to this function.
-     */
-#if !defined( RTEMS_SMP )
     _ISR_Local_enable( level );
-#endif
 
     _User_extensions_Thread_switch( executing, heir );
     _Thread_Save_fp( executing );
@@ -195,16 +188,8 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
      */
     cpu_self = _Per_CPU_Get();
 
-#if !defined( RTEMS_SMP )
     _ISR_Local_disable( level );
-#endif
-  } while (
-#if defined( RTEMS_SMP )
-    false
-#else
-    cpu_self->dispatch_necessary
-#endif
-  );
+  } while ( cpu_self->dispatch_necessary );
 
 post_switch:
   _Assert( cpu_self->thread_dispatch_disable_level == 1 );

@@ -38,9 +38,6 @@ void _Thread_Handler( void )
   _Context_Initialization_at_thread_begin();
   executing = _Thread_Executing;
 
-  /* On SMP we enter _Thread_Handler() with interrupts disabled */
-  _SMP_Assert( _ISR_Get_level() != 0 );
-
   /*
    * have to put level into a register for those cpu's that use
    * inline asm here

@@ -25,8 +25,6 @@ void _Thread_Load_environment(
   Thread_Control *the_thread
 )
 {
-  uint32_t isr_level;
-
 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   if ( the_thread->Start.fp_context ) {
     the_thread->fp_context = the_thread->Start.fp_context;
@@ -38,25 +36,13 @@ void _Thread_Load_environment(
   the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
   the_thread->budget_callout   = the_thread->Start.budget_callout;
 
-#if defined( RTEMS_SMP )
-  /*
-   * On SMP we have to start the threads with interrupts disabled, see also
-   * _Thread_Handler() and _Thread_Dispatch().  In _Thread_Handler() the
-   * _ISR_Set_level() is used to set the desired interrupt state of the thread.
-   */
-  isr_level = CPU_MODES_INTERRUPT_MASK;
-#else
-  isr_level = the_thread->Start.isr_level;
-#endif
-
   _Context_Initialize(
     &the_thread->Registers,
     the_thread->Start.Initial_stack.area,
     the_thread->Start.Initial_stack.size,
-    isr_level,
+    the_thread->Start.isr_level,
     _Thread_Handler,
     the_thread->is_fp,
     the_thread->Start.tls_area
   );
-
 }