score: Rework ask for help requests

Process ask for help requests on the current processor.  This avoids
using inter-processor interrupts to make the system behaviour a bit more
predictable.

Update #4531.
This commit is contained in:
Sebastian Huber
2021-10-29 09:30:13 +02:00
parent 4d90289e71
commit fc64e837c3
6 changed files with 112 additions and 102 deletions

View File

@@ -174,30 +174,6 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
}
#endif
#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );
/**
* @brief Registers an ask for help request if necessary.
*
* The actual ask for help operation is carried out during
* _Thread_Do_dispatch() on a processor related to the thread. This yields a
* better separation of scheduler instances. A thread of one scheduler
* instance should not be forced to carry out too much work for threads on
* other scheduler instances.
*
* @param the_thread The thread in need for help.
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  /* Without helping nodes there is nothing to ask help for. */
  if ( !( the_thread->Scheduler.helping_nodes > 0 ) ) {
    return;
  }

  _Scheduler_Request_ask_for_help( the_thread );
}
#endif
/**
* The preferred method to add a new scheduler is to define the jump table
* entries and add a case to the _Scheduler_Initialize routine.

View File

@@ -562,6 +562,73 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
return cpu->Scheduler.context == context;
}
/**
* @brief Removes the thread's ask for help request from the processor.
*
* The caller must be the owner of the thread's scheduler lock.
*
* @param[in, out] thread is the thread of the ask for help request.
*
* @param[in, out] cpu is the processor from which the ask for help request
* should be removed.
*/
void _Scheduler_SMP_Remove_ask_for_help_from_processor(
Thread_Control *thread,
Per_CPU_Control *cpu
);
/**
* @brief Cancels the thread's ask for help request.
*
* The caller must be the owner of the thread's scheduler lock.
*
* @param[in, out] thread is the thread of the ask for help request.
*/
static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
  Per_CPU_Control *help_cpu;

  _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );

  /*
   * A non-NULL ask_for_help_cpu indicates that a request is registered on
   * that processor; remove it there.  The request being pending is the
   * uncommon case, hence the branch prediction hint.
   */
  help_cpu = thread->Scheduler.ask_for_help_cpu;

  if ( RTEMS_PREDICT_FALSE( help_cpu != NULL ) ) {
    _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, help_cpu );
  }
}
/**
* @brief Requests to ask for help for the thread.
*
* The actual ask for help operations are carried out during
* _Thread_Do_dispatch() on the current processor.
*
* An alternative approach would be to carry out the requests on a processor
* related to the thread. This could reduce the overhead for the preempting
* thread a bit, however, there are at least two problems with this approach.
* Firstly, we have to figure out what is a processor related to the thread.
* Secondly, we may need an inter-processor interrupt.
*
* @param[in, out] thread is the thread in need for help.
*/
static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
ISR_lock_Context lock_context;
Per_CPU_Control *cpu_self;
/* Register the request on the current processor. */
cpu_self = _Per_CPU_Get();
/* Exactly one request per thread may be pending at a time. */
_Assert( thread->Scheduler.ask_for_help_cpu == NULL );
thread->Scheduler.ask_for_help_cpu = cpu_self;
/*
 * Force a dispatch on this processor so that _Thread_Do_dispatch() processes
 * the Threads_in_need_for_help chain.  NOTE(review): ask_for_help_cpu and
 * dispatch_necessary are written before the per-CPU lock is acquired —
 * presumably covered by the caller holding the thread's scheduler lock;
 * confirm before reordering anything here.
 */
cpu_self->dispatch_necessary = true;
/* The help chain itself is protected by the per-CPU lock. */
_Per_CPU_Acquire( cpu_self, &lock_context );
_Chain_Append_unprotected(
&cpu_self->Threads_in_need_for_help,
&thread->Scheduler.Help_node
);
_Per_CPU_Release( cpu_self, &lock_context );
}
/**
* @brief This enumeration defines what a scheduler should do with a node which
* could be scheduled.
@@ -616,7 +683,7 @@ static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
owner_sticky_level = node->sticky_level;
if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
_Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
_Scheduler_SMP_Cancel_ask_for_help( owner );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( owner, &lock_context );
return SCHEDULER_SMP_DO_SCHEDULE;
@@ -769,22 +836,15 @@ static inline void _Scheduler_SMP_Preempt(
_Thread_Scheduler_acquire_critical( victim_owner, &lock_context );
if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
cpu = _Thread_Get_CPU( victim_owner );
if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
_Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );
if ( victim_owner->Scheduler.helping_nodes > 0 ) {
ISR_lock_Context lock_context_2;
_Per_CPU_Acquire( cpu, &lock_context_2 );
_Chain_Append_unprotected(
&cpu->Threads_in_need_for_help,
&victim_owner->Scheduler.Help_node
);
_Per_CPU_Release( cpu, &lock_context_2 );
_Scheduler_SMP_Request_ask_for_help( victim_owner );
}
}
cpu = _Thread_Get_CPU( victim_owner );
} else {
cpu = _Thread_Get_CPU( victim_idle );
}
@@ -1030,10 +1090,7 @@ static inline void _Scheduler_SMP_Enqueue_scheduled(
if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
Per_CPU_Control *cpu;
_Thread_Scheduler_cancel_need_for_help(
owner,
_Thread_Get_CPU( owner )
);
_Scheduler_SMP_Cancel_ask_for_help( owner );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
cpu = _Thread_Get_CPU( node_idle );
_Thread_Set_CPU( owner, cpu );
@@ -1252,8 +1309,8 @@ static inline void _Scheduler_SMP_Block(
_Assert( sticky_level >= 0 );
_Thread_Scheduler_acquire_critical( thread, &lock_context );
_Scheduler_SMP_Cancel_ask_for_help( thread );
cpu = _Thread_Get_CPU( thread );
_Thread_Scheduler_cancel_need_for_help( thread, cpu );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1316,7 +1373,8 @@ static inline void _Scheduler_SMP_Unblock(
{
Scheduler_SMP_Node_state node_state;
Priority_Control priority;
bool needs_help;
_Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );
++node->sticky_level;
_Assert( node->sticky_level > 0 );
@@ -1346,18 +1404,19 @@ static inline void _Scheduler_SMP_Unblock(
if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
Priority_Control insert_priority;
bool needs_help;
insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
needs_help = ( *enqueue )( context, node, insert_priority );
if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
_Scheduler_SMP_Request_ask_for_help( thread );
}
} else {
_Assert( node_state == SCHEDULER_SMP_NODE_READY );
_Assert( node->sticky_level > 0 );
_Assert( node->idle == NULL );
needs_help = true;
}
if ( needs_help ) {
_Scheduler_Ask_for_help( thread );
_Scheduler_SMP_Request_ask_for_help( thread );
}
}
@@ -1562,10 +1621,7 @@ static inline bool _Scheduler_SMP_Ask_for_help(
) {
Thread_Control *lowest_scheduled_idle;
_Thread_Scheduler_cancel_need_for_help(
thread,
_Thread_Get_CPU( thread )
);
_Scheduler_SMP_Cancel_ask_for_help( thread );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1595,10 +1651,7 @@ static inline bool _Scheduler_SMP_Ask_for_help(
success = false;
}
} else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
_Thread_Scheduler_cancel_need_for_help(
thread,
_Thread_Get_CPU( thread )
);
_Scheduler_SMP_Cancel_ask_for_help( thread );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( thread, &lock_context );
_Scheduler_Discard_idle_thread(

View File

@@ -306,10 +306,24 @@ typedef struct {
Chain_Control Scheduler_nodes;
/**
* @brief Node for the Per_CPU_Control::Threads_in_need_for_help chain.
* @brief If an ask for help request for the thread is pending, then this
* member references the processor on which the ask for help request is
* registered, otherwise it is NULL.
*
* This chain is protected by the Per_CPU_Control::Lock lock of the assigned
* processor.
* Depending on the state of the thread and usage context, this member is
* protected by the Per_CPU_Control::Lock lock of the referenced processor,
* the scheduler lock of the thread (Thread_Scheduler_control::Lock), or the
* thread state lock.
*/
struct Per_CPU_Control *ask_for_help_cpu;
/**
* @brief This member is the node for the
* Per_CPU_Control::Threads_in_need_for_help chain.
*
* This chain is protected by the Per_CPU_Control::Lock lock of the processor
* on which the ask for help request is registered
* (Thread_Scheduler_control::ask_for_help_cpu).
*/
Chain_Node Help_node;

View File

@@ -1494,32 +1494,6 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
}
#endif
#if defined(RTEMS_SMP)
/**
* @brief Cancels the thread's need for help.
*
* @param the_thread The thread to cancel the help request of.
* @param cpu The cpu to get the lock context of in order to
* cancel the help request.
*/
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  ISR_lock_Context  lock_context;
  Chain_Node       *help_node;

  /* The help node chain is protected by the per-CPU lock of this processor. */
  help_node = &the_thread->Scheduler.Help_node;
  _Per_CPU_Acquire( cpu, &lock_context );

  if ( !_Chain_Is_node_off_chain( help_node ) ) {
    _Chain_Extract_unprotected( help_node );
    _Chain_Set_off_chain( help_node );
  }

  _Per_CPU_Release( cpu, &lock_context );
}
#endif
/**
* @brief Gets the home scheduler of the thread.
*

View File

@@ -21,28 +21,21 @@
#include <rtems/score/schedulersmpimpl.h>
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread )
void _Scheduler_SMP_Remove_ask_for_help_from_processor(
Thread_Control *thread,
Per_CPU_Control *cpu
)
{
ISR_lock_Context scheduler_lock_context;
ISR_lock_Context lock_context;
_Thread_Scheduler_acquire_critical( the_thread, &scheduler_lock_context );
_Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
if ( _Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
Per_CPU_Control *cpu;
ISR_lock_Context per_cpu_lock_context;
_Per_CPU_Acquire( cpu, &lock_context );
cpu = _Thread_Get_CPU( the_thread );
_Per_CPU_Acquire( cpu, &per_cpu_lock_context );
_Chain_Append_unprotected(
&cpu->Threads_in_need_for_help,
&the_thread->Scheduler.Help_node
);
_Per_CPU_Release( cpu, &per_cpu_lock_context );
_Thread_Dispatch_request( _Per_CPU_Get(), cpu );
if ( thread->Scheduler.ask_for_help_cpu == cpu ) {
_Chain_Extract_unprotected( &thread->Scheduler.Help_node );
thread->Scheduler.ask_for_help_cpu = NULL;
}
_Thread_Scheduler_release_critical( the_thread, &scheduler_lock_context );
_Per_CPU_Release( cpu, &lock_context );
}

View File

@@ -172,8 +172,8 @@ static ISR_Level _Thread_Preemption_intervention(
Thread_Control *the_thread;
node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
_Chain_Set_off_chain( node );
the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
the_thread->Scheduler.ask_for_help_cpu = NULL;
_Per_CPU_Release( cpu_self, &lock_context );