score: Use an ISR lock for Per_CPU_Control::Lock

The use of a hand-crafted lock for Per_CPU_Control::Lock was necessary
at some point during the SMP support development, but it is no longer
justified.
Sebastian Huber
2019-04-11 13:47:50 +02:00
parent 18d45d9d25
commit e97b7c9a7a
11 changed files with 138 additions and 135 deletions
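As a rough orientation before the per-file diffs, here is a minimal sketch (not taken from the commit; the function name and local variables are illustrative) of how acquire/release sites change: the per-CPU lock used to be an SMP ticket lock hidden behind macros that carried no caller-side state, and it is now an ordinary ISR lock, so every site passes an ISR_lock_Context, following the pattern used in the reworked userextimpl.h below.

#include <rtems/score/percpu.h>
#include <rtems/score/isrlock.h>

/* Illustrative caller of the reworked per-CPU lock API: the lock is now an
 * ISR lock, so the caller disables interrupts and supplies a lock context. */
static void example_per_cpu_critical_section( void )
{
  Per_CPU_Control *cpu_self;
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable( &lock_context );
  cpu_self = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu_self, &lock_context );
  /* ... access members protected by Per_CPU_Control::Lock, for example
   * the Threads_in_need_for_help chain ... */
  _Per_CPU_Release( cpu_self, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
}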

View File

@@ -28,7 +28,6 @@
#include <rtems/score/chain.h>
#include <rtems/score/isrlock.h>
#include <rtems/score/smp.h>
#include <rtems/score/smplock.h>
#include <rtems/score/timestamp.h>
#include <rtems/score/watchdog.h>
#endif
@@ -333,7 +332,7 @@ typedef struct Per_CPU_Control {
*
* It is volatile since interrupts may alter this flag.
*
* This field is not protected by a lock and must be accessed only by this
* This member is not protected by a lock and must be accessed only by this
* processor. Code (e.g. scheduler and post-switch action requests) running
* on another processors must use an inter-processor interrupt to set the
* thread dispatch necessary indicator to true.
@@ -352,7 +351,8 @@ typedef struct Per_CPU_Control {
/**
* @brief This is the thread executing on this processor.
*
* This field is not protected by a lock. The only writer is this processor.
* This member is not protected by a lock. The only writer is this
* processor.
*
* On SMP configurations a thread may be registered as executing on more than
* one processor in case a thread migration is in progress. On SMP
@@ -364,10 +364,10 @@ typedef struct Per_CPU_Control {
/**
* @brief This is the heir thread for this processor.
*
* This field is not protected by a lock. The only writer after multitasking
* start is the scheduler owning this processor. It is assumed that stores
* to pointers are atomic on all supported SMP architectures. The CPU port
* specific code (inter-processor interrupt handling and
* This member is not protected by a lock. The only writer after
* multitasking start is the scheduler owning this processor. It is assumed
* that stores to pointers are atomic on all supported SMP architectures.
* The CPU port specific code (inter-processor interrupt handling and
* _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
* last value written.
*
@@ -418,39 +418,31 @@ typedef struct Per_CPU_Control {
#if defined( RTEMS_SMP )
/**
* @brief This lock protects some parts of the low-level thread dispatching.
*
* We must use a ticket lock here since we cannot transport a local context
* through the context switch.
*
* @see _Thread_Dispatch().
* @brief This lock protects some members of this structure.
*/
SMP_ticket_lock_Control Lock;
ISR_lock_Control Lock;
#if defined( RTEMS_PROFILING )
/**
* @brief Lock statistics for the per-CPU lock.
*/
SMP_lock_Stats Lock_stats;
/**
* @brief Lock statistics context for the per-CPU lock.
*/
SMP_lock_Stats_context Lock_stats_context;
#endif
/**
* @brief Lock context used to acquire all per-CPU locks.
*
* This member is protected by the Per_CPU_Control::Lock lock.
*
* @see _Per_CPU_Acquire_all().
*/
ISR_lock_Context Lock_context;
/**
* @brief Chain of threads in need for help.
*
* This field is protected by the Per_CPU_Control::Lock lock.
* This member is protected by the Per_CPU_Control::Lock lock.
*/
Chain_Control Threads_in_need_for_help;
/**
* @brief Bit field for SMP messages.
*
* This bit field is not protected locks. Atomic operations are used to
* set and get the message bits.
* This member is not protected by locks. Atomic operations are used to set
* and get the message bits.
*/
Atomic_Ulong message;
@@ -488,7 +480,7 @@ typedef struct Per_CPU_Control {
/**
* @brief Indicates the current state of the CPU.
*
* This field is protected by the _Per_CPU_State_lock lock.
* This member is protected by the _Per_CPU_State_lock lock.
*
* @see _Per_CPU_State_change().
*/
@@ -539,62 +531,11 @@ typedef struct {
*/
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire( cpu ) \
_SMP_ticket_lock_Acquire( \
&( cpu )->Lock, \
&( cpu )->Lock_stats, \
&( cpu )->Lock_stats_context \
)
#else
#define _Per_CPU_Acquire( cpu ) \
do { \
(void) ( cpu ); \
} while ( 0 )
#endif
#define _Per_CPU_Acquire( cpu, lock_context ) \
_ISR_lock_Acquire( &( cpu )->Lock, lock_context )
#if defined( RTEMS_SMP )
#define _Per_CPU_Release( cpu ) \
_SMP_ticket_lock_Release( \
&( cpu )->Lock, \
&( cpu )->Lock_stats_context \
)
#else
#define _Per_CPU_Release( cpu ) \
do { \
(void) ( cpu ); \
} while ( 0 )
#endif
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
do { \
uint32_t ncpus = _SMP_Get_processor_maximum(); \
uint32_t cpu; \
_ISR_Local_disable( isr_cookie ); \
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
_Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
} \
} while ( 0 )
#else
#define _Per_CPU_Acquire_all( isr_cookie ) \
_ISR_Local_disable( isr_cookie )
#endif
#if defined( RTEMS_SMP )
#define _Per_CPU_Release_all( isr_cookie ) \
do { \
uint32_t ncpus = _SMP_Get_processor_maximum(); \
uint32_t cpu; \
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
_Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
} \
_ISR_Local_enable( isr_cookie ); \
} while ( 0 )
#else
#define _Per_CPU_Release_all( isr_cookie ) \
_ISR_Local_enable( isr_cookie )
#endif
#define _Per_CPU_Release( cpu, lock_context ) \
_ISR_lock_Release( &( cpu )->Lock, lock_context )
/*
* If we get the current processor index in a context which allows thread
@@ -671,6 +612,60 @@ static inline bool _Per_CPU_Is_boot_processor(
#endif
}
RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
uint32_t cpu_max;
uint32_t cpu_index;
Per_CPU_Control *previous_cpu;
cpu_max = _SMP_Get_processor_maximum();
previous_cpu = _Per_CPU_Get_by_index( 0 );
_ISR_lock_ISR_disable( lock_context );
_Per_CPU_Acquire( previous_cpu, lock_context );
for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
Per_CPU_Control *cpu;
cpu = _Per_CPU_Get_by_index( cpu_index );
_Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
previous_cpu = cpu;
}
#else
_ISR_lock_ISR_disable( lock_context );
#endif
}
RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
uint32_t cpu_max;
uint32_t cpu_index;
Per_CPU_Control *cpu;
cpu_max = _SMP_Get_processor_maximum();
cpu = _Per_CPU_Get_by_index( cpu_max - 1 );
for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
Per_CPU_Control *previous_cpu;
previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
_Per_CPU_Release( cpu, &previous_cpu->Lock_context );
cpu = previous_cpu;
}
_Per_CPU_Release( cpu, lock_context );
_ISR_lock_ISR_enable( lock_context );
#else
_ISR_lock_ISR_enable( lock_context );
#endif
}
#if defined( RTEMS_SMP )
/**
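A minimal usage sketch of the new inline routines (hypothetical caller, not part of the commit): the caller provides a single ISR_lock_Context; the contexts needed for the remaining processors are kept in the new Per_CPU_Control::Lock_context member of the previously acquired per-CPU control, and the locks are released in reverse order. The user extension set routines further below use exactly this pattern.

#include <rtems/score/percpu.h>
#include <rtems/score/isrlock.h>

/* Hypothetical caller: acquire every per-CPU lock, perform some global
 * update, then release the locks again.  Interrupts stay disabled for the
 * whole section. */
static void example_with_all_per_cpu_locks_held( void )
{
  ISR_lock_Context lock_context;

  _Per_CPU_Acquire_all( &lock_context );
  /* All Per_CPU_Control::Lock locks are held here, e.g. to update the
   * _User_extensions_Switches_list consistently. */
  _Per_CPU_Release_all( &lock_context );
}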

View File

@@ -566,13 +566,13 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
)
{
Thread_Control *victim_thread;
ISR_lock_Context lock_context;
ISR_lock_Context scheduler_lock_context;
Per_CPU_Control *victim_cpu;
victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
_Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
_Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
victim_cpu = _Thread_Get_CPU( victim_thread );
@@ -580,16 +580,18 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
_Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
if ( victim_thread->Scheduler.helping_nodes > 0 ) {
_Per_CPU_Acquire( victim_cpu );
ISR_lock_Context per_cpu_lock_context;
_Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
_Chain_Append_unprotected(
&victim_cpu->Threads_in_need_for_help,
&victim_thread->Scheduler.Help_node
);
_Per_CPU_Release( victim_cpu );
_Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
}
}
_Thread_Scheduler_release_critical( victim_thread, &lock_context );
_Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
_Scheduler_SMP_Allocate_processor(
context,

View File

@@ -990,14 +990,16 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
Per_CPU_Control *cpu
)
{
_Per_CPU_Acquire( cpu );
ISR_lock_Context lock_context;
_Per_CPU_Acquire( cpu, &lock_context );
if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
_Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
_Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
}
_Per_CPU_Release( cpu );
_Per_CPU_Release( cpu, &lock_context );
}
#endif

View File

@@ -261,34 +261,29 @@ static inline void _User_extensions_Thread_switch(
node = _Chain_Immutable_first( chain );
if ( node != tail ) {
Per_CPU_Control *cpu_self;
#if defined(RTEMS_SMP)
ISR_Level level;
#endif
ISR_lock_Context lock_context;
Per_CPU_Control *cpu_self;
cpu_self = _Per_CPU_Get();
#if defined(RTEMS_SMP)
_ISR_Local_disable( level );
#endif
_Per_CPU_Acquire( cpu_self );
_ISR_lock_ISR_disable( &lock_context );
_Per_CPU_Acquire( cpu_self, &lock_context );
#if defined(RTEMS_SMP)
node = _Chain_Immutable_first( chain );
#endif
while ( node != tail ) {
const User_extensions_Switch_control *extension =
(const User_extensions_Switch_control *) node;
(*extension->thread_switch)( executing, heir );
const User_extensions_Switch_control *extension;
extension = (const User_extensions_Switch_control *) node;
node = _Chain_Immutable_next( node );
(*extension->thread_switch)( executing, heir );
}
_Per_CPU_Release( cpu_self );
#if defined(RTEMS_SMP)
_ISR_Local_enable( level );
_Per_CPU_Release( cpu_self, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
#endif
}
}

View File

@@ -58,20 +58,17 @@ int pthread_spin_lock( pthread_spinlock_t *spinlock )
POSIX_Spinlock_Control *the_spinlock;
ISR_Level level;
#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
Per_CPU_Control *cpu_self;
SMP_lock_Stats unused_stats;
SMP_lock_Stats_context unused_context;
#endif
the_spinlock = _POSIX_Spinlock_Get( spinlock );
_ISR_Local_disable( level );
#if defined(RTEMS_SMP)
#if defined(RTEMS_PROFILING)
/* The lock statistics are incorrect in case of nested pthread spinlocks */
cpu_self = _Per_CPU_Get();
#endif
_SMP_ticket_lock_Acquire(
&the_spinlock->Lock,
&cpu_self->Lock_stats,
&cpu_self->Lock_stats_context
&unused_stats,
&unused_context
);
#endif
the_spinlock->interrupt_state = level;

View File

@@ -28,13 +28,24 @@ int pthread_spin_unlock( pthread_spinlock_t *lock )
{
POSIX_Spinlock_Control *the_spinlock;
ISR_Level level;
#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
SMP_lock_Stats unused_stats;
SMP_lock_Stats_context unused_context;
#endif
the_spinlock = _POSIX_Spinlock_Get( lock );
level = the_spinlock->interrupt_state;
#if defined(RTEMS_SMP)
#if defined(RTEMS_PROFILING)
/* This is a hack to get around the lock profiling statistics */
unused_stats.total_section_time = 0;
unused_stats.max_section_time = 0;
unused_context.stats = &unused_stats;
unused_context.acquire_instant = 0;
#endif
_SMP_ticket_lock_Release(
&the_spinlock->Lock,
&_Per_CPU_Get()->Lock_stats_context
&unused_context
);
#endif
_ISR_Local_enable( level );

View File

@@ -14,25 +14,26 @@
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread )
{
ISR_lock_Context lock_context;
ISR_lock_Context scheduler_lock_context;
_Thread_Scheduler_acquire_critical( the_thread, &lock_context );
_Thread_Scheduler_acquire_critical( the_thread, &scheduler_lock_context );
if ( _Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
Per_CPU_Control *cpu;
Per_CPU_Control *cpu;
ISR_lock_Context per_cpu_lock_context;
cpu = _Thread_Get_CPU( the_thread );
_Per_CPU_Acquire( cpu );
_Per_CPU_Acquire( cpu, &per_cpu_lock_context );
_Chain_Append_unprotected(
&cpu->Threads_in_need_for_help,
&the_thread->Scheduler.Help_node
);
_Per_CPU_Release( cpu );
_Per_CPU_Release( cpu, &per_cpu_lock_context );
_Thread_Dispatch_request( _Per_CPU_Get(), cpu );
}
_Thread_Scheduler_release_critical( the_thread, &lock_context );
_Thread_Scheduler_release_critical( the_thread, &scheduler_lock_context );
}

View File

@@ -117,8 +117,7 @@ void _SMP_Handler_initialize( void )
Per_CPU_Control *cpu;
cpu = _Per_CPU_Get_by_index( cpu_index );
_SMP_ticket_lock_Initialize( &cpu->Lock );
_SMP_lock_Stats_initialize( &cpu->Lock_stats, "Per-CPU" );
_ISR_lock_Set_name( &cpu->Lock, "Per-CPU" );
_ISR_lock_Set_name( &cpu->Watchdog.Lock, "Per-CPU Watchdog" );
_Chain_Initialize_empty( &cpu->Threads_in_need_for_help );
}

View File

@@ -158,27 +158,30 @@ static ISR_Level _Thread_Preemption_intervention(
)
{
#if defined(RTEMS_SMP)
ISR_lock_Context lock_context;
level = _Thread_Check_pinning( executing, cpu_self, level );
_Per_CPU_Acquire( cpu_self );
_Per_CPU_Acquire( cpu_self, &lock_context );
while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
Chain_Node *node;
Thread_Control *the_thread;
ISR_lock_Context lock_context;
Chain_Node *node;
Thread_Control *the_thread;
node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
_Chain_Set_off_chain( node );
the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
_Per_CPU_Release( cpu_self );
_Per_CPU_Release( cpu_self, &lock_context );
_Thread_State_acquire( the_thread, &lock_context );
_Thread_Ask_for_help( the_thread );
_Thread_State_release( the_thread, &lock_context );
_Per_CPU_Acquire( cpu_self );
_Per_CPU_Acquire( cpu_self, &lock_context );
}
_Per_CPU_Release( cpu_self );
_Per_CPU_Release( cpu_self, &lock_context );
#else
(void) cpu_self;
#endif

View File

@@ -41,17 +41,15 @@ void _User_extensions_Add_set(
*/
if ( the_extension->Callouts.thread_switch != NULL ) {
ISR_Level level;
the_extension->Switch.thread_switch =
the_extension->Callouts.thread_switch;
_Per_CPU_Acquire_all( level );
_Per_CPU_Acquire_all( &lock_context );
_Chain_Initialize_node( &the_extension->Switch.Node );
_Chain_Append_unprotected(
&_User_extensions_Switches_list,
&the_extension->Switch.Node
);
_Per_CPU_Release_all( level );
_Per_CPU_Release_all( &lock_context );
}
}

View File

@@ -41,10 +41,10 @@ void _User_extensions_Remove_set (
*/
if ( the_extension->Callouts.thread_switch != NULL ) {
ISR_Level level;
ISR_lock_Context lock_context;
_Per_CPU_Acquire_all( level );
_Per_CPU_Acquire_all( &lock_context );
_Chain_Extract_unprotected( &the_extension->Switch.Node );
_Per_CPU_Release_all( level );
_Per_CPU_Release_all( &lock_context );
}
}