forked from Imagelibrary/rtems
score: Critical fix for SMP
The _Scheduler_SMP_Allocate_processor() and _Thread_Dispatch() exchange information without locks. Make sure we use the right load/store ordering.
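The ordering contract the patch establishes is: the scheduler publishes the new heir, issues a release fence, and only then raises dispatch_necessary; the dispatcher clears dispatch_necessary, issues a sequentially consistent fence, and only then reads the heir. A minimal standalone C11 model of that handshake follows (illustrative names, plain <stdatomic.h> instead of the RTEMS Atomic_* wrappers, and a polling loop standing in for the inter-processor interrupt; a sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for Thread_Control objects and the per-CPU state. */
static int thread_a, thread_b;
static int *_Atomic heir = &thread_a;
static atomic_bool dispatch_necessary = false;

/* Scheduler side, shaped like _Scheduler_SMP_Allocate_processor():
 * publish the heir first, then raise the flag.  The release fence keeps
 * the two stores in that order, so whoever observes the flag also
 * observes the new heir. */
static void allocate_processor( int *new_heir )
{
  atomic_store_explicit( &heir, new_heir, memory_order_relaxed );
  atomic_thread_fence( memory_order_release );
  atomic_store_explicit( &dispatch_necessary, true, memory_order_relaxed );
}

/* Dispatcher side, shaped like _Thread_Dispatch(): clear the flag first,
 * then read the heir.  The fence must be sequentially consistent because
 * it has to order a store before a later load; if the scheduler's flag
 * store lands after our clear, the loop runs again and picks up the
 * newer heir. */
static int *dispatch( void )
{
  atomic_store_explicit( &dispatch_necessary, false, memory_order_relaxed );
  atomic_thread_fence( memory_order_seq_cst );
  return atomic_load_explicit( &heir, memory_order_relaxed );
}

int main( void )
{
  allocate_processor( &thread_b );

  while ( atomic_load_explicit( &dispatch_necessary, memory_order_relaxed ) ) {
    printf( "heir is thread_%c\n", dispatch() == &thread_b ? 'b' : 'a' );
  }

  return 0;
}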
@@ -54,23 +54,13 @@ typedef atomic_uintptr_t Atomic_Pointer;
 typedef atomic_flag Atomic_Flag;
 
 /**
- * @brief the enumeration Atomic_Memory_barrier specifies the detailed regular
- * memory synchronization operations used in the atomic operation API
- * definitions.
+ * @brief Memory order according to ISO/IEC 9899:2011.
  */
 typedef enum {
-  /** no operation orders memory. */
   ATOMIC_ORDER_RELAXED = memory_order_relaxed,
-  /** a load operation performs an acquire operation on the affected memory
-   * location. This flag guarantees that the effects of load operation are
-   * completed before the effects of any later data accesses.
-   */
   ATOMIC_ORDER_ACQUIRE = memory_order_acquire,
-  /** a store operation performs a release operation on the affected memory
-   * location. This flag guarantee that all effects of all previous data
-   * accesses are completed before the store operation takes place.
-   */
-  ATOMIC_ORDER_RELEASE = memory_order_release
+  ATOMIC_ORDER_RELEASE = memory_order_release,
+  ATOMIC_ORDER_SEQ_CST = memory_order_seq_cst
 } Atomic_Order;
 
 
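A note on the rewritten enum: each Atomic_Order constant is defined as the corresponding ISO/IEC 9899:2011 memory_order value, and the hunk adds the previously missing ATOMIC_ORDER_SEQ_CST, which the _Thread_Dispatch() change below relies on. Because the values alias the C11 constants, they can be handed straight to <stdatomic.h> operations with no translation. A standalone illustration of that property (the enum is duplicated here for the example; this is not the RTEMS header itself):

#include <stdatomic.h>
#include <assert.h>

typedef enum {
  ATOMIC_ORDER_RELAXED = memory_order_relaxed,
  ATOMIC_ORDER_ACQUIRE = memory_order_acquire,
  ATOMIC_ORDER_RELEASE = memory_order_release,
  ATOMIC_ORDER_SEQ_CST = memory_order_seq_cst
} Atomic_Order;

int main( void )
{
  atomic_uint value = 0;

  /* An Atomic_Order constant converts to memory_order at zero cost. */
  atomic_store_explicit( &value, 123, (memory_order) ATOMIC_ORDER_RELEASE );
  assert(
    atomic_load_explicit( &value, (memory_order) ATOMIC_ORDER_ACQUIRE ) == 123
  );

  return 0;
}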
@@ -92,12 +92,14 @@ static inline void _Scheduler_SMP_Allocate_processor(
 
   _Thread_Set_CPU( heir, cpu_of_victim );
 
-  /*
-   * FIXME: Here we need atomic store operations with a relaxed memory order.
-   * The _CPU_SMP_Send_interrupt() will ensure that the change can be
-   * observed consistently.
-   */
   cpu_of_victim->heir = heir;
+
+  /*
+   * It is critical that we first update the heir and then the dispatch
+   * necessary so that _Thread_Dispatch() cannot miss an update.
+   */
+  _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
   cpu_of_victim->dispatch_necessary = true;
 
   if ( cpu_of_victim != cpu_of_executing ) {
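Note that the writes to cpu_of_victim->heir and cpu_of_victim->dispatch_necessary remain plain stores; the deleted FIXME indicates these per-CPU members are not yet C11 atomic objects, so the standalone _Atomic_Fence( ATOMIC_ORDER_RELEASE ) between them is what forbids the store-store reordering. Had the flag been an atomic object, the same publication order could have been expressed with a release store on the flag itself, the more common C11 idiom, as in this hypothetical fragment:

#include <stdatomic.h>
#include <stdbool.h>

static int *_Atomic heir;
static atomic_bool dispatch_necessary;

/* Same store-store ordering as "heir store; release fence; flag store",
 * expressed as a release store on the flag (hypothetical alternative). */
static void publish_heir( int *new_heir )
{
  atomic_store_explicit( &heir, new_heir, memory_order_relaxed );
  atomic_store_explicit( &dispatch_necessary, true, memory_order_release );
}

int main( void )
{
  static int next_thread;

  publish_heir( &next_thread );
  return 0;
}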
@@ -97,9 +97,20 @@ void _Thread_Dispatch( void )
 #else
   while ( per_cpu->dispatch_necessary ) {
 #endif
-    heir = per_cpu->heir;
     per_cpu->dispatch_necessary = false;
+
+#if defined( RTEMS_SMP )
+    /*
+     * It is critical that we first update the dispatch necessary and then
+     * read the heir so that we don't miss an update by
+     * _Scheduler_SMP_Allocate_processor().
+     */
+    _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+#endif
+
+    heir = per_cpu->heir;
     per_cpu->executing = heir;
 
 #if defined( RTEMS_SMP )
     executing->is_executing = false;
     heir->is_executing = true;
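The SEQ_CST fence on the dispatcher side is the subtle half of the fix. _Thread_Dispatch() performs a store (clearing dispatch_necessary) followed by a load (reading heir), and store-to-load is the one ordering that acquire and release fences do not provide. Without the full fence, the flag clear can linger in the CPU's store buffer while the heir load reads a stale value, and the clear then overwrites the scheduler's flag set: the update is missed. This is the classic store-buffering shape; a standalone litmus sketch of it follows (hypothetical file sb_litmus.c, built with cc -std=c11 -pthread; not kernel code):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int x, y;
static int r1, r2;

/* Thread A: store x, full fence, load y. */
static void *thread_a( void *arg )
{
  (void) arg;
  atomic_store_explicit( &x, 1, memory_order_relaxed );
  atomic_thread_fence( memory_order_seq_cst );
  r1 = atomic_load_explicit( &y, memory_order_relaxed );
  return NULL;
}

/* Thread B: store y, full fence, load x. */
static void *thread_b( void *arg )
{
  (void) arg;
  atomic_store_explicit( &y, 1, memory_order_relaxed );
  atomic_thread_fence( memory_order_seq_cst );
  r2 = atomic_load_explicit( &x, memory_order_relaxed );
  return NULL;
}

int main( void )
{
  pthread_t a, b;

  pthread_create( &a, NULL, thread_a, NULL );
  pthread_create( &b, NULL, thread_b, NULL );
  pthread_join( a, NULL );
  pthread_join( b, NULL );

  /* With seq_cst fences on both sides, r1 == 0 && r2 == 0 is forbidden.
   * Replace either fence with an acquire or release fence and that
   * outcome becomes legal: each thread's store may sit in its store
   * buffer while the other thread loads the old value. */
  printf( "r1=%d r2=%d\n", r1, r2 );
  return 0;
}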