score: Add thread pin/unpin support

Add support to temporarily pin a thread to its current processor.  This
may be used to access per-processor data structures in critical sections
with thread dispatching enabled, e.g. a pinned thread is allowed to
block.

Update #3508.
Sebastian Huber
2018-08-29 09:43:44 +02:00
parent d8bc0730f7
commit 709796209c
23 changed files with 1145 additions and 53 deletions
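The intended usage pattern looks roughly like the sketch below, which mirrors the pin()/unpin() helpers of the new smpthreadpin01 test.  The counter and the function name are hypothetical and only serve as an example; _Thread_Pin(), _Thread_Unpin(), _Thread_Dispatch_disable(), _Thread_Dispatch_enable() and _Per_CPU_Get_executing() are the internal operations used by this commit.

#include <rtems.h>
#include <rtems/score/threadimpl.h>

/* Hypothetical per-processor data, for illustration only. */
extern uint32_t per_cpu_counter[];

static void update_per_cpu_counter( void )
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;

  /* Pin the executing thread to its current processor. */
  cpu_self = _Thread_Dispatch_disable();
  executing = _Per_CPU_Get_executing( cpu_self );
  _Thread_Pin( executing );
  _Thread_Dispatch_enable( cpu_self );

  /*
   * Thread dispatching is enabled again and the thread may block, yet
   * it will resume on the same processor, so the processor index
   * obtained here stays valid throughout.
   */
  ++per_cpu_counter[ rtems_get_current_processor() ];

  /* Undo the pinning. */
  cpu_self = _Thread_Dispatch_disable();
  _Thread_Unpin( executing, cpu_self );
  _Thread_Dispatch_enable( cpu_self );
}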


@@ -136,6 +136,36 @@ typedef struct {
Thread_Scheduler_state next_state
);
/**
* @brief Pin thread operation.
*
* @param[in] scheduler The scheduler instance of the specified processor.
* @param[in] the_thread The thread to pin.
* @param[in] node The scheduler node of the thread.
* @param[in] cpu The processor to pin the thread.
*/
void ( *pin )(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
);
/**
* @brief Unpin thread operation.
*
* @param[in] scheduler The scheduler instance of the specified processor.
* @param[in] the_thread The thread to unpin.
* @param[in] node The scheduler node of the thread.
* @param[in] cpu The processor to unpin the thread.
*/
void ( *unpin )(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
);
/**
* @brief Add processor operation.
*
@@ -405,10 +435,28 @@ Priority_Control _Scheduler_default_Unmap_priority(
Thread_Scheduler_state next_state
);
/**
* @brief Does nothing in a single processor system, otherwise a fatal error
* is issued.
*
* @param[in] scheduler Unused.
* @param[in] the_thread Unused.
* @param[in] node Unused.
* @param[in] cpu Unused.
*/
void _Scheduler_default_Pin_or_unpin(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
);
#define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
_Scheduler_default_Ask_for_help, \
_Scheduler_default_Reconsider_help_request, \
_Scheduler_default_Withdraw_node, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_default_Pin_or_unpin, \
NULL, \
NULL,
#else


@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2017 embedded brains GmbH.
+ * Copyright (c) 2017, 2018 embedded brains GmbH.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -43,13 +43,24 @@ typedef struct {
int64_t generation;
/**
- * @brief The ready queue index depending on the processor affinity of the thread.
+ * @brief The ready queue index depending on the processor affinity and
+ *   pinning of the thread.
*
* The ready queue index zero is used for threads with a one-to-all thread
* processor affinity. Threads with a one-to-one processor affinity use the
* processor index plus one as the ready queue index.
*/
-  uint32_t ready_queue_index;
+  uint8_t ready_queue_index;
+
+  /**
+   * @brief Ready queue index according to thread affinity.
+   */
+  uint8_t affinity_ready_queue_index;
+
+  /**
+   * @brief Ready queue index according to thread pinning.
+   */
+  uint8_t pinning_ready_queue_index;
} Scheduler_EDF_SMP_Node;
typedef struct {
@@ -105,6 +116,8 @@ typedef struct {
_Scheduler_EDF_SMP_Ask_for_help, \
_Scheduler_EDF_SMP_Reconsider_help_request, \
_Scheduler_EDF_SMP_Withdraw_node, \
_Scheduler_EDF_SMP_Pin, \
_Scheduler_EDF_SMP_Unpin, \
_Scheduler_EDF_SMP_Add_processor, \
_Scheduler_EDF_SMP_Remove_processor, \
_Scheduler_EDF_SMP_Node_initialize, \
@@ -162,6 +175,20 @@ void _Scheduler_EDF_SMP_Withdraw_node(
Thread_Scheduler_state next_state
);
void _Scheduler_EDF_SMP_Pin(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
);
void _Scheduler_EDF_SMP_Unpin(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
);
void _Scheduler_EDF_SMP_Add_processor(
const Scheduler_Control *scheduler,
Thread_Control *idle

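As a reading aid, the interplay of the three ready queue indexes can be modeled in a few lines.  The field names follow this commit, but the model itself is illustrative only and not part of the tree.  Ready queue 0 serves threads with a one-to-all processor affinity; ready queue n + 1 serves threads restricted to processor n, which also explains why a uint8_t index suffices for the supported processor counts.

#include <stdint.h>

/* Illustrative model, not RTEMS code. */
typedef struct {
  uint8_t ready_queue_index;          /* index actually used for scheduling */
  uint8_t affinity_ready_queue_index; /* index derived from the thread affinity */
  uint8_t pinning_ready_queue_index;  /* non-zero while the thread is pinned */
} edf_smp_indexes;

static void model_pin( edf_smp_indexes *node, uint32_t cpu_index )
{
  /* Ready queue 0 is the one-to-all queue, so processor n maps to n + 1. */
  uint8_t rqi = (uint8_t) ( cpu_index + 1 );

  node->ready_queue_index = rqi;
  node->pinning_ready_queue_index = rqi;
}

static void model_unpin( edf_smp_indexes *node )
{
  /* Fall back to whatever the thread affinity dictates. */
  node->ready_queue_index = node->affinity_ready_queue_index;
  node->pinning_ready_queue_index = 0;
}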

@@ -1095,7 +1095,13 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
&the_thread->Real_priority
);
-  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
+  if (
+    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
+#if defined(RTEMS_SMP)
+      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
+      || the_thread->Scheduler.pin_level != 0
+#endif
+  ) {
_Priority_Plain_insert(
&old_scheduler_node->Wait.Priority,
&the_thread->Real_priority,
@@ -1105,15 +1111,6 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
}
#if defined(RTEMS_SMP)
-  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
-    _Priority_Plain_insert(
-      &old_scheduler_node->Wait.Priority,
-      &the_thread->Real_priority,
-      the_thread->Real_priority.priority
-    );
-    return STATUS_RESOURCE_IN_USE;
-  }
old_scheduler = _Thread_Scheduler_get_home( the_thread );
new_scheduler_node = _Thread_Scheduler_get_node_by_index(
the_thread,
@@ -1140,7 +1137,8 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
return STATUS_UNSATISFIED;
}
-  the_thread->Scheduler.home = new_scheduler;
+  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
+  the_thread->Scheduler.home_scheduler = new_scheduler;
_Scheduler_Release_critical( new_scheduler, &lock_context );


@@ -61,6 +61,8 @@ extern "C" {
_Scheduler_priority_affinity_SMP_Ask_for_help, \
_Scheduler_priority_affinity_SMP_Reconsider_help_request, \
_Scheduler_priority_affinity_SMP_Withdraw_node, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_priority_affinity_SMP_Add_processor, \
_Scheduler_priority_affinity_SMP_Remove_processor, \
_Scheduler_priority_affinity_SMP_Node_initialize, \


@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -89,6 +89,8 @@ typedef struct {
_Scheduler_priority_SMP_Ask_for_help, \
_Scheduler_priority_SMP_Reconsider_help_request, \
_Scheduler_priority_SMP_Withdraw_node, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_priority_SMP_Add_processor, \
_Scheduler_priority_SMP_Remove_processor, \
_Scheduler_priority_SMP_Node_initialize, \


@@ -9,7 +9,7 @@
/*
* Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2013, 2016 embedded brains GmbH.
+ * Copyright (c) 2013, 2018 embedded brains GmbH.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -72,6 +72,8 @@ typedef struct {
_Scheduler_simple_SMP_Ask_for_help, \
_Scheduler_simple_SMP_Reconsider_help_request, \
_Scheduler_simple_SMP_Withdraw_node, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_simple_SMP_Add_processor, \
_Scheduler_simple_SMP_Remove_processor, \
_Scheduler_simple_SMP_Node_initialize, \


@@ -1185,6 +1185,14 @@ static inline bool _Scheduler_SMP_Ask_for_help(
ISR_lock_Context lock_context;
bool success;
if ( thread->Scheduler.pinned_scheduler != NULL ) {
/*
* Pinned threads are not allowed to ask for help. Return success to break
* the loop in _Thread_Ask_for_help() early.
*/
return true;
}
lowest_scheduled = ( *get_lowest_scheduled )( context, node );
_Thread_Scheduler_acquire_critical( thread, &lock_context );
@@ -1474,6 +1482,7 @@ static inline void _Scheduler_SMP_Set_affinity(
( *set_affinity )( context, node, arg );
( *enqueue )( context, node, insert_priority );
} else {
_Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
( *set_affinity )( context, node, arg );
}
}


@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -89,6 +89,8 @@ typedef struct {
_Scheduler_strong_APA_Ask_for_help, \
_Scheduler_strong_APA_Reconsider_help_request, \
_Scheduler_strong_APA_Withdraw_node, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_default_Pin_or_unpin, \
_Scheduler_strong_APA_Add_processor, \
_Scheduler_strong_APA_Remove_processor, \
_Scheduler_strong_APA_Node_initialize, \


@@ -79,7 +79,8 @@ typedef enum {
SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR,
SMP_FATAL_SHUTDOWN,
SMP_FATAL_SHUTDOWN_RESPONSE,
-  SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED
+  SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED,
+  SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED
} SMP_Fatal_code;
static inline void _SMP_Fatal( SMP_Fatal_code code )


@@ -259,9 +259,14 @@ typedef struct {
Thread_Scheduler_state state;
/**
-   * @brief The home scheduler control of this thread.
+   * @brief The home scheduler of this thread.
   */
-  const struct _Scheduler_Control *home;
+  const struct _Scheduler_Control *home_scheduler;
+
+  /**
+   * @brief The pinned scheduler of this thread.
+   */
+  const struct _Scheduler_Control *pinned_scheduler;
/**
* @brief The processor assigned by the current scheduler.
@@ -270,12 +275,12 @@ typedef struct {
/**
* @brief Scheduler nodes immediately available to the thread by its home
-   * scheduler instance and due to thread queue ownerships.
+   * scheduler and due to thread queue ownerships.
*
* This chain is protected by the thread wait lock.
*
* This chain is never empty. The first scheduler node on the chain is the
-   * scheduler node of the home scheduler instance.
+   * scheduler node of the home scheduler.
*/
Chain_Control Wait_nodes;
@@ -285,8 +290,12 @@ typedef struct {
*
* This chain is protected by the thread state lock.
*
-   * This chain is never empty. The first scheduler node on the chain is the
-   * scheduler node of the home scheduler instance.
+   * This chain is never empty for normal threads (the only exception are idle
+   * threads associated with an online processor which is not used by a
+   * scheduler).  In case a pinned scheduler is set for this thread, then the
+   * first scheduler node of this chain belongs to the pinned scheduler,
+   * otherwise the first scheduler node of this chain belongs to the home
+   * scheduler.
*/
Chain_Control Scheduler_nodes;
@@ -312,6 +321,28 @@ typedef struct {
*/
Scheduler_Node *requests;
/**
* @brief The thread pinning to current processor level.
*
* Must be touched only by the executing thread with thread dispatching
* disabled. If non-zero, then the thread is pinned to its current
* processor. The pin level is incremented and decremented by two. The
* least-significant bit indicates that the thread was pre-empted and must
* undo the pinning with respect to the scheduler once the level changes from
* three to one.
*
* The thread pinning may be used to access per-processor data structures in
* critical sections with enabled thread dispatching, e.g. a pinned thread is
* allowed to block.
*
* Thread pinning should be used only for short critical sections and not all
* the time. Thread pinning is a very low overhead operation in case the
* thread is not preempted during the pinning.
*
* @see _Thread_Pin() and _Thread_Unpin().
*/
int pin_level;
/**
* @brief The thread processor affinity set.
*/


@@ -1025,7 +1025,7 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
)
{
#if defined(RTEMS_SMP)
-  return the_thread->Scheduler.home;
+  return the_thread->Scheduler.home_scheduler;
#else
(void) the_thread;
return &_Scheduler_Table[ 0 ];
@@ -1953,6 +1953,56 @@ size_t _Thread_Get_name(
size_t buffer_size
);
#if defined(RTEMS_SMP)
#define THREAD_PIN_STEP 2
#define THREAD_PIN_PREEMPTION 1
void _Thread_Do_unpin(
Thread_Control *executing,
Per_CPU_Control *cpu_self
);
#endif
RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
_Assert( executing == _Thread_Executing );
executing->Scheduler.pin_level += THREAD_PIN_STEP;
#else
(void) executing;
#endif
}
RTEMS_INLINE_ROUTINE void _Thread_Unpin(
Thread_Control *executing,
Per_CPU_Control *cpu_self
)
{
#if defined(RTEMS_SMP)
unsigned int pin_level;
_Assert( executing == _Thread_Executing );
pin_level = executing->Scheduler.pin_level;
_Assert( pin_level > 0 );
if (
RTEMS_PREDICT_TRUE(
pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
)
) {
executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
} else {
_Thread_Do_unpin( executing, cpu_self );
}
#else
(void) executing;
(void) cpu_self;
#endif
}
/** @}*/
#ifdef __cplusplus

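A worked trace of the pin level encoding may be helpful.  This is a standalone model of the arithmetic in _Thread_Pin() and _Thread_Unpin() above and in _Thread_Check_pinning() further below, not RTEMS code:

#include <assert.h>

#define THREAD_PIN_STEP 2
#define THREAD_PIN_PREEMPTION 1

int main( void )
{
  unsigned int pin_level = 0;

  pin_level += THREAD_PIN_STEP;       /* _Thread_Pin(): level 2, pinned */
  pin_level += THREAD_PIN_STEP;       /* nested _Thread_Pin(): level 4 */
  pin_level |= THREAD_PIN_PREEMPTION; /* first preemption while pinned: 5 */

  pin_level -= THREAD_PIN_STEP;       /* inner _Thread_Unpin(): level 3 */
  assert( pin_level == ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION ) );

  /*
   * The outer _Thread_Unpin() sees exactly level three and therefore
   * calls _Thread_Do_unpin(), which undoes the pinning with respect to
   * the scheduler and resets the level to zero.
   */
  pin_level = 0;
  assert( pin_level == 0 );

  return 0;
}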

@@ -73,7 +73,7 @@ rtems_status_code rtems_scheduler_add_processor(
_Assert( idle != NULL );
cpu->Scheduler.idle_if_online_and_unused = NULL;
-  idle->Scheduler.home = scheduler;
+  idle->Scheduler.home_scheduler = scheduler;
idle->Start.initial_priority = idle_priority;
scheduler_node =
_Thread_Scheduler_get_node_by_index( idle, scheduler_index );


@@ -26,6 +26,7 @@ endif
if HAS_SMP
libscore_a_SOURCES += src/percpustatewait.c
libscore_a_SOURCES += src/profilingsmplock.c
libscore_a_SOURCES += src/schedulerdefaultpinunpin.c
libscore_a_SOURCES += src/scheduleredfsmp.c
libscore_a_SOURCES += src/schedulerpriorityaffinitysmp.c
libscore_a_SOURCES += src/schedulerprioritysmp.c
@@ -38,6 +39,7 @@ libscore_a_SOURCES += src/schedulerdefaultaskforhelp.c
libscore_a_SOURCES += src/schedulerdefaultsetaffinity.c
libscore_a_SOURCES += src/schedulersmp.c
libscore_a_SOURCES += src/schedulersmpstartidle.c
libscore_a_SOURCES += src/threadunpin.c
endif
## CORE_APIMUTEX_C_FILES


@@ -0,0 +1,35 @@
/*
* Copyright (c) 2018 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/score/scheduler.h>
#include <rtems/score/interr.h>
#include <rtems/score/smpimpl.h>
void _Scheduler_default_Pin_or_unpin(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
Scheduler_Node *node,
struct Per_CPU_Control *cpu
)
{
(void) scheduler;
(void) the_thread;
(void) node;
(void) cpu;
if ( _SMP_Get_processor_count() > 1 ) {
_Terminate(
RTEMS_FATAL_SOURCE_SMP,
SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED
);
}
}


@@ -143,7 +143,7 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *highest_ready;
Scheduler_EDF_SMP_Node *node;
-  uint32_t rqi;
+  uint8_t rqi;
const Chain_Node *tail;
Chain_Node *next;
@@ -199,7 +199,7 @@ static inline void _Scheduler_EDF_SMP_Set_scheduled(
static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
const Scheduler_EDF_SMP_Context *self,
-  uint32_t rqi
+  uint8_t rqi
)
{
return self->Ready[ rqi ].scheduled;
@@ -211,7 +211,7 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
)
{
Scheduler_EDF_SMP_Node *filter;
-  uint32_t rqi;
+  uint8_t rqi;
filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
rqi = filter->ready_queue_index;
@@ -240,7 +240,7 @@ static inline void _Scheduler_EDF_SMP_Insert_ready(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
-  uint32_t rqi;
+  uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
int generation_index;
int increment;
@@ -306,7 +306,7 @@ static inline void _Scheduler_EDF_SMP_Extract_from_ready(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
-  uint32_t rqi;
+  uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
self = _Scheduler_EDF_SMP_Get_self( context );
@@ -369,7 +369,7 @@ static inline void _Scheduler_EDF_SMP_Allocate_processor(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *scheduled;
-  uint32_t rqi;
+  uint8_t rqi;
(void) victim_base;
self = _Scheduler_EDF_SMP_Get_self( context );
@@ -647,7 +647,7 @@ static inline void _Scheduler_EDF_SMP_Do_set_affinity(
)
{
Scheduler_EDF_SMP_Node *node;
-  const uint32_t *rqi;
+  const uint8_t *rqi;
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = arg;
@@ -672,16 +672,61 @@ void _Scheduler_EDF_SMP_Start_idle(
);
}
void _Scheduler_EDF_SMP_Pin(
const Scheduler_Control *scheduler,
Thread_Control *thread,
Scheduler_Node *node_base,
struct Per_CPU_Control *cpu
)
{
Scheduler_EDF_SMP_Node *node;
uint8_t rqi;
(void) scheduler;
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
_Assert(
_Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
);
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
node->ready_queue_index = rqi;
node->pinning_ready_queue_index = rqi;
}
void _Scheduler_EDF_SMP_Unpin(
const Scheduler_Control *scheduler,
Thread_Control *thread,
Scheduler_Node *node_base,
struct Per_CPU_Control *cpu
)
{
Scheduler_EDF_SMP_Node *node;
(void) scheduler;
(void) cpu;
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
_Assert(
_Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
);
node->ready_queue_index = node->affinity_ready_queue_index;
node->pinning_ready_queue_index = 0;
}
bool _Scheduler_EDF_SMP_Set_affinity(
const Scheduler_Control *scheduler,
Thread_Control *thread,
-  Scheduler_Node *node,
+  Scheduler_Node *node_base,
const Processor_mask *affinity
)
{
-  Scheduler_Context *context;
-  Processor_mask local_affinity;
-  uint32_t rqi;
+  Scheduler_Context *context;
+  Scheduler_EDF_SMP_Node *node;
+  Processor_mask local_affinity;
+  uint8_t rqi;
context = _Scheduler_Get_context( scheduler );
_Processor_mask_And( &local_affinity, &context->Processors, affinity );
@@ -696,18 +741,23 @@ bool _Scheduler_EDF_SMP_Set_affinity(
rqi = _Processor_mask_Find_last_set( &local_affinity );
}
-  _Scheduler_SMP_Set_affinity(
-    context,
-    thread,
-    node,
-    &rqi,
-    _Scheduler_EDF_SMP_Do_set_affinity,
-    _Scheduler_EDF_SMP_Extract_from_ready,
-    _Scheduler_EDF_SMP_Get_highest_ready,
-    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_EDF_SMP_Enqueue,
-    _Scheduler_EDF_SMP_Allocate_processor
-  );
+  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+  node->affinity_ready_queue_index = rqi;
+
+  if ( node->pinning_ready_queue_index == 0 ) {
+    _Scheduler_SMP_Set_affinity(
+      context,
+      thread,
+      node_base,
+      &rqi,
+      _Scheduler_EDF_SMP_Do_set_affinity,
+      _Scheduler_EDF_SMP_Extract_from_ready,
+      _Scheduler_EDF_SMP_Get_highest_ready,
+      _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+      _Scheduler_EDF_SMP_Enqueue,
+      _Scheduler_EDF_SMP_Allocate_processor
+    );
+  }
return true;
}


@@ -9,7 +9,7 @@
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2016 embedded brains GmbH.
+ * Copyright (c) 2014, 2018 embedded brains GmbH.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -37,6 +37,80 @@ Thread_Control *_Thread_Allocated_fp;
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
#if defined(RTEMS_SMP)
static ISR_Level _Thread_Check_pinning(
Thread_Control *executing,
Per_CPU_Control *cpu_self,
ISR_Level level
)
{
unsigned int pin_level;
pin_level = executing->Scheduler.pin_level;
if (
RTEMS_PREDICT_FALSE( pin_level != 0 )
&& ( pin_level & THREAD_PIN_PREEMPTION ) == 0
) {
ISR_lock_Context state_lock_context;
ISR_lock_Context scheduler_lock_context;
const Scheduler_Control *pinned_scheduler;
Scheduler_Node *pinned_node;
const Scheduler_Control *home_scheduler;
_ISR_Local_enable( level );
executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;
_Thread_State_acquire( executing, &state_lock_context );
pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
pinned_node = _Thread_Scheduler_get_node_by_index(
executing,
_Scheduler_Get_index( pinned_scheduler )
);
if ( _Thread_Is_ready( executing ) ) {
_Scheduler_Block( executing );
}
home_scheduler = _Thread_Scheduler_get_home( executing );
executing->Scheduler.pinned_scheduler = pinned_scheduler;
if ( home_scheduler != pinned_scheduler ) {
_Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
_Chain_Prepend_unprotected(
&executing->Scheduler.Scheduler_nodes,
&pinned_node->Thread.Scheduler_node.Chain
);
}
_Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );
( *pinned_scheduler->Operations.pin )(
pinned_scheduler,
executing,
pinned_node,
cpu_self
);
if ( _Thread_Is_ready( executing ) ) {
( *pinned_scheduler->Operations.unblock )(
pinned_scheduler,
executing,
pinned_node
);
}
_Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );
_Thread_State_release( executing, &state_lock_context );
_ISR_Local_disable( level );
}
return level;
}
static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
Chain_Node *node;
@@ -77,9 +151,15 @@ static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
}
#endif
-static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
+static ISR_Level _Thread_Preemption_intervention(
+  Thread_Control  *executing,
+  Per_CPU_Control *cpu_self,
+  ISR_Level        level
+)
{
#if defined(RTEMS_SMP)
level = _Thread_Check_pinning( executing, cpu_self, level );
_Per_CPU_Acquire( cpu_self );
while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
@@ -102,6 +182,8 @@ static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
#else
(void) cpu_self;
#endif
return level;
}
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
@@ -192,7 +274,7 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
do {
Thread_Control *heir;
-    _Thread_Preemption_intervention( cpu_self );
+    level = _Thread_Preemption_intervention( executing, cpu_self, level );
heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
/*


@@ -245,7 +245,7 @@ bool _Thread_Initialize(
#if defined(RTEMS_SMP)
RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
-  the_thread->Scheduler.home = scheduler;
+  the_thread->Scheduler.home_scheduler = scheduler;
_ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
_Processor_mask_Assign(
&the_thread->Scheduler.Affinity,


@@ -0,0 +1,72 @@
/*
* Copyright (c) 2018 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/score/schedulerimpl.h>
void _Thread_Do_unpin( Thread_Control *executing, Per_CPU_Control *cpu_self )
{
ISR_lock_Context state_lock_context;
ISR_lock_Context scheduler_lock_context;
Scheduler_Node *pinned_node;
const Scheduler_Control *pinned_scheduler;
Scheduler_Node *home_node;
const Scheduler_Control *home_scheduler;
const Scheduler_Control *scheduler;
_Thread_State_acquire( executing, &state_lock_context );
executing->Scheduler.pin_level = 0;
pinned_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
_Chain_First( &executing->Scheduler.Scheduler_nodes )
);
pinned_scheduler = _Scheduler_Node_get_scheduler( pinned_node );
home_node = _Thread_Scheduler_get_home_node( executing );
home_scheduler = _Thread_Scheduler_get_home( executing );
scheduler = pinned_scheduler;
executing->Scheduler.pinned_scheduler = NULL;
_Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
if ( _Thread_Is_ready( executing ) ) {
( *scheduler->Operations.block )( scheduler, executing, pinned_node );
}
( *scheduler->Operations.unpin )(
scheduler,
executing,
pinned_node,
cpu_self
);
if ( home_node != pinned_node ) {
_Scheduler_Release_critical( scheduler, &scheduler_lock_context );
_Chain_Extract_unprotected( &home_node->Thread.Scheduler_node.Chain );
_Chain_Prepend_unprotected(
&executing->Scheduler.Scheduler_nodes,
&home_node->Thread.Scheduler_node.Chain
);
scheduler = home_scheduler;
_Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
}
if ( _Thread_Is_ready( executing ) ) {
( *scheduler->Operations.unblock )( scheduler, executing, home_node );
}
_Scheduler_Release_critical( scheduler, &scheduler_lock_context );
_Thread_State_release( executing, &state_lock_context );
}


@@ -621,6 +621,17 @@ smpthreadlife01_CPPFLAGS = $(AM_CPPFLAGS) \
endif
endif
if HAS_SMP
if TEST_smpthreadpin01
smp_tests += smpthreadpin01
smp_screens += smpthreadpin01/smpthreadpin01.scn
smp_docs += smpthreadpin01/smpthreadpin01.doc
smpthreadpin01_SOURCES = smpthreadpin01/init.c
smpthreadpin01_CPPFLAGS = $(AM_CPPFLAGS) \
$(TEST_FLAGS_smpthreadpin01) $(support_includes)
endif
endif
if HAS_SMP
if TEST_smpunsupported01
smp_tests += smpunsupported01


@@ -87,6 +87,7 @@ RTEMS_TEST_CHECK([smpsignal01])
RTEMS_TEST_CHECK([smpstrongapa01])
RTEMS_TEST_CHECK([smpswitchextension01])
RTEMS_TEST_CHECK([smpthreadlife01])
RTEMS_TEST_CHECK([smpthreadpin01])
RTEMS_TEST_CHECK([smpunsupported01])
RTEMS_TEST_CHECK([smpwakeafter01])


@@ -0,0 +1,620 @@
/*
* Copyright (c) 2018 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <rtems/thread.h>
#include <rtems/score/threadimpl.h>
#include <tmacros.h>
const char rtems_test_name[] = "SMPTHREADPIN 1";
#define CPU_COUNT 2
#define SCHED_A rtems_build_name(' ', ' ', ' ', 'A')
#define SCHED_B rtems_build_name(' ', ' ', ' ', 'B')
#define EVENT_WAKEUP_MASTER RTEMS_EVENT_0
#define EVENT_MTX_LOCK RTEMS_EVENT_1
#define EVENT_MTX_UNLOCK RTEMS_EVENT_2
#define EVENT_MOVE_BUSY_TO_CPU_0 RTEMS_EVENT_3
#define EVENT_MOVE_BUSY_TO_CPU_1 RTEMS_EVENT_4
#define EVENT_MOVE_SELF_TO_CPU_0 RTEMS_EVENT_5
#define EVENT_MOVE_SELF_TO_CPU_1 RTEMS_EVENT_6
#define EVENT_SET_SELF_PRIO_TO_LOW RTEMS_EVENT_7
#define EVENT_SET_BUSY_PRIO_TO_IDLE RTEMS_EVENT_8
#define EVENT_SET_FLAG RTEMS_EVENT_9
#define PRIO_IDLE 6
#define PRIO_VERY_LOW 5
#define PRIO_LOW 4
#define PRIO_MIDDLE 3
#define PRIO_HIGH 2
#define PRIO_VERY_HIGH 1
typedef struct {
rtems_id master;
rtems_id event;
rtems_id event_2;
rtems_id busy;
rtems_id sched_a;
rtems_id sched_b;
rtems_mutex mtx;
volatile bool flag;
} test_context;
static test_context test_instance;
static rtems_task_priority set_prio(rtems_id id, rtems_task_priority prio)
{
rtems_status_code sc;
rtems_task_priority old_prio;
old_prio = 0xffffffff;
sc = rtems_task_set_priority(id, prio, &old_prio);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
return old_prio;
}
static void set_affinity(rtems_id task, uint32_t cpu_index)
{
rtems_status_code sc;
rtems_id sched_cpu;
rtems_id sched_task;
cpu_set_t set;
sc = rtems_scheduler_ident_by_processor(cpu_index, &sched_cpu);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_get_scheduler(task, &sched_task);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
if (sched_task != sched_cpu) {
rtems_task_priority prio;
CPU_FILL(&set);
sc = rtems_task_set_affinity(task, sizeof(set), &set);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
prio = set_prio(task, RTEMS_CURRENT_PRIORITY);
sc = rtems_task_set_scheduler(task, sched_cpu, prio);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
CPU_ZERO(&set);
CPU_SET((int) cpu_index, &set);
sc = rtems_task_set_affinity(task, sizeof(set), &set);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void send_events(rtems_id task, rtems_event_set events)
{
rtems_status_code sc;
sc = rtems_event_send(task, events);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static rtems_event_set wait_for_events(void)
{
rtems_event_set events;
rtems_status_code sc;
sc = rtems_event_receive(
RTEMS_ALL_EVENTS,
RTEMS_EVENT_ANY | RTEMS_WAIT,
RTEMS_NO_TIMEOUT,
&events
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
return events;
}
static void pin(bool blocked)
{
Per_CPU_Control *cpu_self;
Thread_Control *executing;
cpu_self = _Thread_Dispatch_disable();
executing = _Per_CPU_Get_executing(cpu_self);
if (blocked) {
_Thread_Set_state(executing, STATES_SUSPENDED);
}
_Thread_Pin(executing);
if (blocked) {
_Thread_Clear_state(executing, STATES_SUSPENDED);
}
_Thread_Dispatch_enable(cpu_self);
}
static void unpin(bool blocked)
{
Per_CPU_Control *cpu_self;
Thread_Control *executing;
cpu_self = _Thread_Dispatch_disable();
executing = _Per_CPU_Get_executing(cpu_self);
if (blocked) {
_Thread_Set_state(executing, STATES_SUSPENDED);
}
_Thread_Unpin(executing, cpu_self);
if (blocked) {
_Thread_Clear_state(executing, STATES_SUSPENDED);
}
_Thread_Dispatch_enable(cpu_self);
}
static void event_task(rtems_task_argument arg)
{
test_context *ctx;
ctx = (test_context *) arg;
while (true) {
rtems_event_set events;
events = wait_for_events();
/*
* The order of event processing is important!
*/
if ((events & EVENT_MTX_LOCK) != 0) {
rtems_mutex_lock(&ctx->mtx);
}
if ((events & EVENT_MTX_UNLOCK) != 0) {
rtems_mutex_unlock(&ctx->mtx);
}
if ((events & EVENT_MOVE_BUSY_TO_CPU_0) != 0) {
set_affinity(ctx->busy, 0);
}
if ((events & EVENT_MOVE_BUSY_TO_CPU_1) != 0) {
set_affinity(ctx->busy, 1);
}
if ((events & EVENT_MOVE_SELF_TO_CPU_0) != 0) {
set_affinity(RTEMS_SELF, 0);
}
if ((events & EVENT_MOVE_SELF_TO_CPU_1) != 0) {
set_affinity(RTEMS_SELF, 1);
}
if ((events & EVENT_SET_SELF_PRIO_TO_LOW) != 0) {
set_prio(RTEMS_SELF, PRIO_LOW);
}
if ((events & EVENT_SET_BUSY_PRIO_TO_IDLE) != 0) {
set_prio(ctx->busy, PRIO_IDLE);
}
if ((events & EVENT_SET_FLAG) != 0) {
ctx->flag = true;
}
if ((events & EVENT_WAKEUP_MASTER) != 0) {
send_events(ctx->master, EVENT_WAKEUP_MASTER);
}
}
}
static void busy_task(rtems_task_argument arg)
{
(void) arg;
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
_CPU_Thread_Idle_body(0);
#else
while (true) {
/* Do nothing */
}
#endif
}
static const char *blocked_or_ready(bool blocked)
{
return blocked ? "blocked" : "ready";
}
static void reconfigure_scheduler(test_context *ctx)
{
rtems_status_code sc;
puts("reconfigure scheduler");
set_prio(ctx->master, PRIO_MIDDLE);
set_prio(ctx->event, PRIO_LOW);
set_prio(ctx->event_2, PRIO_VERY_LOW);
set_prio(ctx->busy, PRIO_IDLE);
set_affinity(ctx->master, 0);
set_affinity(ctx->event, 0);
set_affinity(ctx->event_2, 0);
set_affinity(ctx->busy, 0);
sc = rtems_scheduler_remove_processor(ctx->sched_a, 1);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_scheduler_add_processor(ctx->sched_b, 1);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void test_simple_pin_unpin(test_context *ctx, int run)
{
Per_CPU_Control *cpu_self;
Thread_Control *executing;
printf("test simple wait unpin (run %i)\n", run);
set_affinity(ctx->busy, 0);
set_prio(ctx->busy, PRIO_IDLE);
set_prio(RTEMS_SELF, PRIO_MIDDLE);
rtems_test_assert(rtems_get_current_processor() == 1);
cpu_self = _Thread_Dispatch_disable();
executing = _Per_CPU_Get_executing(cpu_self);
_Thread_Pin(executing);
rtems_test_assert(rtems_get_current_processor() == 1);
_Thread_Unpin(executing, cpu_self);
_Thread_Dispatch_enable(cpu_self);
rtems_test_assert(rtems_get_current_processor() == 1);
}
static void test_pin_wait_unpin(test_context *ctx, bool blocked, int run)
{
printf("test pin wait unpin (%s, run %i)\n", blocked_or_ready(blocked), run);
set_affinity(ctx->busy, 0);
set_prio(ctx->busy, PRIO_IDLE);
set_prio(RTEMS_SELF, PRIO_MIDDLE);
set_prio(ctx->event, PRIO_LOW);
set_affinity(ctx->event, 1);
rtems_test_assert(rtems_get_current_processor() == 1);
pin(blocked);
rtems_test_assert(rtems_get_current_processor() == 1);
send_events(ctx->event, EVENT_WAKEUP_MASTER);
rtems_test_assert(rtems_get_current_processor() == 1);
wait_for_events();
rtems_test_assert(rtems_get_current_processor() == 1);
set_prio(ctx->busy, PRIO_HIGH);
set_affinity(ctx->busy, 0);
unpin(blocked);
rtems_test_assert(rtems_get_current_processor() == 1);
}
static void test_pin_preempt_unpin(test_context *ctx, bool blocked, int run)
{
printf(
"test pin preempt unpin (%s, run %i)\n",
blocked_or_ready(blocked),
run
);
set_prio(RTEMS_SELF, PRIO_MIDDLE);
set_prio(ctx->event, PRIO_VERY_HIGH);
set_prio(ctx->busy, PRIO_HIGH);
set_affinity(ctx->event, 0);
set_affinity(ctx->busy, 0);
rtems_test_assert(rtems_get_current_processor() == 1);
pin(blocked);
rtems_test_assert(rtems_get_current_processor() == 1);
ctx->flag = false;
send_events(
ctx->event,
EVENT_MOVE_BUSY_TO_CPU_1 | EVENT_SET_SELF_PRIO_TO_LOW
| EVENT_SET_BUSY_PRIO_TO_IDLE | EVENT_SET_FLAG
);
while (!ctx->flag) {
rtems_test_assert(rtems_get_current_processor() == 1);
}
set_affinity(ctx->busy, 0);
unpin(blocked);
rtems_test_assert(rtems_get_current_processor() == 1);
}
static void test_pin_home_no_help_unpin(
test_context *ctx,
bool blocked,
int run
)
{
rtems_status_code sc;
printf(
"test pin home no help unpin (%s, run %i)\n",
blocked_or_ready(blocked),
run
);
set_affinity(ctx->busy, 1);
set_prio(ctx->busy, PRIO_IDLE);
set_prio(RTEMS_SELF, PRIO_MIDDLE);
rtems_test_assert(rtems_get_current_processor() == 0);
pin(blocked);
rtems_test_assert(rtems_get_current_processor() == 0);
sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->sched_b, 1);
rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
rtems_mutex_lock(&ctx->mtx);
rtems_test_assert(rtems_get_current_processor() == 0);
set_affinity(ctx->event, 1);
set_prio(ctx->event, PRIO_MIDDLE);
send_events(ctx->event, EVENT_MTX_LOCK);
set_prio(ctx->event_2, PRIO_LOW);
set_affinity(ctx->event_2, 1);
send_events(ctx->event_2, EVENT_WAKEUP_MASTER);
wait_for_events();
/* Now the event task can help us */
rtems_test_assert(ctx->mtx._Queue._heads != NULL);
rtems_test_assert(rtems_get_current_processor() == 0);
set_affinity(ctx->event_2, 0);
set_affinity(ctx->busy, 1);
set_prio(ctx->busy, PRIO_HIGH);
send_events(
ctx->event_2,
EVENT_MOVE_BUSY_TO_CPU_0 | EVENT_MOVE_SELF_TO_CPU_1
| EVENT_SET_SELF_PRIO_TO_LOW | EVENT_SET_BUSY_PRIO_TO_IDLE
);
set_prio(ctx->event_2, PRIO_VERY_HIGH);
rtems_test_assert(rtems_get_current_processor() == 0);
rtems_mutex_unlock(&ctx->mtx);
rtems_test_assert(rtems_get_current_processor() == 0);
send_events(ctx->event, EVENT_WAKEUP_MASTER | EVENT_MTX_UNLOCK);
wait_for_events();
rtems_test_assert(rtems_get_current_processor() == 0);
unpin(blocked);
rtems_test_assert(rtems_get_current_processor() == 0);
}
static void test_pin_foreign_no_help_unpin(
test_context *ctx,
bool blocked,
int run
)
{
printf(
"test pin foreign no help unpin (%s, run %i)\n",
blocked_or_ready(blocked),
run
);
set_affinity(ctx->busy, 1);
set_prio(ctx->busy, PRIO_IDLE);
set_prio(RTEMS_SELF, PRIO_MIDDLE);
rtems_test_assert(rtems_get_current_processor() == 0);
rtems_mutex_lock(&ctx->mtx);
rtems_test_assert(rtems_get_current_processor() == 0);
set_affinity(ctx->event, 1);
set_prio(ctx->event, PRIO_MIDDLE);
send_events(ctx->event, EVENT_MTX_LOCK);
set_prio(ctx->event_2, PRIO_LOW);
set_affinity(ctx->event_2, 1);
send_events(ctx->event_2, EVENT_WAKEUP_MASTER);
wait_for_events();
/* Now the event task can help us */
rtems_test_assert(ctx->mtx._Queue._heads != NULL);
rtems_test_assert(rtems_get_current_processor() == 0);
/* Request help */
set_affinity(ctx->busy, 0);
set_prio(ctx->busy, PRIO_HIGH);
rtems_test_assert(rtems_get_current_processor() == 1);
/* Pin while using foreign scheduler */
pin(blocked);
rtems_test_assert(rtems_get_current_processor() == 1);
set_affinity(ctx->event_2, 1);
send_events(
ctx->event_2,
EVENT_MOVE_BUSY_TO_CPU_1 | EVENT_MOVE_SELF_TO_CPU_0
| EVENT_SET_SELF_PRIO_TO_LOW | EVENT_SET_BUSY_PRIO_TO_IDLE
);
set_prio(ctx->event_2, PRIO_VERY_HIGH);
rtems_test_assert(rtems_get_current_processor() == 1);
unpin(blocked);
rtems_test_assert(rtems_get_current_processor() == 0);
set_prio(ctx->busy, PRIO_IDLE);
rtems_mutex_unlock(&ctx->mtx);
rtems_test_assert(rtems_get_current_processor() == 0);
send_events(ctx->event, EVENT_WAKEUP_MASTER | EVENT_MTX_UNLOCK);
wait_for_events();
rtems_test_assert(rtems_get_current_processor() == 0);
}
static void test(test_context *ctx)
{
rtems_status_code sc;
int run;
ctx->master = rtems_task_self();
rtems_mutex_init(&ctx->mtx, "test");
sc = rtems_scheduler_ident(SCHED_A, &ctx->sched_a);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_scheduler_ident(SCHED_B, &ctx->sched_b);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_create(
rtems_build_name('B', 'U', 'S', 'Y'),
PRIO_HIGH,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&ctx->busy
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(ctx->busy, busy_task, (rtems_task_argument) ctx);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
set_affinity(ctx->busy, 0);
set_prio(ctx->busy, PRIO_IDLE);
rtems_test_assert(rtems_get_current_processor() == 1);
sc = rtems_task_create(
rtems_build_name('E', 'V', 'T', '1'),
PRIO_LOW,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&ctx->event
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(ctx->event, event_task, (rtems_task_argument) ctx);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
send_events(ctx->event, EVENT_WAKEUP_MASTER);
wait_for_events();
sc = rtems_task_create(
rtems_build_name('E', 'V', 'T', '2'),
PRIO_LOW,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&ctx->event_2
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(ctx->event_2, event_task, (rtems_task_argument) ctx);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
send_events(ctx->event_2, EVENT_WAKEUP_MASTER);
wait_for_events();
for (run = 1; run <= 3; ++run) {
test_simple_pin_unpin(ctx, run);
test_pin_wait_unpin(ctx, true, run);
test_pin_wait_unpin(ctx, false, run);
test_pin_preempt_unpin(ctx, true, run);
test_pin_preempt_unpin(ctx, false, run);
}
reconfigure_scheduler(ctx);
for (run = 1; run <= 3; ++run) {
test_pin_home_no_help_unpin(ctx, true, run);
test_pin_home_no_help_unpin(ctx, false, run);
test_pin_foreign_no_help_unpin(ctx, true, run);
test_pin_foreign_no_help_unpin(ctx, false, run);
}
}
static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
if (rtems_get_processor_count() == CPU_COUNT) {
test(&test_instance);
} else {
puts("warning: wrong processor count to run the test");
}
TEST_END();
rtems_test_exit(0);
}
#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER
#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT
#define CONFIGURE_MAXIMUM_TASKS 4
#define CONFIGURE_INIT_TASK_PRIORITY PRIO_MIDDLE
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_SCHEDULER_EDF_SMP
#include <rtems/scheduler.h>
RTEMS_SCHEDULER_EDF_SMP(a, CONFIGURE_MAXIMUM_PROCESSORS);
RTEMS_SCHEDULER_EDF_SMP(b, CONFIGURE_MAXIMUM_PROCESSORS);
#define CONFIGURE_SCHEDULER_TABLE_ENTRIES \
RTEMS_SCHEDULER_TABLE_EDF_SMP(a, SCHED_A), \
RTEMS_SCHEDULER_TABLE_EDF_SMP(b, SCHED_B)
#define CONFIGURE_SCHEDULER_ASSIGNMENTS \
RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
#define CONFIGURE_INIT
#include <rtems/confdefs.h>


@@ -0,0 +1,12 @@
This file describes the directives and concepts tested by this test set.
test set name: smpthreadpin01
directives:
- _Thread_Pin()
- _Thread_Unpin()
concepts:
- Ensure that the thread to processor pinning works.


@@ -0,0 +1,35 @@
*** BEGIN OF TEST SMPTHREADPIN 1 ***
*** TEST VERSION: 5.0.0.2d1c3dff12bbbfec63997985f11bc80edfc1e994
*** TEST STATE: EXPECTED-PASS
*** TEST BUILD: RTEMS_SMP
*** TEST TOOLS: 7.3.0 20180125 (RTEMS 5, RSB 9670d7541e0621915e521fe76e7bb33de8cee661, Newlib d13c84eb07e35984bf7a974cd786a6cdac29e6b9)
test simple wait unpin (run 1)
test pin wait unpin (blocked, run 1)
test pin wait unpin (ready, run 1)
test pin preempt unpin (blocked, run 1)
test pin preempt unpin (ready, run 1)
test simple wait unpin (run 2)
test pin wait unpin (blocked, run 2)
test pin wait unpin (ready, run 2)
test pin preempt unpin (blocked, run 2)
test pin preempt unpin (ready, run 2)
test simple wait unpin (run 3)
test pin wait unpin (blocked, run 3)
test pin wait unpin (ready, run 3)
test pin preempt unpin (blocked, run 3)
test pin preempt unpin (ready, run 3)
reconfigure scheduler
test pin home no help unpin (blocked, run 1)
test pin home no help unpin (ready, run 1)
test pin foreign no help unpin (blocked, run 1)
test pin foreign no help unpin (ready, run 1)
test pin home no help unpin (blocked, run 2)
test pin home no help unpin (ready, run 2)
test pin foreign no help unpin (blocked, run 2)
test pin foreign no help unpin (ready, run 2)
test pin home no help unpin (blocked, run 3)
test pin home no help unpin (ready, run 3)
test pin foreign no help unpin (blocked, run 3)
test pin foreign no help unpin (ready, run 3)
*** END OF TEST SMPTHREADPIN 1 ***