forked from Imagelibrary/rtems
score: Rework idle handling in SMP schedulers
This patch fixes an issue with the idle thread handling in the SMP scheduler framework used for the MrsP locking protocol. The approach of using a simple chain of unused idle threads is broken for schedulers which support thread to processor affinity. The reason is that the thread to processor affinity introduces another ordering indicator which may under certain conditions lead to a reordering of idle threads in the scheduled chain. This reordering is not propagated to the chain of unused idle threads. As a result, an idle thread which is already in use could be picked for a sticky scheduler node. This locks up the system in infinite loops in the thread context switch procedure. To fix this, the SMP scheduler implementations must now provide callbacks to get and release an unused idle thread.

Update #4531.
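To make the reworked interface concrete, here is a minimal, self-contained sketch of the get/release idle callback pair (hypothetical names and types, not the RTEMS API): the get handler removes the lowest priority node from the scheduler's own ready structure and hands it out as the idle donor, and the release handler puts it back, so the scheduler's own ordering rules, including affinity, stay authoritative instead of a separate chain of unused idle threads.

/*
 * Simplified, hypothetical model of the reworked idle handling.  This is not
 * RTEMS code; it only illustrates the shape of the two handlers.
 */
#include <assert.h>
#include <stdio.h>

#define NODE_COUNT 4

typedef struct {
  int priority;   /* larger value means less important */
  int in_ready;   /* non-zero if the node is in the ready set */
} Node;

typedef struct {
  Node nodes[ NODE_COUNT ];
} Ready_set;

/* Handler types corresponding in spirit to the new get/release callbacks */
typedef Node *( *Get_idle_node )( void *arg );
typedef void ( *Release_idle_node )( Node *node, void *arg );

/* Get handler: take the lowest priority ready node out of the ready set */
static Node *ready_set_get_idle( void *arg )
{
  Ready_set *self;
  Node      *lowest;
  int        i;

  self = arg;
  lowest = NULL;

  for ( i = 0; i < NODE_COUNT; ++i ) {
    Node *node;

    node = &self->nodes[ i ];

    if ( node->in_ready && ( lowest == NULL || node->priority > lowest->priority ) ) {
      lowest = node;
    }
  }

  assert( lowest != NULL );
  lowest->in_ready = 0;
  return lowest;
}

/* Release handler: put the node back into the ready set */
static void ready_set_release_idle( Node *node, void *arg )
{
  (void) arg;
  node->in_ready = 1;
}

int main( void )
{
  Ready_set         set = { { { 1, 1 }, { 2, 1 }, { 3, 1 }, { 4, 1 } } };
  Get_idle_node     get_idle = ready_set_get_idle;
  Release_idle_node release_idle = ready_set_release_idle;
  Node             *idle;

  idle = ( *get_idle )( &set );
  printf( "borrowed the lowest priority ready node (priority %d)\n", idle->priority );
  ( *release_idle )( idle, &set );

  return 0;
}

In the actual patch below, each SMP scheduler implements this pair against its own ready structure: the priority based schedulers park idle nodes in the lowest priority ready queue, the simple SMP scheduler takes the last node of its ready chain, the EDF SMP scheduler extracts the maximum of its first ready queue and appends released nodes back to it, and the strong APA scheduler scans its ready chain for the lowest priority ready node.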
@@ -798,26 +798,26 @@ RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
#if defined(RTEMS_SMP)
/**
* @brief Gets an idle thread from the scheduler instance.
* @brief Gets a scheduler node which is owned by an unused idle thread.
*
* @param context The scheduler instance context.
* @param arg is the handler argument.
*
* @return idle An idle thread for use. This function must always return an
* idle thread. If none is available, then this is a fatal error.
* @return Returns a scheduler node owned by an idle thread for use. This
* handler must always return a node. If none is available, then this is a
* fatal error.
*/
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
Scheduler_Context *context
);
typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );

/**
* @brief Releases an idle thread to the scheduler instance for reuse.
* @brief Releases the scheduler node which is owned by an idle thread.
*
* @param context The scheduler instance context.
* @param idle The idle thread to release.
* @param node is the node to release.
*
* @param arg is the handler argument.
*/
typedef void ( *Scheduler_Release_idle_thread )(
Scheduler_Context *context,
Thread_Control *idle
typedef void ( *Scheduler_Release_idle_node )(
Scheduler_Node *node,
void *arg
);

/**
@@ -841,189 +841,114 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
}

/**
* @brief Uses an idle thread for this scheduler node.
* @brief Uses an idle thread for the scheduler node.
*
* A thread whose home scheduler node has a sticky level greater than zero may
* use an idle thread in the home scheduler instance in the case it executes
* currently in another scheduler instance or in the case it is in a blocking
* state.
* @param[in, out] node is the node which wants to use an idle thread.
*
* @param context The scheduler instance context.
* @param[in, out] node The node which wants to use the idle thread.
* @param cpu The processor for the idle thread.
* @param get_idle_thread Function to get an idle thread.
* @param get_idle_node is the get idle node handler.
*
* @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
Scheduler_Context *context,
Scheduler_Node *node,
Scheduler_Get_idle_thread get_idle_thread
Scheduler_Node *node,
Scheduler_Get_idle_node get_idle_node,
void *arg
)
{
Thread_Control *idle = ( *get_idle_thread )( context );
Scheduler_Node *idle_node;
Thread_Control *idle;

idle_node = ( *get_idle_node )( arg );
idle = _Scheduler_Node_get_owner( idle_node );
_Assert( idle->is_idle );
_Scheduler_Node_set_idle_user( node, idle );

return idle;
}

/**
* @brief This enumeration defines what a scheduler should do with a node which
* could be scheduled.
* @brief Releases the idle thread used by the scheduler node.
*
* @param[in, out] node is the node which wants to release the idle thread.
*
* @param idle is the idle thread to release.
*
* @param release_idle_node is the release idle node handler.
*
* @param arg is the handler argument.
*/
typedef enum {
SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
* @brief Tries to schedule the scheduler node.
*
* When a scheduler needs to schedule a node, it shall use this function to
* determine what it shall do with the node. The node replaces a victim node if
* it can be scheduled.
*
* This function uses the state of the node and the scheduler state of the owner
* thread to determine what shall be done. Each scheduler maintains its nodes
* independent of other schedulers. This function ensures that a thread is
* scheduled by at most one scheduler. If a node requires an executing thread
* due to some locking protocol and the owner thread is already scheduled by
* another scheduler, then an idle thread shall be attached to the node.
*
* @param[in, out] context is the scheduler context.
* @param[in, out] node is the node which could be scheduled.
* @param idle is an idle thread used by the victim node or NULL.
* @param get_idle_thread points to a function to get an idle thread.
*
* @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE The node shall be scheduled.
*
* @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The node shall be
* scheduled and the provided idle thread shall be attached to the node. This
* action is returned, if the node cannot use the owner thread and shall use
* an idle thread instead. In this case, the idle thread is provided by the
* victim node.
*
* @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK The node shall be blocked. This
* action is returned, if the owner thread is already scheduled by another
* scheduler.
*/
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
Scheduler_Context *context,
Scheduler_Node *node,
Scheduler_Node *victim,
Scheduler_Get_idle_thread get_idle_thread
RTEMS_INLINE_ROUTINE void _Scheduler_Release_idle_thread(
Scheduler_Node *node,
const Thread_Control *idle,
Scheduler_Release_idle_node release_idle_node,
void *arg
)
{
ISR_lock_Context lock_context;
Scheduler_Try_to_schedule_action action;
Thread_Control *owner;
Thread_Control *owner;
Scheduler_Node *idle_node;

action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
owner = _Scheduler_Node_get_owner( node );
_Assert( _Scheduler_Node_get_user( node ) == owner );
_Assert( _Scheduler_Node_get_idle( node ) == NULL );

_Thread_Scheduler_acquire_critical( owner, &lock_context );

if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
_Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
} else if (
owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
&& node->sticky_level <= 1
) {
action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
} else if ( node->sticky_level == 0 ) {
action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
} else if ( _Scheduler_Node_get_idle( victim ) != NULL ) {
action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
} else {
Thread_Control *idle;
Thread_Control *user;

idle = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
user = _Scheduler_Node_get_user( node );
_Thread_Set_CPU( idle, _Thread_Get_CPU( user ) );
}

_Thread_Scheduler_release_critical( owner, &lock_context );
return action;
_Assert( _Scheduler_Node_get_user( node ) == idle );
_Scheduler_Node_set_user( node, owner );
node->idle = NULL;
idle_node = _Thread_Scheduler_get_home_node( idle );
( *release_idle_node )( idle_node, arg );
}

/**
* @brief Releases an idle thread using this scheduler node.
* @brief Releases the idle thread used by the scheduler node if the node uses
* an idle thread.
*
* @param context The scheduler instance context.
* @param[in, out] node The node which may have an idle thread as user.
* @param release_idle_thread Function to release an idle thread.
* @param[in, out] node is the node which wants to release the idle thread.
*
* @retval idle The idle thread which used this node.
* @retval NULL This node had no idle thread as an user.
* @param release_idle_node is the release idle node handler.
*
* @param arg is the handler argument.
*
* @retval NULL The scheduler node did not use an idle thread.
*
* @return Returns the idle thread used by the scheduler node.
*/
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
Scheduler_Context *context,
Scheduler_Node *node,
Scheduler_Release_idle_thread release_idle_thread
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
Scheduler_Node *node,
Scheduler_Release_idle_node release_idle_node,
void *arg
)
{
Thread_Control *idle = _Scheduler_Node_get_idle( node );
Thread_Control *idle;

idle = _Scheduler_Node_get_idle( node );

if ( idle != NULL ) {
Thread_Control *owner = _Scheduler_Node_get_owner( node );

node->idle = NULL;
_Scheduler_Node_set_user( node, owner );
( *release_idle_thread )( context, idle );
_Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
}

return idle;
}

/**
* @brief Exchanges an idle thread from the scheduler node that uses it
* right now to another scheduler node.
*
* @param needs_idle is the scheduler node that needs an idle thread.
*
* @param uses_idle is the scheduler node that used the idle thread.
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
Scheduler_Node *needs_idle,
Scheduler_Node *uses_idle
)
{
_Scheduler_Node_set_idle_user(
needs_idle,
_Scheduler_Node_get_idle( uses_idle )
);
_Scheduler_Node_set_user(
uses_idle,
_Scheduler_Node_get_owner( uses_idle )
);
uses_idle->idle = NULL;
}

/**
* @brief Blocks this scheduler node.
*
* @param context The scheduler instance context.
* @param[in, out] thread The thread which wants to get blocked referencing this
* node. This is not necessarily the user of this node in case the node
* participates in the scheduler helping protocol.
* @param[in, out] node The node which wants to get blocked.
* @param is_scheduled This node is scheduled.
* @param get_idle_thread Function to get an idle thread.
*
* @param[in, out] node is the node which wants to get blocked.
*
* @param get_idle_node is the get idle node handler.
*
* @param arg is the get idle node handler argument.
*
* @retval thread_cpu The processor of the thread. Indicates to continue with
* the blocking operation.
* @retval NULL Otherwise.
*/
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
Scheduler_Context *context,
Thread_Control *thread,
Scheduler_Node *node,
bool is_scheduled,
Scheduler_Get_idle_thread get_idle_thread
Thread_Control *thread,
Scheduler_Node *node,
bool is_scheduled,
Scheduler_Get_idle_node get_idle_node,
void *arg
)
{
int sticky_level;
@@ -1045,7 +970,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
Thread_Control *idle;

idle = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
idle = _Scheduler_Use_idle_thread( node, get_idle_node, arg );
_Thread_Set_CPU( idle, thread_cpu );
_Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
}
@@ -1058,31 +983,28 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
}

/**
* @brief Discard the idle thread from the scheduler node.
* @brief Discards the idle thread used by the scheduler node.
*
* @param context The scheduler context.
* @param[in, out] the_thread The thread for the operation.
* @param[in, out] node The scheduler node to discard the idle thread from.
* @param release_idle_thread Method to release the idle thread from the context.
* @param[in, out] the_thread is the thread owning the node.
*
* @param[in, out] node is the node which wants to release the idle thread.
*
* @param release_idle_node is the release idle node handler.
*
* @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
Scheduler_Context *context,
Thread_Control *the_thread,
Scheduler_Node *node,
Scheduler_Release_idle_thread release_idle_thread
Thread_Control *the_thread,
Scheduler_Node *node,
Scheduler_Release_idle_node release_idle_node,
void *arg
)
{
Thread_Control *idle;
Thread_Control *owner;
Per_CPU_Control *cpu;

idle = _Scheduler_Node_get_idle( node );
owner = _Scheduler_Node_get_owner( node );

node->idle = NULL;
_Assert( _Scheduler_Node_get_user( node ) == idle );
_Scheduler_Node_set_user( node, owner );
( *release_idle_thread )( context, idle );
_Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );

cpu = _Thread_Get_CPU( idle );
_Thread_Set_CPU( the_thread, cpu );
@@ -1102,11 +1024,11 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
* @retval false Do not continue with the unblocking operation.
*/
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
Scheduler_Context *context,
Thread_Control *the_thread,
Scheduler_Node *node,
bool is_scheduled,
Scheduler_Release_idle_thread release_idle_thread
Thread_Control *the_thread,
Scheduler_Node *node,
bool is_scheduled,
Scheduler_Release_idle_node release_idle_node,
void *arg
)
{
bool unblock;
@@ -1115,13 +1037,13 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
_Assert( node->sticky_level > 0 );

if ( is_scheduled ) {
_Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
_Scheduler_Discard_idle_thread(
context,
the_thread,
node,
release_idle_thread
release_idle_node,
arg
);
_Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
unblock = false;
} else {
_Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

@@ -57,6 +57,7 @@ extern "C" {
*/
typedef struct {
Scheduler_SMP_Context Base;
Chain_Control *idle_ready_queue;
Priority_bit_map_Control Bit_map;
Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
} Scheduler_priority_SMP_Context;

@@ -156,6 +156,41 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
);
}

static inline Scheduler_Node *_Scheduler_priority_SMP_Get_idle( void *arg )
{
Scheduler_priority_SMP_Context *self;
Scheduler_priority_SMP_Node *lowest_ready;

self = _Scheduler_priority_SMP_Get_self( arg );
lowest_ready = (Scheduler_priority_SMP_Node *)
_Chain_Last( self->idle_ready_queue );
_Scheduler_priority_Ready_queue_extract(
&lowest_ready->Base.Base.Node.Chain,
&lowest_ready->Ready_queue,
&self->Bit_map
);

return &lowest_ready->Base.Base;
}

static inline void _Scheduler_priority_SMP_Release_idle(
Scheduler_Node *node_base,
void *arg
)
{
Scheduler_priority_SMP_Context *self;
Scheduler_priority_SMP_Node *node;

self = _Scheduler_priority_SMP_Get_self( arg );
node = _Scheduler_priority_SMP_Node_downcast( node_base );

_Scheduler_priority_Ready_queue_enqueue(
&node->Base.Base.Node.Chain,
&node->Ready_queue,
&self->Bit_map
);
}

static inline void _Scheduler_priority_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *node_to_update,
@@ -55,15 +55,6 @@ typedef struct {
* @brief The chain of scheduled nodes.
*/
Chain_Control Scheduled;

/**
* @brief Chain of the available idle threads.
*
* Idle threads are used for the scheduler helping protocol. It is crucial
* that the idle threads preserve their relative order. This is the case for
* this priority based scheduler.
*/
Chain_Control Idle_threads;
} Scheduler_SMP_Context;

/**
File diff suppressed because it is too large
@@ -409,10 +409,36 @@ static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
);
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
{
Scheduler_EDF_SMP_Context *self;
Scheduler_Node *lowest_ready;

self = _Scheduler_EDF_SMP_Get_self( arg );
lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
_Assert( lowest_ready != NULL );
_RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
_Chain_Initialize_node( &lowest_ready->Node.Chain );

return lowest_ready;
}

static inline void _Scheduler_EDF_SMP_Release_idle(
Scheduler_Node *node,
void *arg
)
{
Scheduler_EDF_SMP_Context *self;

self = _Scheduler_EDF_SMP_Get_self( arg );
_RBTree_Initialize_node( &node->Node.RBTree );
_RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
}

static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled_base,
Scheduler_Node *victim_base,
Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
)
{
@@ -420,7 +446,7 @@ static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_EDF_SMP_Node *scheduled;
uint8_t rqi;

(void) victim_base;
(void) victim_thread;
self = _Scheduler_EDF_SMP_Get_self( context );
scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
rqi = scheduled->ready_queue_index;
@@ -471,7 +497,9 @@ void _Scheduler_EDF_SMP_Block(
|
||||
_Scheduler_EDF_SMP_Extract_from_ready,
|
||||
_Scheduler_EDF_SMP_Get_highest_ready,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -489,8 +517,11 @@ static inline bool _Scheduler_EDF_SMP_Enqueue(
|
||||
_Scheduler_EDF_SMP_Insert_ready,
|
||||
_Scheduler_EDF_SMP_Insert_scheduled,
|
||||
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -510,7 +541,9 @@ static inline void _Scheduler_EDF_SMP_Enqueue_scheduled(
|
||||
_Scheduler_EDF_SMP_Insert_ready,
|
||||
_Scheduler_EDF_SMP_Insert_scheduled,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -527,7 +560,8 @@ void _Scheduler_EDF_SMP_Unblock(
|
||||
thread,
|
||||
node,
|
||||
_Scheduler_EDF_SMP_Do_update,
|
||||
_Scheduler_EDF_SMP_Enqueue
|
||||
_Scheduler_EDF_SMP_Enqueue,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -546,7 +580,8 @@ static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
|
||||
_Scheduler_EDF_SMP_Insert_scheduled,
|
||||
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_EDF_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -616,7 +651,9 @@ void _Scheduler_EDF_SMP_Withdraw_node(
|
||||
_Scheduler_EDF_SMP_Extract_from_ready,
|
||||
_Scheduler_EDF_SMP_Get_highest_ready,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -649,7 +686,9 @@ void _Scheduler_EDF_SMP_Clean_sticky(
|
||||
_Scheduler_EDF_SMP_Extract_from_ready,
|
||||
_Scheduler_EDF_SMP_Get_highest_ready,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -695,7 +734,9 @@ Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
|
||||
cpu,
|
||||
_Scheduler_EDF_SMP_Extract_from_scheduled,
|
||||
_Scheduler_EDF_SMP_Extract_from_ready,
|
||||
_Scheduler_EDF_SMP_Enqueue
|
||||
_Scheduler_EDF_SMP_Enqueue,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -833,7 +874,9 @@ Status_Control _Scheduler_EDF_SMP_Set_affinity(
|
||||
_Scheduler_EDF_SMP_Get_highest_ready,
|
||||
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_EDF_SMP_Enqueue,
|
||||
_Scheduler_EDF_SMP_Allocate_processor
|
||||
_Scheduler_EDF_SMP_Allocate_processor,
|
||||
_Scheduler_EDF_SMP_Get_idle,
|
||||
_Scheduler_EDF_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -190,7 +190,9 @@ void _Scheduler_priority_affinity_SMP_Block(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_affinity_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
|
||||
/*
|
||||
@@ -262,8 +264,11 @@ static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
|
||||
_Scheduler_priority_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -342,7 +347,10 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
|
||||
lowest_scheduled,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -363,7 +371,8 @@ void _Scheduler_priority_affinity_SMP_Unblock(
|
||||
thread,
|
||||
node,
|
||||
_Scheduler_priority_SMP_Do_update,
|
||||
_Scheduler_priority_affinity_SMP_Enqueue_fifo
|
||||
_Scheduler_priority_affinity_SMP_Enqueue_fifo,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
|
||||
/*
|
||||
@@ -390,8 +399,11 @@ static bool _Scheduler_priority_affinity_SMP_Enqueue(
|
||||
_Scheduler_priority_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -416,7 +428,9 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
|
||||
_Scheduler_priority_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -435,7 +449,8 @@ static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -510,7 +525,9 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_affinity_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -543,7 +560,9 @@ void _Scheduler_priority_affinity_SMP_Clean_sticky(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_affinity_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_exact
|
||||
_Scheduler_SMP_Allocate_processor_exact,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -575,7 +594,9 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
|
||||
cpu,
|
||||
_Scheduler_SMP_Extract_from_scheduled,
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_affinity_SMP_Enqueue
|
||||
_Scheduler_priority_affinity_SMP_Enqueue,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ void _Scheduler_priority_SMP_Initialize( const Scheduler_Control *scheduler )
|
||||
_Scheduler_priority_SMP_Get_context( scheduler );
|
||||
|
||||
_Scheduler_SMP_Initialize( &self->Base );
|
||||
self->idle_ready_queue = &self->Ready[ scheduler->maximum_priority ];
|
||||
_Priority_bit_map_Initialize( &self->Bit_map );
|
||||
_Scheduler_priority_Ready_queue_initialize(
|
||||
&self->Ready[ 0 ],
|
||||
@@ -118,7 +119,9 @@ void _Scheduler_priority_SMP_Block(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -136,8 +139,11 @@ static bool _Scheduler_priority_SMP_Enqueue(
|
||||
_Scheduler_priority_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -157,7 +163,9 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled(
|
||||
_Scheduler_priority_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -174,7 +182,8 @@ void _Scheduler_priority_SMP_Unblock(
|
||||
thread,
|
||||
node,
|
||||
_Scheduler_priority_SMP_Do_update,
|
||||
_Scheduler_priority_SMP_Enqueue
|
||||
_Scheduler_priority_SMP_Enqueue,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -193,7 +202,8 @@ static bool _Scheduler_priority_SMP_Do_ask_for_help(
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -263,7 +273,9 @@ void _Scheduler_priority_SMP_Withdraw_node(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -296,7 +308,9 @@ void _Scheduler_priority_SMP_Clean_sticky(
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_SMP_Get_highest_ready,
|
||||
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -328,7 +342,9 @@ Thread_Control *_Scheduler_priority_SMP_Remove_processor(
|
||||
cpu,
|
||||
_Scheduler_SMP_Extract_from_scheduled,
|
||||
_Scheduler_priority_SMP_Extract_from_ready,
|
||||
_Scheduler_priority_SMP_Enqueue
|
||||
_Scheduler_priority_SMP_Enqueue,
|
||||
_Scheduler_priority_SMP_Get_idle,
|
||||
_Scheduler_priority_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -172,6 +172,30 @@ static void _Scheduler_simple_SMP_Extract_from_ready(
_Chain_Extract_unprotected( &node_to_extract->Node.Chain );
}

static inline Scheduler_Node *_Scheduler_simple_SMP_Get_idle( void *arg )
{
Scheduler_simple_SMP_Context *self =
_Scheduler_simple_SMP_Get_self( arg );
Scheduler_Node *lowest_ready = (Scheduler_Node *) _Chain_Last( &self->Ready );

_Assert( &lowest_ready->Node.Chain != _Chain_Head( &self->Ready ) );
_Chain_Extract_unprotected( &lowest_ready->Node.Chain );

return lowest_ready;
}

static inline void _Scheduler_simple_SMP_Release_idle(
Scheduler_Node *node,
void *arg
)
{
Scheduler_simple_SMP_Context *self;

self = _Scheduler_simple_SMP_Get_self( arg );

_Chain_Append_unprotected( &self->Ready, &node->Node.Chain );
}

void _Scheduler_simple_SMP_Block(
|
||||
const Scheduler_Control *scheduler,
|
||||
Thread_Control *thread,
|
||||
@@ -188,7 +212,9 @@ void _Scheduler_simple_SMP_Block(
|
||||
_Scheduler_simple_SMP_Extract_from_ready,
|
||||
_Scheduler_simple_SMP_Get_highest_ready,
|
||||
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -206,8 +232,11 @@ static bool _Scheduler_simple_SMP_Enqueue(
|
||||
_Scheduler_simple_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -227,7 +256,9 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled(
|
||||
_Scheduler_simple_SMP_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -244,7 +275,8 @@ void _Scheduler_simple_SMP_Unblock(
|
||||
thread,
|
||||
node,
|
||||
_Scheduler_simple_SMP_Do_update,
|
||||
_Scheduler_simple_SMP_Enqueue
|
||||
_Scheduler_simple_SMP_Enqueue,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -263,7 +295,8 @@ static bool _Scheduler_simple_SMP_Do_ask_for_help(
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
|
||||
_Scheduler_SMP_Get_lowest_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -333,7 +366,9 @@ void _Scheduler_simple_SMP_Withdraw_node(
|
||||
_Scheduler_simple_SMP_Extract_from_ready,
|
||||
_Scheduler_simple_SMP_Get_highest_ready,
|
||||
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -366,7 +401,9 @@ void _Scheduler_simple_SMP_Clean_sticky(
|
||||
_Scheduler_simple_SMP_Extract_from_ready,
|
||||
_Scheduler_simple_SMP_Get_highest_ready,
|
||||
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
|
||||
_Scheduler_SMP_Allocate_processor_lazy
|
||||
_Scheduler_SMP_Allocate_processor_lazy,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -398,7 +435,9 @@ Thread_Control *_Scheduler_simple_SMP_Remove_processor(
|
||||
cpu,
|
||||
_Scheduler_SMP_Extract_from_scheduled,
|
||||
_Scheduler_simple_SMP_Extract_from_ready,
|
||||
_Scheduler_simple_SMP_Enqueue
|
||||
_Scheduler_simple_SMP_Enqueue,
|
||||
_Scheduler_simple_SMP_Get_idle,
|
||||
_Scheduler_simple_SMP_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -157,14 +157,14 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
|
||||
static inline void _Scheduler_strong_APA_Allocate_processor(
|
||||
Scheduler_Context *context,
|
||||
Scheduler_Node *scheduled_base,
|
||||
Scheduler_Node *victim_base,
|
||||
Thread_Control *victim_thread,
|
||||
Per_CPU_Control *victim_cpu
|
||||
)
|
||||
{
|
||||
Scheduler_strong_APA_Node *scheduled;
|
||||
Scheduler_strong_APA_Context *self;
|
||||
|
||||
(void) victim_base;
|
||||
(void) victim_thread;
|
||||
|
||||
scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
|
||||
self = _Scheduler_strong_APA_Get_self( context );
|
||||
@@ -270,6 +270,63 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready(
return highest_ready;
}

static inline Scheduler_Node *_Scheduler_strong_APA_Get_idle( void *arg )
{
Scheduler_strong_APA_Context *self;
Scheduler_strong_APA_Node *lowest_ready = NULL;
Priority_Control max_priority_num;
const Chain_Node *tail;
Chain_Node *next;

self = _Scheduler_strong_APA_Get_self( arg );
tail = _Chain_Immutable_tail( &self->Ready );
next = _Chain_First( &self->Ready );
max_priority_num = 0;

while ( next != tail ) {
Scheduler_strong_APA_Node *node;
Scheduler_SMP_Node_state curr_state;

node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );

if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
Priority_Control curr_priority;

curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );

if ( curr_priority > max_priority_num ) {
max_priority_num = curr_priority;
lowest_ready = node;
}
}

next = _Chain_Next( next );
}

_Assert( lowest_ready != NULL );
_Chain_Extract_unprotected( &lowest_ready->Ready_node );
_Chain_Set_off_chain( &lowest_ready->Ready_node );

return &lowest_ready->Base.Base;
}

static inline void _Scheduler_strong_APA_Release_idle(
Scheduler_Node *node_base,
void *arg
)
{
Scheduler_strong_APA_Context *self;
Scheduler_strong_APA_Node *node;

self = _Scheduler_strong_APA_Get_self( arg );
node = _Scheduler_strong_APA_Node_downcast( node_base );

if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
_Chain_Append_unprotected( &self->Ready, &node->Ready_node );
}
}

static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
|
||||
Scheduler_Context *context,
|
||||
Scheduler_Node *ready_to_scheduled
|
||||
@@ -386,16 +443,24 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
|
||||
* So there is need for task shifting.
|
||||
*/
|
||||
while ( node->cpu_to_preempt != filter_cpu ) {
|
||||
Thread_Control *next_node_idle;
|
||||
|
||||
curr_node = &node->Base.Base;
|
||||
next_node = _Scheduler_strong_APA_Get_scheduled(
|
||||
self,
|
||||
node->cpu_to_preempt
|
||||
);
|
||||
next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
|
||||
next_node,
|
||||
_Scheduler_strong_APA_Release_idle,
|
||||
context
|
||||
);
|
||||
|
||||
(void) _Scheduler_SMP_Preempt(
|
||||
context,
|
||||
curr_node,
|
||||
next_node,
|
||||
next_node_idle,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
);
|
||||
|
||||
@@ -587,6 +652,8 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
|
||||
self = _Scheduler_strong_APA_Get_self( context );
|
||||
CPU = self->CPU;
|
||||
|
||||
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
|
||||
|
||||
node_priority = _Scheduler_Node_get_priority( node );
|
||||
node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
|
||||
|
||||
@@ -633,7 +700,10 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
|
||||
next_node,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
|
||||
curr_node = next_node;
|
||||
@@ -641,13 +711,21 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
|
||||
curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
|
||||
|
||||
while ( curr_node != lowest_reachable ) {
|
||||
Thread_Control *next_node_idle;
|
||||
|
||||
curr_CPU = curr_strong_node->cpu_to_preempt;
|
||||
next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
|
||||
next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
|
||||
next_node,
|
||||
_Scheduler_strong_APA_Release_idle,
|
||||
context
|
||||
);
|
||||
/* curr_node preempts the next_node; */
|
||||
_Scheduler_SMP_Preempt(
|
||||
context,
|
||||
curr_node,
|
||||
next_node,
|
||||
next_node_idle,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
);
|
||||
|
||||
@@ -755,7 +833,9 @@ static inline void _Scheduler_strong_APA_Enqueue_scheduled(
|
||||
_Scheduler_strong_APA_Insert_ready,
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -774,7 +854,8 @@ static inline bool _Scheduler_strong_APA_Do_ask_for_help(
|
||||
_Scheduler_SMP_Insert_scheduled,
|
||||
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
|
||||
_Scheduler_strong_APA_Get_lowest_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -842,7 +923,9 @@ void _Scheduler_strong_APA_Block(
|
||||
_Scheduler_strong_APA_Extract_from_ready,
|
||||
_Scheduler_strong_APA_Get_highest_ready,
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -859,7 +942,8 @@ void _Scheduler_strong_APA_Unblock(
|
||||
thread,
|
||||
node,
|
||||
_Scheduler_strong_APA_Do_update,
|
||||
_Scheduler_strong_APA_Enqueue
|
||||
_Scheduler_strong_APA_Enqueue,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -933,7 +1017,9 @@ void _Scheduler_strong_APA_Withdraw_node(
|
||||
_Scheduler_strong_APA_Extract_from_ready,
|
||||
_Scheduler_strong_APA_Get_highest_ready,
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -966,7 +1052,9 @@ void _Scheduler_strong_APA_Clean_sticky(
|
||||
_Scheduler_strong_APA_Extract_from_ready,
|
||||
_Scheduler_strong_APA_Get_highest_ready,
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1028,7 +1116,9 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
|
||||
cpu,
|
||||
_Scheduler_strong_APA_Extract_from_scheduled,
|
||||
_Scheduler_strong_APA_Extract_from_ready,
|
||||
_Scheduler_strong_APA_Enqueue
|
||||
_Scheduler_strong_APA_Enqueue,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1089,7 +1179,9 @@ Status_Control _Scheduler_strong_APA_Set_affinity(
|
||||
_Scheduler_strong_APA_Get_highest_ready,
|
||||
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
|
||||
_Scheduler_strong_APA_Enqueue,
|
||||
_Scheduler_strong_APA_Allocate_processor
|
||||
_Scheduler_strong_APA_Allocate_processor,
|
||||
_Scheduler_strong_APA_Get_idle,
|
||||
_Scheduler_strong_APA_Release_idle
|
||||
);
|
||||
|
||||
return STATUS_SUCCESSFUL;
|
||||
|
||||