score: Simplify SMP processor allocation

Avoid copy and paste and set the scheduler node state in one place.
Sebastian Huber
2014-07-08 10:38:19 +02:00
parent 835b88b883
commit 19e417678a
4 changed files with 62 additions and 36 deletions
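In short, the shared bookkeeping (setting the scheduler node to SCHEDULER_SMP_NODE_SCHEDULED) moves out of the per-variant allocation routines into one wrapper, _Scheduler_SMP_Allocate_processor(), which then dispatches to a variant callback such as _Scheduler_SMP_Allocate_processor_lazy() or _Scheduler_SMP_Allocate_processor_exact(). The following is a minimal, self-contained sketch of that callback pattern only; the types, names other than those taken from the diff, and the printf bodies are simplified stand-ins, not the actual RTEMS implementation.

#include <stdio.h>

/* Simplified stand-ins for the RTEMS types involved; the real
 * Scheduler_Context, Scheduler_Node and Thread_Control carry far more state. */
typedef struct { const char *name; } Scheduler_Context;
typedef struct { const char *owner; } Scheduler_Node;
typedef const char *Thread_Control;

/* Variant-specific allocation callback, mirroring the reworked
 * Scheduler_SMP_Allocate_processor typedef: it now receives threads,
 * not scheduler nodes. */
typedef void ( *Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control scheduled,
  Thread_Control victim
);

static void allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control scheduled,
  Thread_Control victim
)
{
  /* Stand-in for _Scheduler_SMP_Allocate_processor_lazy(), which tries
   * to avoid migrating executing threads. */
  printf( "%s: lazy allocation, %s replaces %s\n", context->name, scheduled, victim );
}

static void allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control scheduled,
  Thread_Control victim
)
{
  /* Stand-in for _Scheduler_SMP_Allocate_processor_exact(), which does
   * exactly what it is asked to do, as the affinity scheduler requires. */
  printf( "%s: exact allocation, %s replaces %s\n", context->name, scheduled, victim );
}

/* Common wrapper in the spirit of the new _Scheduler_SMP_Allocate_processor():
 * perform the shared step once, then delegate to the variant callback. */
static void allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node *scheduled,
  Scheduler_Node *victim,
  Allocate_processor variant
)
{
  /* Shared step: in RTEMS this is the single
   * _Scheduler_SMP_Node_change_state( ..., SCHEDULER_SMP_NODE_SCHEDULED )
   * call that was previously copied into every variant. */
  printf( "mark node of %s as SCHEDULED\n", scheduled->owner );

  ( *variant )( context, scheduled->owner, victim->owner );
}

int main( void )
{
  Scheduler_Context ctx = { "scheduler" };
  Scheduler_Node scheduled = { "task-a" };
  Scheduler_Node victim = { "task-b" };

  /* The fixed-priority and simple SMP schedulers pass the lazy variant;
   * the affinity scheduler passes the exact variant where needed. */
  allocate_processor( &ctx, &scheduled, &victim, allocate_processor_lazy );
  allocate_processor( &ctx, &scheduled, &victim, allocate_processor_exact );
  return 0;
}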

cpukit/score/include/rtems/score/schedulersmpimpl.h

@@ -313,8 +313,8 @@ typedef void ( *Scheduler_SMP_Enqueue )(
 
 typedef void ( *Scheduler_SMP_Allocate_processor )(
   Scheduler_Context *context,
-  Scheduler_Node *scheduled,
-  Scheduler_Node *victim
+  Thread_Control *scheduled,
+  Thread_Control *victim
 );
 
 static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
@@ -410,24 +410,17 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
   return cpu->scheduler_context == context;
 }
 
-static inline void _Scheduler_SMP_Allocate_processor(
+static inline void _Scheduler_SMP_Allocate_processor_lazy(
   Scheduler_Context *context,
-  Scheduler_Node *scheduled,
-  Scheduler_Node *victim
+  Thread_Control *scheduled_thread,
+  Thread_Control *victim_thread
 )
 {
-  Thread_Control *scheduled_thread = _Scheduler_Node_get_owner( scheduled );
-  Thread_Control *victim_thread = _Scheduler_Node_get_owner( victim );
   Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
   Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
   Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control *heir;
 
-  _Scheduler_SMP_Node_change_state(
-    _Scheduler_SMP_Node_downcast( scheduled ),
-    SCHEDULER_SMP_NODE_SCHEDULED
-  );
-
   _Assert( _ISR_Get_level() != 0 );
 
   if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
@@ -455,6 +448,24 @@ static inline void _Scheduler_SMP_Allocate_processor(
   }
 }
 
+static inline void _Scheduler_SMP_Allocate_processor(
+  Scheduler_Context *context,
+  Scheduler_Node *scheduled,
+  Scheduler_Node *victim,
+  Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+  Thread_Control *scheduled_thread = _Scheduler_Node_get_owner( scheduled );
+  Thread_Control *victim_thread = _Scheduler_Node_get_owner( victim );
+
+  _Scheduler_SMP_Node_change_state(
+    _Scheduler_SMP_Node_downcast( scheduled ),
+    SCHEDULER_SMP_NODE_SCHEDULED
+  );
+
+  ( *allocate_processor )( context, scheduled_thread, victim_thread );
+}
+
 static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
   Scheduler_Context *context,
   Scheduler_Node *filter,
@@ -514,7 +525,14 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
       _Scheduler_SMP_Node_downcast( lowest_scheduled ),
       SCHEDULER_SMP_NODE_READY
     );
-    ( *allocate_processor )( context, node, lowest_scheduled );
+
+    _Scheduler_SMP_Allocate_processor(
+      context,
+      node,
+      lowest_scheduled,
+      allocate_processor
+    );
+
     ( *insert_scheduled )( context, node );
     ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
   } else {
@@ -565,7 +583,14 @@ static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
       _Scheduler_SMP_Node_downcast( node ),
      SCHEDULER_SMP_NODE_READY
     );
-    ( *allocate_processor) ( context, highest_ready, node );
+
+    _Scheduler_SMP_Allocate_processor(
+      context,
+      highest_ready,
+      node,
+      allocate_processor
+    );
+
     ( *insert_ready )( context, node );
     ( *move_from_ready_to_scheduled )( context, highest_ready );
   }
@@ -588,7 +613,13 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
 {
   Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
 
-  ( *allocate_processor )( context, highest_ready, victim );
+  _Scheduler_SMP_Allocate_processor(
+    context,
+    highest_ready,
+    victim,
+    allocate_processor
+  );
+
   ( *move_from_ready_to_scheduled )( context, highest_ready );
 }

cpukit/score/src/schedulerpriorityaffinitysmp.c

@@ -104,28 +104,22 @@ void _Scheduler_priority_affinity_SMP_Node_initialize(
 }
 
 /*
- * This method is slightly different from _Scheduler_SMP_Allocate_processor()
- * in that it does what it is asked to do. _Scheduler_SMP_Allocate_processor()
- * attempts to prevent migrations but does not take into account affinity
+ * This method is slightly different from
+ * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
+ * do. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
+ * but does not take into account affinity
  */
 static inline void _Scheduler_SMP_Allocate_processor_exact(
   Scheduler_Context *context,
-  Scheduler_Node *scheduled,
-  Scheduler_Node *victim
+  Thread_Control *scheduled_thread,
+  Thread_Control *victim_thread
 )
 {
-  Thread_Control *victim_thread = _Scheduler_Node_get_owner( victim );
-  Thread_Control *scheduled_thread = _Scheduler_Node_get_owner( scheduled );
   Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
   Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
   (void) context;
 
-  _Scheduler_SMP_Node_change_state(
-    _Scheduler_SMP_Node_downcast( scheduled ),
-    SCHEDULER_SMP_NODE_SCHEDULED
-  );
-
   _Thread_Set_CPU( scheduled_thread, victim_cpu );
   _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
 }
@@ -358,10 +352,11 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
       SCHEDULER_SMP_NODE_READY
     );
 
-    _Scheduler_SMP_Allocate_processor_exact(
+    _Scheduler_SMP_Allocate_processor(
       context,
       highest_ready,
-      lowest_scheduled
+      lowest_scheduled,
+      _Scheduler_SMP_Allocate_processor_exact
     );
 
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled(

cpukit/score/src/schedulerprioritysmp.c

@@ -93,7 +93,7 @@ void _Scheduler_priority_SMP_Block(
     _Scheduler_priority_SMP_Extract_from_ready,
     _Scheduler_priority_SMP_Get_highest_ready,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
@@ -113,7 +113,7 @@ static void _Scheduler_priority_SMP_Enqueue_ordered(
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
@@ -161,7 +161,7 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
     insert_ready,
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }

cpukit/score/src/schedulersimplesmp.c

@@ -175,7 +175,7 @@ void _Scheduler_simple_SMP_Block(
     _Scheduler_simple_SMP_Extract_from_ready,
     _Scheduler_simple_SMP_Get_highest_ready,
     _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
@@ -195,7 +195,7 @@ static void _Scheduler_simple_SMP_Enqueue_ordered(
     insert_scheduled,
     _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
     _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
@@ -243,7 +243,7 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
     insert_ready,
     insert_scheduled,
     _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }