smp: Fix scheduler helping protocol

New test case for smptests/smpmrsp01.

Fix _Scheduler_Block_node() in case the node is in the
SCHEDULER_HELP_ACTIVE_RIVAL helping state, for example after a
rtems_task_suspend() on a task waiting for a MrsP semaphore.

Fix _Scheduler_Unblock_node() in case the node is in the
SCHEDULER_SMP_NODE_READY state, for example after a rtems_task_resume()
on a task owning or waiting for a MrsP semaphore.
Author: Luca Bonato (2014-11-21 11:01:34 +01:00), committed by Sebastian Huber
Parent: 6570876d0e, commit: cceb19f4e5
4 changed files with 225 additions and 17 deletions
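
For orientation, here is a minimal sketch of the two sequences described in the commit message. It is not part of the commit, the task identifiers are hypothetical, and the new smpmrsp01 test cases below exercise the same paths with a complete two-processor setup.

#include <rtems.h>
#include <assert.h>

/*
 * Case 1 (block): rtems_task_suspend() on a task that is waiting for a MrsP
 * semaphore.  The waiter busy-waits with its scheduler node in the
 * SCHEDULER_HELP_ACTIVE_RIVAL helping state, so the suspend reaches
 * _Scheduler_Block_node() in that state.
 */
static void suspend_and_resume_mrsp_waiter(rtems_id waiter_id)
{
  rtems_status_code sc;

  sc = rtems_task_suspend(waiter_id);
  assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(waiter_id);
  assert(sc == RTEMS_SUCCESSFUL);
}

/*
 * Case 2 (unblock): rtems_task_resume() on a task that owns or waits for a
 * MrsP semaphore.  If its scheduler node is still in the
 * SCHEDULER_SMP_NODE_READY state at that point (the new test case arranges
 * this with a timer and a higher priority task), the resume reaches the
 * unblock operation with a ready node.
 */
static void resume_ready_mrsp_user(rtems_id task_id)
{
  rtems_status_code sc;

  sc = rtems_task_suspend(task_id);
  assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(task_id);
  assert(sc == RTEMS_SUCCESSFUL);
}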

cpukit/score/include/rtems/score/schedulerimpl.h

@@ -1081,6 +1081,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  */
 RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
   Scheduler_Context         *context,
+  Thread_Control            *thread,
   Scheduler_Node            *node,
   bool                       is_scheduled,
   Scheduler_Get_idle_thread  get_idle_thread
@@ -1088,25 +1089,24 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
 {
   bool block;
   Thread_Control *old_user = _Scheduler_Node_get_user( node );
-  Thread_Control *new_user;
+  Thread_Control *new_user = NULL;
 
   _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
 
-  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
-    new_user = _Scheduler_Node_get_owner( node );
-    _Assert( new_user != old_user );
-    _Scheduler_Node_set_user( node, new_user );
-  } else if (
-    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
-      && is_scheduled
-  ) {
-    new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-  } else {
-    new_user = NULL;
+  if ( is_scheduled ) {
+    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+    } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+      Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+      if ( thread == old_user && owner != old_user ) {
+        new_user = owner;
+        _Scheduler_Node_set_user( node, new_user );
+      }
+    }
   }
 
-  if ( new_user != NULL && is_scheduled ) {
+  if ( new_user != NULL ) {
     Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
 
     _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );

cpukit/score/include/rtems/score/schedulersmpimpl.h

@@ -793,13 +793,17 @@ static inline void _Scheduler_SMP_Block(
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
   bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
-  bool block = _Scheduler_Block_node(
+  bool block;
+
+  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
+
+  block = _Scheduler_Block_node(
     context,
+    thread,
     &node->Base,
     is_scheduled,
     _Scheduler_SMP_Get_idle_thread
   );
 
   if ( block ) {
     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
@@ -838,9 +842,22 @@ static inline Thread_Control *_Scheduler_SMP_Unblock(
   Thread_Control *needs_help;
 
   if ( unblock ) {
-    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
-    needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+    if ( node->state != SCHEDULER_SMP_NODE_READY ) {
+      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+    } else {
+      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
+      _Assert( node->Base.idle == NULL );
+
+      if ( node->Base.accepts_help == thread ) {
+        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+        needs_help = thread;
+      } else {
+        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+        needs_help = NULL;
+      }
+    }
   } else {
     needs_help = NULL;
   }

testsuites/smptests/smpmrsp01/init.c

@@ -54,6 +54,8 @@ typedef struct {
 typedef struct {
   rtems_id main_task_id;
   rtems_id migration_task_id;
+  rtems_id high_task_id;
+  rtems_id timer_id;
   rtems_id counting_sem_id;
   rtems_id mrsp_ids[MRSP_COUNT];
   rtems_id scheduler_ids[CPU_COUNT];
@@ -66,6 +68,7 @@ typedef struct {
   SMP_lock_Control switch_lock;
   size_t switch_index;
   switch_event switch_events[32];
+  volatile bool run;
 } test_context;
 
 static test_context test_instance = {
@@ -728,6 +731,192 @@ static void run_task(rtems_task_argument arg)
   }
 }
 
+static void ready_unlock_worker(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Obtain (F) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Done (G) */
+  barrier(ctx, &barrier_state);
+
+  rtems_task_suspend(RTEMS_SELF);
+  rtems_test_assert(0);
+}
+
+static void unblock_ready_timer(rtems_id timer_id, void *arg)
+{
+  test_context *ctx = arg;
+  rtems_status_code sc;
+
+  sc = rtems_task_start(
+    ctx->high_task_id,
+    run_task,
+    (rtems_task_argument) &ctx->run
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /*
+   * At this point the scheduler node of the main thread is in the
+   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
+   * performed.
+   */
+  sc = rtems_event_transient_send(ctx->main_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void unblock_ready_owner(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 3);
+
+  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(!ctx->run);
+}
+
+static void unblock_ready_rival(test_context *ctx)
+{
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Worker obtain (F) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  while (!ctx->run) {
+    /* Do nothing */
+  }
+
+  sc = rtems_task_resume(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Worker done (G) */
+  barrier(ctx, &barrier_state);
+}
+
+static void test_mrsp_unblock_ready(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  puts("test MrsP unblock ready");
+
+  ctx->run = false;
+
+  change_prio(RTEMS_SELF, 4);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name(' ', ' ', ' ', 'A'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    3,
+    &ctx->mrsp_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  sc = rtems_task_create(
+    rtems_build_name('H', 'I', 'G', 'H'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->high_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_create(
+    rtems_build_name('W', 'O', 'R', 'K'),
+    4,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->worker_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_timer_create(
+    rtems_build_name('T', 'I', 'M', 'R'),
+    &ctx->timer_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  unblock_ready_owner(ctx);
+  unblock_ready_rival(ctx);
+
+  sc = rtems_timer_delete(ctx->timer_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  change_prio(RTEMS_SELF, 2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
 {
   rtems_status_code sc;
@@ -1232,6 +1421,7 @@ static void Init(rtems_task_argument arg)
   test_mrsp_unlock_order_error();
   test_mrsp_deadlock_error(ctx);
   test_mrsp_multiple_obtain();
+  test_mrsp_unblock_ready(ctx);
   test_mrsp_obtain_and_sleep_and_release(ctx);
   test_mrsp_obtain_and_release_with_help(ctx);
   test_mrsp_obtain_and_release(ctx);

testsuites/smptests/smpmrsp01/smpmrsp01.scn

@@ -5,6 +5,7 @@ test MrsP nested obtain error
 test MrsP unlock order error
 test MrsP deadlock error
 test MrsP multiple obtain
+test MrsP unblock ready
 test MrsP obtain and sleep and release
 [0] MAIN -> RUN (prio 2, node RUN)
 [0] RUN -> MAIN (prio 1, node MAIN)