score: Scheduler node awareness for thread queues

Maintain the priority of a thread for each scheduler instance via the
thread queue enqueue, extract, priority actions, and surrender
operations.  This replaces the primitive priority boosting.

Update #2556.
Sebastian Huber
2016-09-09 11:00:06 +02:00
parent 8123cae864
commit f6142c19f1
8 changed files with 1225 additions and 357 deletions
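
With this change a thread carries one priority per scheduler instance,
which is observable through rtems_task_get_priority(), as exercised by
the updated SMPMUTEX 1 test at the end of this commit.  A minimal
sketch of such a query (helper name hypothetical, error handling
reduced):

#include <rtems.h>

/* Fetch the priority of a task as seen by one scheduler instance.
 * rtems_task_get_priority() returns RTEMS_NOT_DEFINED if the task
 * currently has no priority in that instance, i.e. it neither runs
 * there nor inherited a priority from a thread of that instance.
 */
static rtems_status_code get_prio_by_scheduler(
  rtems_id             task,
  rtems_name           scheduler_name,
  rtems_task_priority *prio
)
{
  rtems_status_code sc;
  rtems_id          scheduler_id;

  sc = rtems_scheduler_ident( scheduler_name, &scheduler_id );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return sc;
  }

  return rtems_task_get_priority( task, scheduler_id, prio );
}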


@@ -209,6 +209,11 @@ typedef struct Scheduler_Node {
extern const size_t _Scheduler_Node_size;
#endif
#if defined(RTEMS_SMP)
#define SCHEDULER_NODE_OF_THREAD_WAIT_NODE( node ) \
RTEMS_CONTAINER_OF( node, Scheduler_Node, Thread.Wait_node )
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */


@@ -997,6 +997,20 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
#endif
}
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
_Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
_Chain_First( &the_thread->Scheduler.Wait_nodes )
);
#else
return the_thread->Scheduler.nodes;
#endif
}
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
const Thread_Control *the_thread,
size_t scheduler_index
@@ -1308,21 +1322,22 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
}
/**
* @brief Claims the thread wait queue and operations.
* @brief Claims the thread wait queue.
*
* The caller must not be the owner of the default thread wait lock. The
* caller must be the owner of the corresponding thread queue lock.
* caller must be the owner of the corresponding thread queue lock. The
* registration of the corresponding thread queue operations is deferred and
* done after the deadlock detection. This is crucial to support timeouts on
* SMP configurations.
*
* @param[in] the_thread The thread.
* @param[in] queue The new thread queue.
* @param[in] operations The new thread operations.
*
* @see _Thread_Wait_restore_default().
* @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
*/
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
Thread_Control *the_thread,
Thread_queue_Queue *queue,
const Thread_queue_Operations *operations
Thread_queue_Queue *queue
)
{
ISR_lock_Context lock_context;
@@ -1338,11 +1353,25 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
#endif
the_thread->Wait.queue = queue;
the_thread->Wait.operations = operations;
_Thread_Wait_release_default_critical( the_thread, &lock_context );
}
/**
* @brief Finalizes the thread wait queue claim via registration of the
* corresponding thread queue operations.
*
* @param[in] the_thread The thread.
* @param[in] operations The corresponding thread queue operations.
*/
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
Thread_Control *the_thread,
const Thread_queue_Operations *operations
)
{
the_thread->Wait.operations = operations;
}
/**
* @brief Removes a thread wait lock request.
*
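
The two-step claim shows up in _Thread_queue_Enqueue_critical() further
down in this commit; condensed (simplified, locking and the deadlock
result path elided):

_Thread_Wait_claim( the_thread, queue );

if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
  /* Deadlock: the wait operations are still the defaults, so a
   * concurrent timeout cannot operate on a half-claimed queue.
   */
  _Thread_queue_Path_release_critical( queue_context );
  /* ... report the deadlock and return ... */
}

_Thread_queue_Context_clear_priority_updates( queue_context );
_Thread_Wait_claim_finalize( the_thread, operations );
( *operations->enqueue )( queue, the_thread, queue_context );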


@@ -216,6 +216,12 @@ typedef struct {
* @brief The start of a thread queue path.
*/
Thread_queue_Link Start;
/**
* @brief In case of a deadlock, a link for the first thread on the path
* that tries to enqueue on a thread queue.
*/
Thread_queue_Link Deadlock;
} Path;
#endif
@@ -344,11 +350,6 @@ typedef struct _Thread_queue_Heads {
Chain_Node Free_node;
#if defined(RTEMS_SMP)
/**
* @brief Boost priority.
*/
Priority_Node Boost_priority;
/**
* @brief One priority queue per scheduler instance.
*/
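
With the boost priority gone, the SMP part of Thread_queue_Heads
reduces to the per-scheduler priority queues.  Roughly (other members
elided, member types as assumed from this commit; see
_Thread_queue_Heads_initialize() below for the setup):

typedef struct _Thread_queue_Heads {
  /* ... */
  Chain_Node Free_node;
#if defined(RTEMS_SMP)
  /* One priority queue per scheduler instance; each queue records
   * its scheduler so that priorities remain per-instance values.
   */
  Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ];
#endif
} Thread_queue_Heads;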


@@ -280,12 +280,10 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
#if defined(RTEMS_SMP)
size_t i;
_Priority_Node_initialize( &heads->Boost_priority, 0 );
_Priority_Node_set_inactive( &heads->Boost_priority );
for ( i = 0; i < _Scheduler_Count; ++i ) {
_Chain_Initialize_node( &heads->Priority[ i ].Node );
_Priority_Initialize_empty( &heads->Priority[ i ].Queue );
heads->Priority[ i ].Queue.scheduler = &_Scheduler_Table[ i ];
}
#endif
@@ -955,6 +953,7 @@ void _Thread_queue_Unblock_proxy(
);
#endif
#if defined(RTEMS_SMP)
bool _Thread_queue_Path_acquire_critical(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
@@ -964,6 +963,7 @@ bool _Thread_queue_Path_acquire_critical(
void _Thread_queue_Path_release_critical(
Thread_queue_Context *queue_context
);
#endif
/**
* @brief Helper structure to ensure that all objects containing a thread queue


@@ -44,6 +44,16 @@ static void _Thread_Priority_action_add(
void *arg
)
{
Scheduler_Node *scheduler_node;
Thread_Control *the_thread;
scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation );
the_thread = arg;
_Chain_Append_unprotected(
&the_thread->Scheduler.Wait_nodes,
&scheduler_node->Thread.Wait_node
);
_Thread_Set_scheduler_node_priority( priority_aggregation, false );
_Priority_Set_action_type( priority_aggregation, PRIORITY_ACTION_ADD );
_Priority_Actions_add( priority_actions, priority_aggregation );
@@ -55,6 +65,11 @@ static void _Thread_Priority_action_remove(
void *arg
)
{
Scheduler_Node *scheduler_node;
scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation );
_Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
_Thread_Set_scheduler_node_priority( priority_aggregation, true );
_Priority_Set_action_type( priority_aggregation, PRIORITY_ACTION_REMOVE );
_Priority_Actions_add( priority_actions, priority_aggregation );
@@ -107,7 +122,7 @@ static void _Thread_Priority_do_perform_actions(
&queue_context->Priority.Actions,
_Thread_Priority_action_add,
_Thread_Priority_action_change,
NULL
the_thread
);
#else
_Priority_Non_empty_insert(
@@ -157,6 +172,7 @@ static void _Thread_Priority_do_perform_actions(
if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
_Thread_queue_Context_add_priority_update( queue_context, the_thread );
( *operations->priority_actions )(
queue,
&queue_context->Priority.Actions
@@ -169,29 +185,27 @@ void _Thread_Priority_perform_actions(
Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
Thread_queue_Link *link;
#endif
Thread_Control *the_thread;
size_t update_count;
_Assert( start_of_path != NULL );
#if defined(RTEMS_SMP)
link = &queue_context->Path.Start;
#endif
/*
* This function is tricky on SMP configurations. Please note that we do not
* use the thread queue path available via the thread queue context. Instead
* we directly use the thread wait information to traverse the thread queue
* path. Thus, we do not necessarily acquire all thread queue locks on our
* own. In case of a deadlock, we use locks acquired by other processors
* along the path.
*/
the_thread = start_of_path;
update_count = _Thread_queue_Context_save_priority_updates( queue_context );
while ( true ) {
Thread_queue_Queue *queue;
#if defined(RTEMS_SMP)
_Assert( link->owner == the_thread );
queue = link->Lock_context.Wait.queue;
#else
queue = the_thread->Wait.queue;
#endif
_Thread_Priority_do_perform_actions(
the_thread,
@@ -209,10 +223,6 @@ void _Thread_Priority_perform_actions(
the_thread = queue->owner;
_Assert( the_thread != NULL );
#if defined(RTEMS_SMP)
link = THREAD_QUEUE_LINK_OF_PATH_NODE( _Chain_Next( &link->Path_node ) );
#endif
/*
* In case the priority action list is non-empty, then the current thread
* is enqueued on a thread queue. There is no need to notify the scheduler
@@ -255,9 +265,13 @@ static void _Thread_Priority_apply(
);
if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
#if defined(RTEMS_SMP)
_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context );
#endif
_Thread_Priority_perform_actions( queue->owner, queue_context );
#if defined(RTEMS_SMP)
_Thread_queue_Path_release_critical( queue_context );
#endif
}
}


@@ -114,6 +114,9 @@ static bool _Thread_queue_Link_add(
Thread_queue_Queue *recursive_target;
ISR_lock_Context lock_context;
link->source = source;
link->target = target;
links = &_Thread_queue_Links;
recursive_target = target;
@@ -136,8 +139,6 @@ static bool _Thread_queue_Link_add(
}
}
link->source = source;
link->target = target;
_RBTree_Insert_inline(
&links->Links,
&link->Registry_node,
@@ -162,6 +163,9 @@ static void _Thread_queue_Link_remove( Thread_queue_Link *link )
}
#endif
#if !defined(RTEMS_SMP)
static
#endif
void _Thread_queue_Path_release_critical(
Thread_queue_Context *queue_context
)
@@ -173,51 +177,80 @@ void _Thread_queue_Path_release_critical(
head = _Chain_Head( &queue_context->Path.Links );
node = _Chain_Last( &queue_context->Path.Links );
if ( head != node ) {
while ( head != node ) {
Thread_queue_Link *link;
/*
* The terminal link may have an owner which does not wait on a thread
* queue.
*/
link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
if ( link->Lock_context.Wait.queue == NULL ) {
_Thread_Wait_release_default_critical(
link->owner,
&link->Lock_context.Lock_context
);
node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
_Chain_Set_off_chain( &link->Path_node );
#endif
}
while ( head != node ) {
/* The other links have an owner which waits on a thread queue */
link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
_Assert( link->Lock_context.Wait.queue != NULL );
if ( link->Lock_context.Wait.queue != NULL ) {
_Thread_queue_Link_remove( link );
_Thread_Wait_release_queue_critical(
link->Lock_context.Wait.queue,
&link->Lock_context
);
_Thread_Wait_remove_request( link->owner, &link->Lock_context );
} else {
_Thread_Wait_release_default_critical(
link->owner,
&link->Lock_context.Lock_context
);
}
node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
_Chain_Set_off_chain( &link->Path_node );
#endif
}
}
#else
(void) queue_context;
#endif
}
#if defined(RTEMS_SMP)
static void _Thread_queue_Path_append_deadlock_thread(
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
Thread_Control *deadlock;
/*
* In case of a deadlock, we must obtain the thread wait default lock for the
* first thread on the path that tries to enqueue on a thread queue. This
* thread can be identified by the thread wait operations. This lock acquire
* is necessary for the timeout and explicit thread priority changes, see
* _Thread_Priority_perform_actions().
*/
deadlock = NULL;
while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
the_thread = the_thread->Wait.queue->owner;
deadlock = the_thread;
}
if ( deadlock != NULL ) {
Thread_queue_Link *link;
link = &queue_context->Path.Deadlock;
_Chain_Initialize_node( &link->Path_node );
_Chain_Append_unprotected(
&queue_context->Path.Links,
&link->Path_node
);
link->owner = deadlock;
link->Lock_context.Wait.queue = NULL;
_Thread_Wait_acquire_default_critical(
deadlock,
&link->Lock_context.Lock_context
);
}
}
#endif
#if !defined(RTEMS_SMP)
static
#endif
bool _Thread_queue_Path_acquire_critical(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
@@ -249,12 +282,12 @@ bool _Thread_queue_Path_acquire_critical(
return false;
}
_RBTree_Initialize_node( &queue_context->Path.Start.Registry_node );
_Chain_Initialize_node( &queue_context->Path.Start.Path_node );
_Chain_Initialize_node(
&queue_context->Path.Start.Lock_context.Wait.Gate.Node
);
link = &queue_context->Path.Start;
_RBTree_Initialize_node( &link->Registry_node );
_Chain_Initialize_node( &link->Path_node );
do {
_Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
@@ -293,6 +326,7 @@ bool _Thread_queue_Path_acquire_critical(
}
} else {
link->Lock_context.Wait.queue = NULL;
_Thread_queue_Path_append_deadlock_thread( owner, queue_context );
return false;
}
} else {
@@ -353,7 +387,7 @@ void _Thread_queue_Enqueue_critical(
}
#endif
_Thread_Wait_claim( the_thread, queue, operations );
_Thread_Wait_claim( the_thread, queue );
if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
_Thread_queue_Path_release_critical( queue_context );
@@ -365,6 +399,7 @@ void _Thread_queue_Enqueue_critical(
}
_Thread_queue_Context_clear_priority_updates( queue_context );
_Thread_Wait_claim_finalize( the_thread, operations );
( *operations->enqueue )( queue, the_thread, queue_context );
_Thread_queue_Path_release_critical( queue_context );

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 embedded brains GmbH. All rights reserved.
* Copyright (c) 2015, 2016 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -28,11 +28,21 @@ const char rtems_test_name[] = "SMPMUTEX 1";
#define TASK_COUNT 9
#define PRIO_NONE 0
/* Value chosen for Qemu, 2 would be sufficient for real targets */
#define TIMEOUT_IN_TICKS 10
typedef enum {
REQ_WAKE_UP_MASTER = RTEMS_EVENT_0,
REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
REQ_MTX_OBTAIN = RTEMS_EVENT_2,
REQ_MTX_RELEASE = RTEMS_EVENT_3
REQ_MTX_OBTAIN_TIMEOUT = RTEMS_EVENT_3,
REQ_MTX_RELEASE = RTEMS_EVENT_4,
REQ_MTX_2_OBTAIN = RTEMS_EVENT_5,
REQ_MTX_2_RELEASE = RTEMS_EVENT_6,
REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_7,
REQ_SEM_RELEASE = RTEMS_EVENT_8
} request_id;
typedef enum {
@@ -50,6 +60,8 @@ typedef enum {
typedef struct {
rtems_id mtx;
rtems_id mtx_2;
rtems_id sem;
rtems_id tasks[TASK_COUNT];
int generation[TASK_COUNT];
int expected_generation[TASK_COUNT];
@@ -156,6 +168,14 @@ static void obtain(test_context *ctx)
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void obtain_timeout(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, TIMEOUT_IN_TICKS);
rtems_test_assert(sc == RTEMS_TIMEOUT);
}
static void release(test_context *ctx)
{
rtems_status_code sc;
@@ -164,6 +184,46 @@ static void release(test_context *ctx)
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void obtain_2(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_semaphore_obtain(ctx->mtx_2, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void release_2(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_semaphore_release(ctx->mtx_2);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void sem_obtain(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_semaphore_obtain(ctx->sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void sem_release(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_semaphore_release(ctx->sem);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void wait(void)
{
rtems_status_code sc;
sc = rtems_task_wake_after(TIMEOUT_IN_TICKS + 1);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void check_generations(test_context *ctx, task_id a, task_id b)
{
size_t i;
@@ -199,15 +259,51 @@ static void assert_prio(
rtems_test_assert(expected == actual);
}
static void assert_prio_by_scheduler(
test_context *ctx,
task_id id,
rtems_name scheduler,
rtems_task_priority expected
)
{
rtems_task_priority actual;
rtems_status_code sc;
rtems_id scheduler_id;
sc = rtems_scheduler_ident(scheduler, &scheduler_id);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
actual = PRIO_NONE;
sc = rtems_task_get_priority(
ctx->tasks[id],
scheduler_id,
&actual
);
if (expected == PRIO_NONE) {
rtems_test_assert(sc == RTEMS_NOT_DEFINED);
} else {
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
rtems_test_assert(actual == expected);
}
static void helper(rtems_task_argument arg)
{
test_context *ctx = &test_instance;
while (true) {
rtems_event_set events = wait_for_events();
rtems_test_assert(events == REQ_WAKE_UP_HELPER);
if ((events & REQ_WAKE_UP_HELPER) != 0) {
send_event(ctx, M, REQ_WAKE_UP_MASTER);
}
if ((events & REQ_SEM_RELEASE) != 0) {
sem_release(ctx);
}
}
}
static void worker(rtems_task_argument arg)
@@ -223,10 +319,31 @@ static void worker(rtems_task_argument arg)
++ctx->generation[id];
}
if ((events & REQ_MTX_OBTAIN_TIMEOUT) != 0) {
obtain_timeout(ctx);
++ctx->generation[id];
}
if ((events & REQ_MTX_RELEASE) != 0) {
release(ctx);
++ctx->generation[id];
}
if ((events & REQ_MTX_2_OBTAIN) != 0) {
obtain_2(ctx);
++ctx->generation[id];
}
if ((events & REQ_MTX_2_RELEASE) != 0) {
release_2(ctx);
++ctx->generation[id];
}
if ((events & REQ_SEM_OBTAIN_RELEASE) != 0) {
sem_obtain(ctx);
++ctx->generation[id];
sem_release(ctx);
}
}
}
@@ -245,13 +362,31 @@ static void test_init(test_context *ctx)
start_task(ctx, H_B, helper, 6, SCHED_B);
sc = rtems_semaphore_create(
rtems_build_name(' ', 'M', 'T', 'X'),
rtems_build_name('M', 'T', 'X', '1'),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
0,
&ctx->mtx
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_semaphore_create(
rtems_build_name('M', 'T', 'X', '2'),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
0,
&ctx->mtx_2
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_semaphore_create(
rtems_build_name(' ', 'S', 'E', 'M'),
0,
RTEMS_COUNTING_SEMAPHORE | RTEMS_PRIORITY,
0,
&ctx->sem
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void test_simple_inheritance(test_context *ctx)
@@ -287,67 +422,254 @@ static void test_dequeue_order_one_scheduler_instance(test_context *ctx)
check_generations(ctx, A_2_1, NONE);
}
static void test_simple_boosting(test_context *ctx)
static void test_mixed_queue_two_scheduler_instances(test_context *ctx)
{
obtain(ctx);
request(ctx, B_5_0, REQ_MTX_OBTAIN);
request(ctx, B_4, REQ_MTX_OBTAIN);
request(ctx, B_5_1, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
request(ctx, B_4, REQ_MTX_OBTAIN_TIMEOUT);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
check_generations(ctx, NONE, NONE);
assert_prio(ctx, M, 0);
release(ctx);
sync_with_helper(ctx);
assert_prio(ctx, M, 3);
wait();
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
check_generations(ctx, B_4, NONE);
request(ctx, B_4, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
check_generations(ctx, NONE, NONE);
request(ctx, B_5_0, REQ_SEM_OBTAIN_RELEASE);
send_event(ctx, H_A, REQ_SEM_RELEASE);
check_generations(ctx, NONE, NONE);
/*
* We are in scheduler instance A. Task B_5_0 of scheduler instance B issued
* the counting semaphore obtain before us. However, we inherited the
* priority of B_4, so we get the semaphore before B_5_0 (priority order
* within scheduler instance B).
*/
sem_obtain(ctx);
check_generations(ctx, NONE, NONE);
release(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
sync_with_helper(ctx);
check_generations(ctx, B_4, NONE);
request(ctx, B_4, REQ_MTX_RELEASE);
check_generations(ctx, B_4, NONE);
sem_release(ctx);
sync_with_helper(ctx);
check_generations(ctx, B_5_0, NONE);
sem_obtain(ctx);
}
static void test_mixed_queue_two_scheduler_instances_sem_only(test_context *ctx)
{
request(ctx, B_5_0, REQ_SEM_OBTAIN_RELEASE);
send_event(ctx, H_A, REQ_SEM_RELEASE);
check_generations(ctx, NONE, NONE);
/*
* We are in scheduler instance A. Task B_5_0 of scheduler instance B issued
* the counting semaphore obtain before us. No priority inheritance is
* involved, so task B_5_0 gets the counting semaphore first.
*/
sem_obtain(ctx);
check_generations(ctx, B_5_0, NONE);
sem_release(ctx);
}
static void test_simple_inheritance_two_scheduler_instances(test_context *ctx)
{
obtain(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
request(ctx, B_5_0, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
request(ctx, B_4, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
request(ctx, B_5_1, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
check_generations(ctx, NONE, NONE);
release(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
sync_with_helper(ctx);
check_generations(ctx, B_4, NONE);
request(ctx, B_4, REQ_MTX_RELEASE);
check_generations(ctx, B_4, B_5_0);
request(ctx, B_5_0, REQ_MTX_RELEASE);
check_generations(ctx, B_5_0, B_5_1);
request(ctx, B_5_1, REQ_MTX_RELEASE);
check_generations(ctx, B_5_1, NONE);
}
static void test_nested_inheritance_two_scheduler_instances(test_context *ctx)
{
obtain_2(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
request(ctx, B_5_0, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
check_generations(ctx, B_5_0, NONE);
request(ctx, B_5_0, REQ_MTX_2_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
request(ctx, B_4, REQ_MTX_OBTAIN_TIMEOUT);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 4);
wait();
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
check_generations(ctx, B_4, NONE);
request(ctx, B_4, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 4);
request(ctx, B_5_1, REQ_MTX_2_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 4);
check_generations(ctx, NONE, NONE);
release_2(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 4);
sync_with_helper(ctx);
check_generations(ctx, B_5_0, NONE);
request(ctx, B_5_0, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
check_generations(ctx, B_4, B_5_0);
request(ctx, B_4, REQ_MTX_RELEASE);
check_generations(ctx, B_4, NONE);
request(ctx, B_5_0, REQ_MTX_2_RELEASE);
check_generations(ctx, B_5_0, B_5_1);
request(ctx, B_5_1, REQ_MTX_2_RELEASE);
check_generations(ctx, B_5_1, NONE);
}
static void test_dequeue_order_two_scheduler_instances(test_context *ctx)
{
obtain(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
request(ctx, A_2_0, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 2);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
check_generations(ctx, NONE, NONE);
assert_prio(ctx, M, 2);
request(ctx, B_5_0, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 2);
assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
check_generations(ctx, NONE, NONE);
assert_prio(ctx, M, 0);
request(ctx, B_5_1, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 2);
assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
request(ctx, B_4, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 2);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
request(ctx, A_2_1, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 2);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
request(ctx, A_1, REQ_MTX_OBTAIN);
assert_prio_by_scheduler(ctx, M, SCHED_A, 1);
assert_prio_by_scheduler(ctx, M, SCHED_B, 4);
check_generations(ctx, NONE, NONE);
release(ctx);
sync_with_helper(ctx);
assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, A_1, SCHED_A, 1);
assert_prio_by_scheduler(ctx, A_1, SCHED_B, 4);
check_generations(ctx, A_1, NONE);
assert_prio(ctx, M, 3);
assert_prio(ctx, A_1, 0);
request(ctx, A_1, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, A_1, SCHED_A, 1);
assert_prio_by_scheduler(ctx, A_1, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_4, SCHED_A, 2);
assert_prio_by_scheduler(ctx, B_4, SCHED_B, 4);
check_generations(ctx, A_1, B_4);
assert_prio(ctx, A_1, 1);
assert_prio(ctx, B_4, 0);
request(ctx, B_4, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, B_4, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_4, SCHED_B, 4);
assert_prio_by_scheduler(ctx, A_2_0, SCHED_A, 2);
assert_prio_by_scheduler(ctx, A_2_0, SCHED_B, 5);
check_generations(ctx, B_4, A_2_0);
assert_prio(ctx, B_4, 4);
assert_prio(ctx, A_2_0, 0);
request(ctx, A_2_0, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, A_2_0, SCHED_A, 2);
assert_prio_by_scheduler(ctx, A_2_0, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, 2);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
check_generations(ctx, A_2_0, B_5_0);
assert_prio(ctx, A_2_0, 2);
assert_prio(ctx, B_5_0, 0);
request(ctx, B_5_0, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
assert_prio_by_scheduler(ctx, A_2_1, SCHED_A, 2);
assert_prio_by_scheduler(ctx, A_2_1, SCHED_B, 5);
check_generations(ctx, B_5_0, A_2_1);
assert_prio(ctx, B_5_0, 5);
assert_prio(ctx, A_2_1, 2);
request(ctx, A_2_1, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, A_2_1, SCHED_A, 2);
assert_prio_by_scheduler(ctx, A_2_1, SCHED_B, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_1, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_1, SCHED_B, 5);
check_generations(ctx, A_2_1, B_5_1);
assert_prio(ctx, B_5_1, 5);
request(ctx, B_5_1, REQ_MTX_RELEASE);
assert_prio_by_scheduler(ctx, B_5_1, SCHED_A, PRIO_NONE);
assert_prio_by_scheduler(ctx, B_5_1, SCHED_B, 5);
check_generations(ctx, B_5_1, NONE);
assert_prio(ctx, B_5_1, 5);
}
static void test(void)
@@ -358,7 +680,10 @@ static void test(void)
test_task_get_priority_not_defined(ctx);
test_simple_inheritance(ctx);
test_dequeue_order_one_scheduler_instance(ctx);
test_simple_boosting(ctx);
test_mixed_queue_two_scheduler_instances(ctx);
test_mixed_queue_two_scheduler_instances_sem_only(ctx);
test_simple_inheritance_two_scheduler_instances(ctx);
test_nested_inheritance_two_scheduler_instances(ctx);
test_dequeue_order_two_scheduler_instances(ctx);
}
@@ -399,7 +724,7 @@ RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);
#define CONFIGURE_MAXIMUM_TASKS TASK_COUNT
#define CONFIGURE_MAXIMUM_SEMAPHORES 1
#define CONFIGURE_MAXIMUM_SEMAPHORES 3
#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION