validation: Test Semaphore Manager

The test source code is generated from specification items
by the "./spec2modules.py" script contained in the
git://git.rtems.org/rtems-central.git Git repository.

Please read the "How-To" section in the "Software Requirements Engineering"
chapter of the RTEMS Software Engineering manual to get more information about
the process.

Update #3716.
Author: Sebastian Huber
Date:   2021-12-09 16:16:34 +01:00
Commit: 43adf2ed59 (parent 1c87bb585f)

16 changed files with 8806 additions and 0 deletions
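
The new test cases exercise the Classic API Semaphore Manager directives (create, ident, obtain, release, flush, set priority, delete). For orientation only, a minimal usage sketch of the directives under test follows; the object name, attribute set, and error handling are placeholders and are not taken from this commit.

#include <rtems.h>

/* Illustrative sketch only: create a counting semaphore, obtain and release
 * it once, and delete it again.  All names and values are placeholders. */
static void demo_semaphore_usage( void )
{
  rtems_status_code sc;
  rtems_id          id;

  sc = rtems_semaphore_create(
    rtems_build_name( 'D', 'E', 'M', 'O' ),
    1,                                        /* initial count */
    RTEMS_COUNTING_SEMAPHORE | RTEMS_PRIORITY,
    0,                                        /* priority ceiling is unused here */
    &id
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  if ( sc == RTEMS_SUCCESSFUL ) {
    (void) rtems_semaphore_release( id );
  }

  (void) rtems_semaphore_delete( id );
}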


@@ -15,6 +15,7 @@ source:
 - testsuites/validation/tc-event-performance.c
 - testsuites/validation/tc-message-performance.c
 - testsuites/validation/tc-part-performance.c
+- testsuites/validation/tc-sem-performance.c
 - testsuites/validation/ts-performance-no-clock-0.c
 stlib: []
 target: testsuites/validation/ts-performance-no-clock-0.exe


@@ -63,6 +63,13 @@ source:
 - testsuites/validation/tc-scheduler-ident.c
 - testsuites/validation/tc-scheduler-remove-processor.c
 - testsuites/validation/tc-score-fatal.c
+- testsuites/validation/tc-sem-create.c
+- testsuites/validation/tc-sem-flush.c
+- testsuites/validation/tc-sem-ident.c
+- testsuites/validation/tc-sem-obtain.c
+- testsuites/validation/tc-sem-release.c
+- testsuites/validation/tc-sem-set-priority.c
+- testsuites/validation/tc-sem-timeout.c
 - testsuites/validation/tr-event-constant.c
 - testsuites/validation/tr-mtx-seize-try.c
 - testsuites/validation/tr-mtx-seize-wait.c


@@ -17,6 +17,8 @@ source:
 - testsuites/validation/tc-ratemon-get-status.c
 - testsuites/validation/tc-ratemon-period.c
 - testsuites/validation/tc-ratemon-timeout.c
+- testsuites/validation/tc-sem-delete.c
+- testsuites/validation/tc-sem-uni.c
 - testsuites/validation/tc-score-fatal.c
 - testsuites/validation/ts-validation-one-cpu-0.c
 stlib: []


@@ -18,6 +18,8 @@ source:
 - testsuites/validation/tc-intr-smp-only.c
 - testsuites/validation/tc-scheduler-smp-only.c
 - testsuites/validation/tc-score-tq-smp.c
+- testsuites/validation/tc-sem-smp.c
+- testsuites/validation/tc-sem-mrsp-obtain.c
 - testsuites/validation/ts-validation-smp-only-0.c
 stlib: []
 target: testsuites/validation/ts-validation-smp-only-0.exe

File diff suppressed because it is too large.


@@ -0,0 +1,759 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemReqDelete
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <string.h>
#include "tx-support.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemReqDelete spec:/rtems/sem/req/delete
*
* @ingroup RTEMSTestSuiteTestsuitesValidationOneCpu0
*
* @{
*/
typedef enum {
RtemsSemReqDelete_Pre_Id_NoObj,
RtemsSemReqDelete_Pre_Id_Counting,
RtemsSemReqDelete_Pre_Id_Simple,
RtemsSemReqDelete_Pre_Id_Binary,
RtemsSemReqDelete_Pre_Id_PrioCeiling,
RtemsSemReqDelete_Pre_Id_PrioInherit,
RtemsSemReqDelete_Pre_Id_MrsP,
RtemsSemReqDelete_Pre_Id_NA
} RtemsSemReqDelete_Pre_Id;
typedef enum {
RtemsSemReqDelete_Pre_Discipline_FIFO,
RtemsSemReqDelete_Pre_Discipline_Priority,
RtemsSemReqDelete_Pre_Discipline_NA
} RtemsSemReqDelete_Pre_Discipline;
typedef enum {
RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner,
RtemsSemReqDelete_Pre_State_Zero,
RtemsSemReqDelete_Pre_State_Blocked,
RtemsSemReqDelete_Pre_State_NA
} RtemsSemReqDelete_Pre_State;
typedef enum {
RtemsSemReqDelete_Post_Status_Ok,
RtemsSemReqDelete_Post_Status_InvId,
RtemsSemReqDelete_Post_Status_InUse,
RtemsSemReqDelete_Post_Status_NA
} RtemsSemReqDelete_Post_Status;
typedef enum {
RtemsSemReqDelete_Post_Name_Valid,
RtemsSemReqDelete_Post_Name_Invalid,
RtemsSemReqDelete_Post_Name_NA
} RtemsSemReqDelete_Post_Name;
typedef enum {
RtemsSemReqDelete_Post_Flush_FIFO,
RtemsSemReqDelete_Post_Flush_Priority,
RtemsSemReqDelete_Post_Flush_No,
RtemsSemReqDelete_Post_Flush_NA
} RtemsSemReqDelete_Post_Flush;
typedef struct {
uint16_t Skip : 1;
uint16_t Pre_Id_NA : 1;
uint16_t Pre_Discipline_NA : 1;
uint16_t Pre_State_NA : 1;
uint16_t Post_Status : 2;
uint16_t Post_Name : 2;
uint16_t Post_Flush : 2;
} RtemsSemReqDelete_Entry;
/**
* @brief Test context for spec:/rtems/sem/req/delete test case.
*/
typedef struct {
/**
* @brief This member contains the worker task identifiers.
*/
rtems_id worker_id[ 2 ];
/**
* @brief This member contains the worker activity counter.
*/
uint32_t counter;
/**
* @brief This member contains the worker activity counter of a specific
* worker.
*/
uint32_t worker_counter[ 2 ];
/**
* @brief This member specifies the expected rtems_semaphore_obtain() status.
*/
rtems_status_code obtain_status;
/**
* @brief This member specifies the initial count of the semaphore.
*/
uint32_t count;
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
/**
* @brief This member contains the semaphore identifier.
*/
rtems_id semaphore_id;
/**
* @brief If this member is true, then the ``id`` parameter shall be valid,
* otherwise it shall be invalid.
*/
bool valid_id;
/**
* @brief If this member is true, then tasks shall be blocked on the
* semaphore, otherwise no tasks shall be blocked on the semaphore.
*/
bool blocked;
/**
* @brief This member specifies the ``id`` parameter for the
* rtems_semaphore_delete() call.
*/
rtems_id id;
/**
* @brief This member specifies the expected rtems_semaphore_delete() status.
*/
rtems_status_code delete_status;
struct {
/**
* @brief This member defines the pre-condition states for the next action.
*/
size_t pcs[ 3 ];
/**
* @brief If this member is true, then the test action loop is executed.
*/
bool in_action_loop;
/**
* @brief This member contains the next transition map index.
*/
size_t index;
/**
* @brief This member contains the current transition map entry.
*/
RtemsSemReqDelete_Entry entry;
/**
* @brief If this member is true, then the current transition variant
* should be skipped.
*/
bool skip;
} Map;
} RtemsSemReqDelete_Context;
static RtemsSemReqDelete_Context
RtemsSemReqDelete_Instance;
static const char * const RtemsSemReqDelete_PreDesc_Id[] = {
"NoObj",
"Counting",
"Simple",
"Binary",
"PrioCeiling",
"PrioInherit",
"MrsP",
"NA"
};
static const char * const RtemsSemReqDelete_PreDesc_Discipline[] = {
"FIFO",
"Priority",
"NA"
};
static const char * const RtemsSemReqDelete_PreDesc_State[] = {
"GtZeroOrNoOwner",
"Zero",
"Blocked",
"NA"
};
static const char * const * const RtemsSemReqDelete_PreDesc[] = {
RtemsSemReqDelete_PreDesc_Id,
RtemsSemReqDelete_PreDesc_Discipline,
RtemsSemReqDelete_PreDesc_State,
NULL
};
#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
#define EVENT_OBTAIN RTEMS_EVENT_0
typedef RtemsSemReqDelete_Context Context;
static void WakeUp( Context *ctx, size_t index )
{
SendEvents( ctx->worker_id[ index ], RTEMS_EVENT_0 );
}
static void Worker( rtems_task_argument arg, size_t index )
{
Context *ctx;
ctx = (Context *) arg;
/*
* In order to test the flush in FIFO order, we have to use the no-preempt
* mode.
*/
SetMode( RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK );
while ( true ) {
rtems_status_code sc;
rtems_event_set events;
uint32_t counter;
events = ReceiveAnyEvents();
T_eq_u32( events, RTEMS_EVENT_0 );
sc = rtems_semaphore_obtain(
ctx->semaphore_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
T_rsc( sc, ctx->obtain_status );
counter = ctx->counter;
++counter;
ctx->counter = counter;
ctx->worker_counter[ index ] = counter;
if ( sc == RTEMS_SUCCESSFUL ) {
sc = rtems_semaphore_release( ctx->semaphore_id );
T_rsc_success( sc );
}
}
}
static void WorkerZero( rtems_task_argument arg )
{
Worker( arg, 0 );
}
static void WorkerOne( rtems_task_argument arg )
{
Worker( arg, 1 );
}
static void RtemsSemReqDelete_Pre_Id_Prepare(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Pre_Id state
)
{
switch ( state ) {
case RtemsSemReqDelete_Pre_Id_NoObj: {
/*
* While the ``id`` parameter is not associated with a semaphore.
*/
ctx->valid_id = false;
break;
}
case RtemsSemReqDelete_Pre_Id_Counting: {
/*
* While the ``id`` parameter is associated with a counting semaphore.
*/
ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
ctx->obtain_status = RTEMS_OBJECT_WAS_DELETED;
break;
}
case RtemsSemReqDelete_Pre_Id_Simple: {
/*
* While the ``id`` parameter is associated with a simple binary
* semaphore.
*/
ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
ctx->obtain_status = RTEMS_OBJECT_WAS_DELETED;
break;
}
case RtemsSemReqDelete_Pre_Id_Binary: {
/*
* While the ``id`` parameter is associated with a binary semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
break;
}
case RtemsSemReqDelete_Pre_Id_PrioCeiling: {
/*
* While the ``id`` parameter is associated with a priority ceiling
* semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
break;
}
case RtemsSemReqDelete_Pre_Id_PrioInherit: {
/*
* While the ``id`` parameter is associated with a priority inheritance
* semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
break;
}
case RtemsSemReqDelete_Pre_Id_MrsP: {
/*
* While the ``id`` parameter is associated with a MrsP semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
break;
}
case RtemsSemReqDelete_Pre_Id_NA:
break;
}
}
static void RtemsSemReqDelete_Pre_Discipline_Prepare(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Pre_Discipline state
)
{
switch ( state ) {
case RtemsSemReqDelete_Pre_Discipline_FIFO: {
/*
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
break;
}
case RtemsSemReqDelete_Pre_Discipline_Priority: {
/*
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
break;
}
case RtemsSemReqDelete_Pre_Discipline_NA:
break;
}
}
static void RtemsSemReqDelete_Pre_State_Prepare(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Pre_State state
)
{
switch ( state ) {
case RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner: {
/*
* While the semaphore has a count greater than zero or no owner.
*/
ctx->blocked = false;
ctx->count = 1;
break;
}
case RtemsSemReqDelete_Pre_State_Zero: {
/*
* While the semaphore has a count of zero or an owner.
*/
ctx->blocked = false;
ctx->count = 0;
break;
}
case RtemsSemReqDelete_Pre_State_Blocked: {
/*
* While the semaphore has tasks blocked on the semaphore.
*/
ctx->blocked = true;
ctx->count = 0;
break;
}
case RtemsSemReqDelete_Pre_State_NA:
break;
}
}
static void RtemsSemReqDelete_Post_Status_Check(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Post_Status state
)
{
switch ( state ) {
case RtemsSemReqDelete_Post_Status_Ok: {
/*
* The return status of rtems_semaphore_delete() shall be
* RTEMS_SUCCESSFUL.
*/
ctx->semaphore_id = 0;
T_rsc_success( ctx->delete_status );
break;
}
case RtemsSemReqDelete_Post_Status_InvId: {
/*
* The return status of rtems_semaphore_delete() shall be
* RTEMS_INVALID_ID.
*/
T_rsc( ctx->delete_status, RTEMS_INVALID_ID );
break;
}
case RtemsSemReqDelete_Post_Status_InUse: {
/*
* The return status of rtems_semaphore_delete() shall be
* RTEMS_RESOURCE_IN_USE.
*/
T_rsc( ctx->delete_status, RTEMS_RESOURCE_IN_USE );
break;
}
case RtemsSemReqDelete_Post_Status_NA:
break;
}
}
static void RtemsSemReqDelete_Post_Name_Check(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Post_Name state
)
{
rtems_status_code sc;
rtems_id id;
switch ( state ) {
case RtemsSemReqDelete_Post_Name_Valid: {
/*
* The unique object name shall identify a semaphore.
*/
id = 0;
sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
T_rsc_success( sc );
T_eq_u32( id, ctx->semaphore_id );
break;
}
case RtemsSemReqDelete_Post_Name_Invalid: {
/*
* The unique object name shall not identify a semaphore.
*/
sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
T_rsc( sc, RTEMS_INVALID_NAME );
break;
}
case RtemsSemReqDelete_Post_Name_NA:
break;
}
}
static void RtemsSemReqDelete_Post_Flush_Check(
RtemsSemReqDelete_Context *ctx,
RtemsSemReqDelete_Post_Flush state
)
{
switch ( state ) {
case RtemsSemReqDelete_Post_Flush_FIFO: {
/*
* Tasks waiting at the semaphore shall be unblocked in FIFO order.
*/
T_eq_u32( ctx->worker_counter[ 0 ], 1 );
T_eq_u32( ctx->worker_counter[ 1 ], 2 );
break;
}
case RtemsSemReqDelete_Post_Flush_Priority: {
/*
* Tasks waiting at the semaphore shall be unblocked in priority order.
*/
T_eq_u32( ctx->worker_counter[ 0 ], 2 );
T_eq_u32( ctx->worker_counter[ 1 ], 1 );
break;
}
case RtemsSemReqDelete_Post_Flush_No: {
/*
* Tasks waiting at the semaphore shall remain blocked.
*/
T_eq_u32( ctx->worker_counter[ 0 ], 0 );
T_eq_u32( ctx->worker_counter[ 1 ], 0 );
break;
}
case RtemsSemReqDelete_Post_Flush_NA:
break;
}
}
static void RtemsSemReqDelete_Setup( RtemsSemReqDelete_Context *ctx )
{
memset( ctx, 0, sizeof( *ctx ) );
SetSelfPriority( PRIO_NORMAL );
ctx->worker_id[ 0 ] = CreateTask( "WRK0", PRIO_HIGH );
StartTask( ctx->worker_id[ 0 ], WorkerZero, ctx );
ctx->worker_id[ 1 ] = CreateTask( "WRK1", PRIO_VERY_HIGH );
StartTask( ctx->worker_id[ 1 ], WorkerOne, ctx );
}
static void RtemsSemReqDelete_Setup_Wrap( void *arg )
{
RtemsSemReqDelete_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqDelete_Setup( ctx );
}
static void RtemsSemReqDelete_Teardown( RtemsSemReqDelete_Context *ctx )
{
size_t i;
for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
DeleteTask( ctx->worker_id[ i ] );
}
RestoreRunnerPriority();
}
static void RtemsSemReqDelete_Teardown_Wrap( void *arg )
{
RtemsSemReqDelete_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqDelete_Teardown( ctx );
}
static void RtemsSemReqDelete_Prepare( RtemsSemReqDelete_Context *ctx )
{
ctx->counter = 0;
ctx->worker_counter[ 0 ] = 0;
ctx->worker_counter[ 1 ] = 0;
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
ctx->valid_id = true;
ctx->obtain_status = RTEMS_SUCCESSFUL;
}
static void RtemsSemReqDelete_Action( RtemsSemReqDelete_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_create(
NAME,
ctx->count,
ctx->attribute_set,
PRIO_ULTRA_HIGH,
&ctx->semaphore_id
);
T_rsc_success( sc );
if ( ctx->blocked ) {
WakeUp( ctx, 0 );
WakeUp( ctx, 1 );
}
if ( ctx->valid_id ) {
ctx->id = ctx->semaphore_id;
} else {
ctx->id = 0;
}
ctx->delete_status = rtems_semaphore_delete( ctx->id );
}
static void RtemsSemReqDelete_Cleanup( RtemsSemReqDelete_Context *ctx )
{
if ( ctx->semaphore_id != 0 ) {
rtems_status_code sc;
if ( ctx->count == 0 ) {
sc = rtems_semaphore_release( ctx->semaphore_id );
T_rsc_success( sc );
}
sc = rtems_semaphore_delete( ctx->semaphore_id );
T_rsc_success( sc );
ctx->semaphore_id = 0;
}
}
static const RtemsSemReqDelete_Entry
RtemsSemReqDelete_Entries[] = {
{ 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_No },
{ 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_InUse,
RtemsSemReqDelete_Post_Name_Valid, RtemsSemReqDelete_Post_Flush_No },
{ 1, 0, 0, 0, RtemsSemReqDelete_Post_Status_NA,
RtemsSemReqDelete_Post_Name_NA, RtemsSemReqDelete_Post_Flush_NA },
{ 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_InvId,
RtemsSemReqDelete_Post_Name_Valid, RtemsSemReqDelete_Post_Flush_No },
{ 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_FIFO },
{ 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_Priority }
};
static const uint8_t
RtemsSemReqDelete_Map[] = {
3, 3, 3, 3, 3, 3, 0, 0, 4, 0, 0, 5, 0, 0, 4, 0, 0, 5, 0, 1, 1, 0, 1, 1, 2, 2,
2, 0, 1, 1, 2, 2, 2, 0, 1, 1, 2, 2, 2, 0, 1, 1
};
static size_t RtemsSemReqDelete_Scope( void *arg, char *buf, size_t n )
{
RtemsSemReqDelete_Context *ctx;
ctx = arg;
if ( ctx->Map.in_action_loop ) {
return T_get_scope( RtemsSemReqDelete_PreDesc, buf, n, ctx->Map.pcs );
}
return 0;
}
static T_fixture RtemsSemReqDelete_Fixture = {
.setup = RtemsSemReqDelete_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemReqDelete_Teardown_Wrap,
.scope = RtemsSemReqDelete_Scope,
.initial_context = &RtemsSemReqDelete_Instance
};
static inline RtemsSemReqDelete_Entry RtemsSemReqDelete_PopEntry(
RtemsSemReqDelete_Context *ctx
)
{
size_t index;
index = ctx->Map.index;
ctx->Map.index = index + 1;
return RtemsSemReqDelete_Entries[
RtemsSemReqDelete_Map[ index ]
];
}
static void RtemsSemReqDelete_TestVariant( RtemsSemReqDelete_Context *ctx )
{
RtemsSemReqDelete_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqDelete_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
RtemsSemReqDelete_Pre_State_Prepare( ctx, ctx->Map.pcs[ 2 ] );
RtemsSemReqDelete_Action( ctx );
RtemsSemReqDelete_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
RtemsSemReqDelete_Post_Name_Check( ctx, ctx->Map.entry.Post_Name );
RtemsSemReqDelete_Post_Flush_Check( ctx, ctx->Map.entry.Post_Flush );
}
/**
* @fn void T_case_body_RtemsSemReqDelete( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemReqDelete, &RtemsSemReqDelete_Fixture )
{
RtemsSemReqDelete_Context *ctx;
ctx = T_fixture_context();
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
for (
ctx->Map.pcs[ 0 ] = RtemsSemReqDelete_Pre_Id_NoObj;
ctx->Map.pcs[ 0 ] < RtemsSemReqDelete_Pre_Id_NA;
++ctx->Map.pcs[ 0 ]
) {
for (
ctx->Map.pcs[ 1 ] = RtemsSemReqDelete_Pre_Discipline_FIFO;
ctx->Map.pcs[ 1 ] < RtemsSemReqDelete_Pre_Discipline_NA;
++ctx->Map.pcs[ 1 ]
) {
for (
ctx->Map.pcs[ 2 ] = RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner;
ctx->Map.pcs[ 2 ] < RtemsSemReqDelete_Pre_State_NA;
++ctx->Map.pcs[ 2 ]
) {
ctx->Map.entry = RtemsSemReqDelete_PopEntry( ctx );
if ( ctx->Map.entry.Skip ) {
continue;
}
RtemsSemReqDelete_Prepare( ctx );
RtemsSemReqDelete_TestVariant( ctx );
RtemsSemReqDelete_Cleanup( ctx );
}
}
}
}
/** @} */
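
The transition map above distinguishes three delete outcomes: counting and simple binary semaphores can be deleted while tasks wait on them, and the waiters' blocked rtems_semaphore_obtain() calls return RTEMS_OBJECT_WAS_DELETED in FIFO or priority order; binary, priority ceiling, priority inheritance, and MrsP semaphores that are still in use are rejected with RTEMS_RESOURCE_IN_USE; an unknown identifier yields RTEMS_INVALID_ID. A minimal sketch of these outcomes follows (illustrative only, not part of the generated file; the identifiers are hypothetical).

#include <rtems.h>

/* Illustrative sketch of the status codes checked by the transition map
 * above.  The identifiers passed in are hypothetical. */
static void demo_delete_outcomes( rtems_id counting_id, rtems_id owned_mutex_id )
{
  rtems_status_code sc;

  /* Deleting a counting semaphore succeeds even with blocked waiters; their
   * pending rtems_semaphore_obtain() calls return RTEMS_OBJECT_WAS_DELETED. */
  sc = rtems_semaphore_delete( counting_id );
  (void) sc; /* expected: RTEMS_SUCCESSFUL */

  /* Deleting a binary (mutex-like) semaphore that still has an owner or
   * blocked waiters is rejected. */
  sc = rtems_semaphore_delete( owned_mutex_id );
  (void) sc; /* expected: RTEMS_RESOURCE_IN_USE */

  /* An identifier that does not name a semaphore is an error. */
  sc = rtems_semaphore_delete( 0 );
  (void) sc; /* expected: RTEMS_INVALID_ID */
}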


@@ -0,0 +1,623 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemReqFlush
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <string.h>
#include "tr-tq-flush-fifo.h"
#include "tr-tq-flush-priority-inherit.h"
#include "tr-tq-flush-priority.h"
#include "tx-support.h"
#include "tx-thread-queue.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemReqFlush spec:/rtems/sem/req/flush
*
* @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
*
* @{
*/
typedef enum {
RtemsSemReqFlush_Pre_Class_Counting,
RtemsSemReqFlush_Pre_Class_Simple,
RtemsSemReqFlush_Pre_Class_Binary,
RtemsSemReqFlush_Pre_Class_PrioCeiling,
RtemsSemReqFlush_Pre_Class_PrioInherit,
RtemsSemReqFlush_Pre_Class_MrsP,
RtemsSemReqFlush_Pre_Class_NA
} RtemsSemReqFlush_Pre_Class;
typedef enum {
RtemsSemReqFlush_Pre_Discipline_FIFO,
RtemsSemReqFlush_Pre_Discipline_Priority,
RtemsSemReqFlush_Pre_Discipline_NA
} RtemsSemReqFlush_Pre_Discipline;
typedef enum {
RtemsSemReqFlush_Pre_Id_Valid,
RtemsSemReqFlush_Pre_Id_Invalid,
RtemsSemReqFlush_Pre_Id_NA
} RtemsSemReqFlush_Pre_Id;
typedef enum {
RtemsSemReqFlush_Post_Action_InvId,
RtemsSemReqFlush_Post_Action_NotDef,
RtemsSemReqFlush_Post_Action_FlushFIFO,
RtemsSemReqFlush_Post_Action_FlushPriority,
RtemsSemReqFlush_Post_Action_FlushPriorityCeiling,
RtemsSemReqFlush_Post_Action_FlushPriorityInherit,
RtemsSemReqFlush_Post_Action_NA
} RtemsSemReqFlush_Post_Action;
typedef struct {
uint8_t Skip : 1;
uint8_t Pre_Class_NA : 1;
uint8_t Pre_Discipline_NA : 1;
uint8_t Pre_Id_NA : 1;
uint8_t Post_Action : 3;
} RtemsSemReqFlush_Entry;
/**
* @brief Test context for spec:/rtems/sem/req/flush test case.
*/
typedef struct {
/**
* @brief This member contains the thread queue test context.
*/
TQContext tq_ctx;
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
/**
* @brief This member specifies the initial count of the semaphore.
*/
uint32_t initial_count;
struct {
/**
* @brief This member defines the pre-condition indices for the next
* action.
*/
size_t pci[ 3 ];
/**
* @brief This member defines the pre-condition states for the next action.
*/
size_t pcs[ 3 ];
/**
* @brief If this member is true, then the test action loop is executed.
*/
bool in_action_loop;
/**
* @brief This member contains the next transition map index.
*/
size_t index;
/**
* @brief This member contains the current transition map entry.
*/
RtemsSemReqFlush_Entry entry;
/**
* @brief If this member is true, then the current transition variant
* should be skipped.
*/
bool skip;
} Map;
} RtemsSemReqFlush_Context;
static RtemsSemReqFlush_Context
RtemsSemReqFlush_Instance;
static const char * const RtemsSemReqFlush_PreDesc_Class[] = {
"Counting",
"Simple",
"Binary",
"PrioCeiling",
"PrioInherit",
"MrsP",
"NA"
};
static const char * const RtemsSemReqFlush_PreDesc_Discipline[] = {
"FIFO",
"Priority",
"NA"
};
static const char * const RtemsSemReqFlush_PreDesc_Id[] = {
"Valid",
"Invalid",
"NA"
};
static const char * const * const RtemsSemReqFlush_PreDesc[] = {
RtemsSemReqFlush_PreDesc_Class,
RtemsSemReqFlush_PreDesc_Discipline,
RtemsSemReqFlush_PreDesc_Id,
NULL
};
#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
typedef RtemsSemReqFlush_Context Context;
static void EnqueuePrepare( TQContext *tq_ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_obtain(
tq_ctx->thread_queue_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
T_rsc_success( sc );
}
static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
{
rtems_status_code sc;
(void) wait;
sc = rtems_semaphore_obtain(
tq_ctx->thread_queue_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
T_rsc( sc, RTEMS_UNSATISFIED );
return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
}
static void Flush( TQContext *tq_ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
T_rsc_success( sc );
}
static void RtemsSemReqFlush_Pre_Class_Prepare(
RtemsSemReqFlush_Context *ctx,
RtemsSemReqFlush_Pre_Class state
)
{
switch ( state ) {
case RtemsSemReqFlush_Pre_Class_Counting: {
/*
* While the semaphore object is a counting semaphore.
*/
ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
ctx->initial_count = 0;
ctx->tq_ctx.enqueue_prepare = TQDoNothing;
ctx->tq_ctx.enqueue_done = TQDoNothing;
break;
}
case RtemsSemReqFlush_Pre_Class_Simple: {
/*
* While the semaphore object is a simple binary semaphore.
*/
ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
ctx->initial_count = 0;
ctx->tq_ctx.enqueue_prepare = TQDoNothing;
ctx->tq_ctx.enqueue_done = TQDoNothing;
break;
}
case RtemsSemReqFlush_Pre_Class_Binary: {
/*
* While the semaphore object is a binary semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
ctx->initial_count = 1;
ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
break;
}
case RtemsSemReqFlush_Pre_Class_PrioCeiling: {
/*
* While the semaphore object is a priority ceiling semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
ctx->initial_count = 1;
ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
break;
}
case RtemsSemReqFlush_Pre_Class_PrioInherit: {
/*
* While the semaphore object is a priority inheritance semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
ctx->initial_count = 1;
ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
break;
}
case RtemsSemReqFlush_Pre_Class_MrsP: {
/*
* While the semaphore object is a MrsP semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
ctx->initial_count = 1;
ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
break;
}
case RtemsSemReqFlush_Pre_Class_NA:
break;
}
}
static void RtemsSemReqFlush_Pre_Discipline_Prepare(
RtemsSemReqFlush_Context *ctx,
RtemsSemReqFlush_Pre_Discipline state
)
{
switch ( state ) {
case RtemsSemReqFlush_Pre_Discipline_FIFO: {
/*
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
ctx->tq_ctx.discipline = TQ_FIFO;
break;
}
case RtemsSemReqFlush_Pre_Discipline_Priority: {
/*
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
ctx->tq_ctx.discipline = TQ_PRIORITY;
break;
}
case RtemsSemReqFlush_Pre_Discipline_NA:
break;
}
}
static void RtemsSemReqFlush_Pre_Id_Prepare(
RtemsSemReqFlush_Context *ctx,
RtemsSemReqFlush_Pre_Id state
)
{
switch ( state ) {
case RtemsSemReqFlush_Pre_Id_Valid: {
/*
* While the ``id`` parameter is associated with the semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqFlush_Pre_Id_Invalid: {
/*
* While the ``id`` parameter is not associated with a semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqFlush_Pre_Id_NA:
break;
}
}
static void RtemsSemReqFlush_Post_Action_Check(
RtemsSemReqFlush_Context *ctx,
RtemsSemReqFlush_Post_Action state
)
{
rtems_status_code sc;
switch ( state ) {
case RtemsSemReqFlush_Post_Action_InvId: {
/*
* The return status of rtems_semaphore_flush() shall be
* RTEMS_INVALID_ID.
*/
sc = rtems_semaphore_flush( 0xffffffff );
T_rsc( sc, RTEMS_INVALID_ID );
break;
}
case RtemsSemReqFlush_Post_Action_NotDef: {
/*
* The return status of rtems_semaphore_flush() shall be
* RTEMS_NOT_DEFINED.
*/
sc = rtems_semaphore_flush( ctx->tq_ctx.thread_queue_id );
T_rsc( sc, RTEMS_NOT_DEFINED );
break;
}
case RtemsSemReqFlush_Post_Action_FlushFIFO: {
/*
* The calling task shall flush the semaphore as specified by
* /score/tq/req/flush-fifo.
*/
ScoreTqReqFlushFifo_Run( &ctx->tq_ctx );
break;
}
case RtemsSemReqFlush_Post_Action_FlushPriority: {
/*
* The calling task shall flush the semaphore as specified by
* /score/tq/req/flush-priority.
*/
ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, true );
break;
}
case RtemsSemReqFlush_Post_Action_FlushPriorityCeiling: {
/*
* The calling task shall flush the semaphore as specified by
* /score/tq/req/flush-priority.
*/
ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, false );
break;
}
case RtemsSemReqFlush_Post_Action_FlushPriorityInherit: {
/*
* The calling task shall flush the semaphore as specified by
* /score/tq/req/flush-priority-inherit.
*/
ScoreTqReqFlushPriorityInherit_Run( &ctx->tq_ctx );
break;
}
case RtemsSemReqFlush_Post_Action_NA:
break;
}
}
static void RtemsSemReqFlush_Setup( RtemsSemReqFlush_Context *ctx )
{
memset( ctx, 0, sizeof( *ctx ) );
ctx->tq_ctx.enqueue = Enqueue;
ctx->tq_ctx.flush = Flush;
ctx->tq_ctx.surrender = TQSurrenderClassicSem;
ctx->tq_ctx.convert_status = TQConvertStatusClassic;
TQInitialize( &ctx->tq_ctx );
}
static void RtemsSemReqFlush_Setup_Wrap( void *arg )
{
RtemsSemReqFlush_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqFlush_Setup( ctx );
}
static void RtemsSemReqFlush_Teardown( RtemsSemReqFlush_Context *ctx )
{
TQDestroy( &ctx->tq_ctx );
}
static void RtemsSemReqFlush_Teardown_Wrap( void *arg )
{
RtemsSemReqFlush_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqFlush_Teardown( ctx );
}
static void RtemsSemReqFlush_Prepare( RtemsSemReqFlush_Context *ctx )
{
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
}
static void RtemsSemReqFlush_Action( RtemsSemReqFlush_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_create(
NAME,
ctx->initial_count,
ctx->attribute_set,
PRIO_ULTRA_HIGH,
&ctx->tq_ctx.thread_queue_id
);
T_rsc_success( sc );
}
static void RtemsSemReqFlush_Cleanup( RtemsSemReqFlush_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
T_rsc_success( sc );
}
static const RtemsSemReqFlush_Entry
RtemsSemReqFlush_Entries[] = {
{ 0, 1, 1, 0, RtemsSemReqFlush_Post_Action_InvId },
{ 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushFIFO },
{ 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriority },
{ 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA },
{ 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityCeiling },
{ 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityInherit },
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_NotDef }
#else
{ 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA }
#endif
};
static const uint8_t
RtemsSemReqFlush_Map[] = {
1, 0, 2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 3, 0, 4, 0, 3, 0, 5, 0, 3, 0, 6, 0
};
static size_t RtemsSemReqFlush_Scope( void *arg, char *buf, size_t n )
{
RtemsSemReqFlush_Context *ctx;
ctx = arg;
if ( ctx->Map.in_action_loop ) {
return T_get_scope( RtemsSemReqFlush_PreDesc, buf, n, ctx->Map.pcs );
}
return 0;
}
static T_fixture RtemsSemReqFlush_Fixture = {
.setup = RtemsSemReqFlush_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemReqFlush_Teardown_Wrap,
.scope = RtemsSemReqFlush_Scope,
.initial_context = &RtemsSemReqFlush_Instance
};
static inline RtemsSemReqFlush_Entry RtemsSemReqFlush_PopEntry(
RtemsSemReqFlush_Context *ctx
)
{
size_t index;
index = ctx->Map.index;
ctx->Map.index = index + 1;
return RtemsSemReqFlush_Entries[
RtemsSemReqFlush_Map[ index ]
];
}
static void RtemsSemReqFlush_SetPreConditionStates(
RtemsSemReqFlush_Context *ctx
)
{
if ( ctx->Map.entry.Pre_Class_NA ) {
ctx->Map.pcs[ 0 ] = RtemsSemReqFlush_Pre_Class_NA;
} else {
ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
}
if ( ctx->Map.entry.Pre_Discipline_NA ) {
ctx->Map.pcs[ 1 ] = RtemsSemReqFlush_Pre_Discipline_NA;
} else {
ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
}
ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
}
static void RtemsSemReqFlush_TestVariant( RtemsSemReqFlush_Context *ctx )
{
RtemsSemReqFlush_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqFlush_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
RtemsSemReqFlush_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
RtemsSemReqFlush_Action( ctx );
RtemsSemReqFlush_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
}
/**
* @fn void T_case_body_RtemsSemReqFlush( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemReqFlush, &RtemsSemReqFlush_Fixture )
{
RtemsSemReqFlush_Context *ctx;
ctx = T_fixture_context();
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
for (
ctx->Map.pci[ 0 ] = RtemsSemReqFlush_Pre_Class_Counting;
ctx->Map.pci[ 0 ] < RtemsSemReqFlush_Pre_Class_NA;
++ctx->Map.pci[ 0 ]
) {
for (
ctx->Map.pci[ 1 ] = RtemsSemReqFlush_Pre_Discipline_FIFO;
ctx->Map.pci[ 1 ] < RtemsSemReqFlush_Pre_Discipline_NA;
++ctx->Map.pci[ 1 ]
) {
for (
ctx->Map.pci[ 2 ] = RtemsSemReqFlush_Pre_Id_Valid;
ctx->Map.pci[ 2 ] < RtemsSemReqFlush_Pre_Id_NA;
++ctx->Map.pci[ 2 ]
) {
ctx->Map.entry = RtemsSemReqFlush_PopEntry( ctx );
if ( ctx->Map.entry.Skip ) {
continue;
}
RtemsSemReqFlush_SetPreConditionStates( ctx );
RtemsSemReqFlush_Prepare( ctx );
RtemsSemReqFlush_TestVariant( ctx );
RtemsSemReqFlush_Cleanup( ctx );
}
}
}
}
/** @} */
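
As a brief summary of the flush test above (illustrative only): rtems_semaphore_flush() unblocks every task waiting on the semaphore, and each unblocked rtems_semaphore_obtain() call returns RTEMS_UNSATISFIED, which is exactly what the Enqueue() helper expects; on SMP configurations, flushing a MrsP semaphore is not defined and the directive returns RTEMS_NOT_DEFINED. A minimal sketch with a hypothetical identifier:

#include <rtems.h>

/* Illustrative sketch only: flush a semaphore so that all waiters are
 * released.  The identifier is hypothetical. */
static void demo_flush( rtems_id semaphore_id )
{
  rtems_status_code sc;

  sc = rtems_semaphore_flush( semaphore_id );
  (void) sc; /* RTEMS_SUCCESSFUL for FIFO or priority wait queues,
              * RTEMS_NOT_DEFINED for MrsP semaphores (SMP only),
              * RTEMS_INVALID_ID for an unknown identifier */
}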


@@ -0,0 +1,118 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemValIdent
*/
/*
* Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "tr-object-ident.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemValIdent spec:/rtems/sem/val/ident
*
* @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
*
* @brief Test the rtems_semaphore_ident() directive.
*
* This test case performs the following actions:
*
* - Run the generic object identification tests for Classic API semaphore
* class objects defined by /rtems/req/ident.
*
* @{
*/
static rtems_status_code ClassicSemIdentAction(
rtems_name name,
uint32_t node,
rtems_id *id
)
{
return rtems_semaphore_ident( name, node, id );
}
/**
* @brief Run the generic object identification tests for Classic API semaphore
* class objects defined by /rtems/req/ident.
*/
static void RtemsSemValIdent_Action_0( void )
{
rtems_status_code sc;
rtems_id id_local_object;
sc = rtems_semaphore_create(
ClassicObjectIdentName,
0,
RTEMS_DEFAULT_ATTRIBUTES,
0,
&id_local_object
);
T_assert_rsc_success( sc );
RtemsReqIdent_Run(
id_local_object,
ClassicSemIdentAction
);
sc = rtems_semaphore_delete( id_local_object );
T_rsc_success( sc );
}
/**
* @fn void T_case_body_RtemsSemValIdent( void )
*/
T_TEST_CASE( RtemsSemValIdent )
{
RtemsSemValIdent_Action_0();
}
/** @} */
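
For reference, a minimal lookup sketch matching the directive exercised above (illustrative only; the object name is a placeholder):

#include <rtems.h>

/* Illustrative sketch only: resolve a semaphore identifier by name on the
 * local node, as the generic /rtems/req/ident tests do for the Classic API
 * semaphore class. */
static rtems_id demo_lookup_semaphore( void )
{
  rtems_status_code sc;
  rtems_id          id;

  id = 0;
  sc = rtems_semaphore_ident(
    rtems_build_name( 'D', 'E', 'M', 'O' ),
    RTEMS_SEARCH_LOCAL_NODE,
    &id
  );

  return sc == RTEMS_SUCCESSFUL ? id : 0;
}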

File diff suppressed because it is too large.


@@ -0,0 +1,728 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemReqObtain
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <string.h>
#include "tr-mtx-seize-try.h"
#include "tr-mtx-seize-wait.h"
#include "tr-sem-seize-try.h"
#include "tr-sem-seize-wait.h"
#include "tx-support.h"
#include "tx-thread-queue.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemReqObtain spec:/rtems/sem/req/obtain
*
* @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
*
* @{
*/
typedef enum {
RtemsSemReqObtain_Pre_Class_Counting,
RtemsSemReqObtain_Pre_Class_Simple,
RtemsSemReqObtain_Pre_Class_Binary,
RtemsSemReqObtain_Pre_Class_PrioCeiling,
RtemsSemReqObtain_Pre_Class_PrioInherit,
RtemsSemReqObtain_Pre_Class_MrsP,
RtemsSemReqObtain_Pre_Class_NA
} RtemsSemReqObtain_Pre_Class;
typedef enum {
RtemsSemReqObtain_Pre_Discipline_FIFO,
RtemsSemReqObtain_Pre_Discipline_Priority,
RtemsSemReqObtain_Pre_Discipline_NA
} RtemsSemReqObtain_Pre_Discipline;
typedef enum {
RtemsSemReqObtain_Pre_Id_Valid,
RtemsSemReqObtain_Pre_Id_Invalid,
RtemsSemReqObtain_Pre_Id_NA
} RtemsSemReqObtain_Pre_Id;
typedef enum {
RtemsSemReqObtain_Pre_Wait_No,
RtemsSemReqObtain_Pre_Wait_Timeout,
RtemsSemReqObtain_Pre_Wait_Forever,
RtemsSemReqObtain_Pre_Wait_NA
} RtemsSemReqObtain_Pre_Wait;
typedef enum {
RtemsSemReqObtain_Post_Action_InvId,
RtemsSemReqObtain_Post_Action_SemSeizeTry,
RtemsSemReqObtain_Post_Action_SemSeizeWait,
RtemsSemReqObtain_Post_Action_MtxSeizeTry,
RtemsSemReqObtain_Post_Action_MtxSeizeWait,
RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry,
RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait,
RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry,
RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait,
RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry,
RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait,
RtemsSemReqObtain_Post_Action_NA
} RtemsSemReqObtain_Post_Action;
typedef struct {
uint16_t Skip : 1;
uint16_t Pre_Class_NA : 1;
uint16_t Pre_Discipline_NA : 1;
uint16_t Pre_Id_NA : 1;
uint16_t Pre_Wait_NA : 1;
uint16_t Post_Action : 4;
} RtemsSemReqObtain_Entry;
/**
* @brief Test context for spec:/rtems/sem/req/obtain test case.
*/
typedef struct {
/**
* @brief This member contains the thread queue test context.
*/
union {
TQContext tq_ctx;
TQMtxContext tq_mtx_ctx;
TQSemContext tq_sem_ctx;
};
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
struct {
/**
* @brief This member defines the pre-condition states for the next action.
*/
size_t pcs[ 4 ];
/**
* @brief If this member is true, then the test action loop is executed.
*/
bool in_action_loop;
/**
* @brief This member contains the next transition map index.
*/
size_t index;
/**
* @brief This member contains the current transition map entry.
*/
RtemsSemReqObtain_Entry entry;
/**
* @brief If this member is true, then the current transition variant
* should be skipped.
*/
bool skip;
} Map;
} RtemsSemReqObtain_Context;
static RtemsSemReqObtain_Context
RtemsSemReqObtain_Instance;
static const char * const RtemsSemReqObtain_PreDesc_Class[] = {
"Counting",
"Simple",
"Binary",
"PrioCeiling",
"PrioInherit",
"MrsP",
"NA"
};
static const char * const RtemsSemReqObtain_PreDesc_Discipline[] = {
"FIFO",
"Priority",
"NA"
};
static const char * const RtemsSemReqObtain_PreDesc_Id[] = {
"Valid",
"Invalid",
"NA"
};
static const char * const RtemsSemReqObtain_PreDesc_Wait[] = {
"No",
"Timeout",
"Forever",
"NA"
};
static const char * const * const RtemsSemReqObtain_PreDesc[] = {
RtemsSemReqObtain_PreDesc_Class,
RtemsSemReqObtain_PreDesc_Discipline,
RtemsSemReqObtain_PreDesc_Id,
RtemsSemReqObtain_PreDesc_Wait,
NULL
};
#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
typedef RtemsSemReqObtain_Context Context;
static void RtemsSemReqObtain_Pre_Class_Prepare(
RtemsSemReqObtain_Context *ctx,
RtemsSemReqObtain_Pre_Class state
)
{
switch ( state ) {
case RtemsSemReqObtain_Pre_Class_Counting: {
/*
* While the semaphore object is a counting semaphore.
*/
ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
break;
}
case RtemsSemReqObtain_Pre_Class_Simple: {
/*
* While the semaphore object is a simple binary semaphore.
*/
ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
break;
}
case RtemsSemReqObtain_Pre_Class_Binary: {
/*
* While the semaphore object is a binary semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
break;
}
case RtemsSemReqObtain_Pre_Class_PrioCeiling: {
/*
* While the semaphore object is a priority ceiling semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
break;
}
case RtemsSemReqObtain_Pre_Class_PrioInherit: {
/*
* While the semaphore object is a priority inheritance semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
break;
}
case RtemsSemReqObtain_Pre_Class_MrsP: {
/*
* While the semaphore object is a MrsP semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
break;
}
case RtemsSemReqObtain_Pre_Class_NA:
break;
}
}
static void RtemsSemReqObtain_Pre_Discipline_Prepare(
RtemsSemReqObtain_Context *ctx,
RtemsSemReqObtain_Pre_Discipline state
)
{
switch ( state ) {
case RtemsSemReqObtain_Pre_Discipline_FIFO: {
/*
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
ctx->tq_ctx.discipline = TQ_FIFO;
break;
}
case RtemsSemReqObtain_Pre_Discipline_Priority: {
/*
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
ctx->tq_ctx.discipline = TQ_PRIORITY;
break;
}
case RtemsSemReqObtain_Pre_Discipline_NA:
break;
}
}
static void RtemsSemReqObtain_Pre_Id_Prepare(
RtemsSemReqObtain_Context *ctx,
RtemsSemReqObtain_Pre_Id state
)
{
switch ( state ) {
case RtemsSemReqObtain_Pre_Id_Valid: {
/*
* While the ``id`` parameter is associated with the semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqObtain_Pre_Id_Invalid: {
/*
* While the ``id`` parameter is not associated with a semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqObtain_Pre_Id_NA:
break;
}
}
static void RtemsSemReqObtain_Pre_Wait_Prepare(
RtemsSemReqObtain_Context *ctx,
RtemsSemReqObtain_Pre_Wait state
)
{
switch ( state ) {
case RtemsSemReqObtain_Pre_Wait_No: {
/*
* While the ``option_set`` parameter indicates the RTEMS_NO_WAIT option.
*/
ctx->tq_ctx.wait = TQ_NO_WAIT;
break;
}
case RtemsSemReqObtain_Pre_Wait_Timeout: {
/*
* While the ``option_set`` parameter indicates the RTEMS_WAIT option,
* while the ``timeout`` parameter is not equal to RTEMS_NO_TIMEOUT.
*/
ctx->tq_ctx.wait = TQ_WAIT_TIMED;
break;
}
case RtemsSemReqObtain_Pre_Wait_Forever: {
/*
* While the ``option_set`` parameter indicates the RTEMS_WAIT option,
* while the ``timeout`` parameter is equal to RTEMS_NO_TIMEOUT.
*/
ctx->tq_ctx.wait = TQ_WAIT_FOREVER;
break;
}
case RtemsSemReqObtain_Pre_Wait_NA:
break;
}
}
static void RtemsSemReqObtain_Post_Action_Check(
RtemsSemReqObtain_Context *ctx,
RtemsSemReqObtain_Post_Action state
)
{
rtems_status_code sc;
switch ( state ) {
case RtemsSemReqObtain_Post_Action_InvId: {
/*
* The return status of rtems_semaphore_obtain() shall be
* RTEMS_INVALID_ID.
*/
sc = rtems_semaphore_obtain( 0xffffffff, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
T_rsc( sc, RTEMS_INVALID_ID );
break;
}
case RtemsSemReqObtain_Post_Action_SemSeizeTry: {
/*
* The calling task shall try to seize the semaphore as specified by
* /score/sem/req/seize-try.
*/
ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
ScoreSemReqSeizeTry_Run( &ctx->tq_sem_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_SemSeizeWait: {
/*
* The calling task shall wait to seize the semaphore as specified by
* /score/sem/req/seize-wait.
*/
ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
ScoreSemReqSeizeWait_Run( &ctx->tq_sem_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_MtxSeizeTry: {
/*
* The calling task shall try to seize the mutex as specified by
* /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
* allowed, and no locking protocol is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_MtxSeizeWait: {
/*
* The calling task shall wait to seize the mutex as specified by
* /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
* is allowed, and no locking protocol is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry: {
/*
* The calling task shall try to seize the mutex as specified by
* /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
* allowed, and a priority inheritance protocol is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait: {
/*
* The calling task shall wait to seize the mutex as specified by
* /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
* is allowed, and a priority inheritance protocol is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry: {
/*
* The calling task shall try to seize the mutex as specified by
* /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
* allowed, and a priority ceiling is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait: {
/*
* The calling task shall wait to seize the mutex as specified by
* /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
* is allowed, and a priority ceiling is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry: {
/*
* The calling task shall try to seize the mutex as specified by
* /score/mtx/req/seize-try where an enqueue is sticky, a recursive seize
* returns an error status, and a priority ceiling is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait: {
/*
* The calling task shall wait to seize the mutex as specified by
* /score/mtx/req/seize-wait where an enqueue is sticky, a recursive
* seize returns an error status, and a priority ceiling is used.
*/
ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqObtain_Post_Action_NA:
break;
}
}
static void RtemsSemReqObtain_Setup( RtemsSemReqObtain_Context *ctx )
{
memset( ctx, 0, sizeof( *ctx ) );
ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
ctx->tq_ctx.surrender = TQSurrenderClassicSem;
ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
ctx->tq_ctx.convert_status = TQConvertStatusClassic;
TQInitialize( &ctx->tq_ctx );
}
static void RtemsSemReqObtain_Setup_Wrap( void *arg )
{
RtemsSemReqObtain_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqObtain_Setup( ctx );
}
static void RtemsSemReqObtain_Teardown( RtemsSemReqObtain_Context *ctx )
{
TQDestroy( &ctx->tq_ctx );
}
static void RtemsSemReqObtain_Teardown_Wrap( void *arg )
{
RtemsSemReqObtain_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqObtain_Teardown( ctx );
}
static void RtemsSemReqObtain_Prepare( RtemsSemReqObtain_Context *ctx )
{
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
}
static void RtemsSemReqObtain_Action( RtemsSemReqObtain_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_create(
NAME,
1,
ctx->attribute_set,
PRIO_VERY_HIGH,
&ctx->tq_ctx.thread_queue_id
);
T_rsc_success( sc );
if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
rtems_task_priority prio;
sc = rtems_semaphore_set_priority(
ctx->tq_ctx.thread_queue_id,
SCHEDULER_B_ID,
PRIO_VERY_HIGH,
&prio
);
T_rsc_success( sc );
}
}
static void RtemsSemReqObtain_Cleanup( RtemsSemReqObtain_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
T_rsc_success( sc );
}
static const RtemsSemReqObtain_Entry
RtemsSemReqObtain_Entries[] = {
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InvId },
{ 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_SemSeizeWait },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_SemSeizeTry },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MtxSeizeWait },
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InvId },
#else
{ 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
#endif
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MtxSeizeTry },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait },
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait },
#else
{ 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
#endif
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry },
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry },
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry }
#else
{ 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA }
#endif
};
static const uint8_t
RtemsSemReqObtain_Map[] = {
3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 6, 4,
4, 0, 0, 0, 6, 4, 4, 0, 0, 0, 1, 1, 1, 1, 1, 1, 10, 7, 7, 0, 0, 0, 1, 1, 1,
1, 1, 1, 11, 8, 8, 0, 0, 0, 1, 1, 1, 1, 1, 1, 12, 9, 9, 5, 5, 5
};
static size_t RtemsSemReqObtain_Scope( void *arg, char *buf, size_t n )
{
RtemsSemReqObtain_Context *ctx;
ctx = arg;
if ( ctx->Map.in_action_loop ) {
return T_get_scope( RtemsSemReqObtain_PreDesc, buf, n, ctx->Map.pcs );
}
return 0;
}
static T_fixture RtemsSemReqObtain_Fixture = {
.setup = RtemsSemReqObtain_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemReqObtain_Teardown_Wrap,
.scope = RtemsSemReqObtain_Scope,
.initial_context = &RtemsSemReqObtain_Instance
};
static inline RtemsSemReqObtain_Entry RtemsSemReqObtain_PopEntry(
RtemsSemReqObtain_Context *ctx
)
{
size_t index;
index = ctx->Map.index;
ctx->Map.index = index + 1;
return RtemsSemReqObtain_Entries[
RtemsSemReqObtain_Map[ index ]
];
}
static void RtemsSemReqObtain_TestVariant( RtemsSemReqObtain_Context *ctx )
{
RtemsSemReqObtain_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqObtain_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
RtemsSemReqObtain_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
RtemsSemReqObtain_Pre_Wait_Prepare( ctx, ctx->Map.pcs[ 3 ] );
RtemsSemReqObtain_Action( ctx );
RtemsSemReqObtain_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
}
/**
* @fn void T_case_body_RtemsSemReqObtain( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemReqObtain, &RtemsSemReqObtain_Fixture )
{
RtemsSemReqObtain_Context *ctx;
ctx = T_fixture_context();
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
for (
ctx->Map.pcs[ 0 ] = RtemsSemReqObtain_Pre_Class_Counting;
ctx->Map.pcs[ 0 ] < RtemsSemReqObtain_Pre_Class_NA;
++ctx->Map.pcs[ 0 ]
) {
for (
ctx->Map.pcs[ 1 ] = RtemsSemReqObtain_Pre_Discipline_FIFO;
ctx->Map.pcs[ 1 ] < RtemsSemReqObtain_Pre_Discipline_NA;
++ctx->Map.pcs[ 1 ]
) {
for (
ctx->Map.pcs[ 2 ] = RtemsSemReqObtain_Pre_Id_Valid;
ctx->Map.pcs[ 2 ] < RtemsSemReqObtain_Pre_Id_NA;
++ctx->Map.pcs[ 2 ]
) {
for (
ctx->Map.pcs[ 3 ] = RtemsSemReqObtain_Pre_Wait_No;
ctx->Map.pcs[ 3 ] < RtemsSemReqObtain_Pre_Wait_NA;
++ctx->Map.pcs[ 3 ]
) {
ctx->Map.entry = RtemsSemReqObtain_PopEntry( ctx );
if ( ctx->Map.entry.Skip ) {
continue;
}
RtemsSemReqObtain_Prepare( ctx );
RtemsSemReqObtain_TestVariant( ctx );
RtemsSemReqObtain_Cleanup( ctx );
}
}
}
}
}
/** @} */

View File

@@ -0,0 +1,890 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemValPerf
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include "tx-support.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemValPerf spec:/rtems/sem/val/perf
*
* @ingroup RTEMSTestSuiteTestsuitesPerformanceNoClock0
*
* @brief This test case provides a context to run @ref RTEMSAPIClassicSem
* performance tests.
*
* @{
*/
/**
* @brief Test context for spec:/rtems/sem/val/perf test case.
*/
typedef struct {
/**
* @brief This member provides a mutex identifier.
*/
rtems_id mutex_id;
/**
* @brief This member provides a worker identifier.
*/
rtems_id worker_id;
/**
* @brief This member provides a status code.
*/
rtems_status_code status;
/**
* @brief This member references the measure runtime context.
*/
T_measure_runtime_context *context;
/**
* @brief This member provides the measure runtime request.
*/
T_measure_runtime_request request;
/**
* @brief This member provides an optional measurement begin time point.
*/
T_ticks begin;
/**
* @brief This member provides an optional measurement end time point.
*/
T_ticks end;
} RtemsSemValPerf_Context;
static RtemsSemValPerf_Context
RtemsSemValPerf_Instance;
typedef RtemsSemValPerf_Context Context;
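/*
 * The runner directs the worker task through the events below.  An *_END
 * event requests that the worker records the measurement end time point
 * directly after the corresponding operation.
 */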
typedef enum {
EVENT_END = RTEMS_EVENT_0,
EVENT_OBTAIN = RTEMS_EVENT_1,
EVENT_OBTAIN_END = RTEMS_EVENT_2,
EVENT_RELEASE = RTEMS_EVENT_3,
EVENT_RELEASE_END = RTEMS_EVENT_4
} Event;
static void Send( const Context *ctx, rtems_event_set events )
{
SendEvents( ctx->worker_id, events );
}
static void Worker( rtems_task_argument arg )
{
Context *ctx;
ctx = (Context *) arg;
while ( true ) {
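/*
 * Perform the operations requested by the runner and sample the time after
 * each operation so that measurement end time points can be recorded.
 */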
rtems_event_set events;
rtems_status_code sc;
T_ticks ticks;
sc = rtems_event_receive(
RTEMS_ALL_EVENTS,
RTEMS_EVENT_ANY | RTEMS_WAIT,
RTEMS_NO_TIMEOUT,
&events
);
ticks = T_tick();
T_quiet_rsc_success( sc );
if ( ( events & EVENT_END ) != 0 ) {
ctx->end = ticks;
}
if ( ( events & EVENT_OBTAIN ) != 0 ) {
sc = rtems_semaphore_obtain(
ctx->mutex_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
ticks = T_tick();
T_quiet_rsc_success( sc );
if ( ( events & EVENT_OBTAIN_END ) != 0 ) {
ctx->end = ticks;
}
}
if ( ( events & EVENT_RELEASE ) != 0 ) {
sc = rtems_semaphore_release( ctx->mutex_id );
ticks = T_tick();
T_quiet_rsc_success( sc );
if ( ( events & EVENT_RELEASE_END ) != 0 ) {
ctx->end = ticks;
}
}
}
}
static void RtemsSemValPerf_Setup_Context( RtemsSemValPerf_Context *ctx )
{
T_measure_runtime_config config;
memset( &config, 0, sizeof( config ) );
config.sample_count = 100;
ctx->request.arg = ctx;
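/* Request that the individual samples are reported */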
ctx->request.flags = T_MEASURE_RUNTIME_REPORT_SAMPLES;
ctx->context = T_measure_runtime_create( &config );
T_assert_not_null( ctx->context );
}
/**
* @brief Create a mutex and a worker task.
*/
static void RtemsSemValPerf_Setup( RtemsSemValPerf_Context *ctx )
{
SetSelfPriority( PRIO_NORMAL );
ctx->mutex_id = CreateMutex();
ctx->worker_id = CreateTask( "WORK", PRIO_HIGH );
StartTask( ctx->worker_id, Worker, ctx );
}
static void RtemsSemValPerf_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemValPerf_Setup_Context( ctx );
RtemsSemValPerf_Setup( ctx );
}
/**
* @brief Delete the worker task and the mutex.
*/
static void RtemsSemValPerf_Teardown( RtemsSemValPerf_Context *ctx )
{
DeleteTask( ctx->worker_id );
DeleteMutex( ctx->mutex_id );
RestoreRunnerPriority();
}
static void RtemsSemValPerf_Teardown_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemValPerf_Teardown( ctx );
}
static T_fixture RtemsSemValPerf_Fixture = {
.setup = RtemsSemValPerf_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemValPerf_Teardown_Wrap,
.scope = NULL,
.initial_context = &RtemsSemValPerf_Instance
};
/**
* @brief Obtain the available mutex.
*/
static void RtemsSemReqPerfMtxPiObtain_Body( RtemsSemValPerf_Context *ctx )
{
ctx->status = rtems_semaphore_obtain(
ctx->mutex_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
}
static void RtemsSemReqPerfMtxPiObtain_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiObtain_Body( ctx );
}
/**
* @brief Release the mutex. Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiObtain_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc_success( ctx->status );
ReleaseMutex( ctx->mutex_id );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiObtain_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiObtain_Teardown( ctx, delta, tic, toc, retry );
}
/**
* @brief Obtain the mutex.
*/
static void RtemsSemReqPerfMtxPiRelease_Setup( RtemsSemValPerf_Context *ctx )
{
ObtainMutex( ctx->mutex_id );
}
static void RtemsSemReqPerfMtxPiRelease_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiRelease_Setup( ctx );
}
/**
* @brief Release the mutex.
*/
static void RtemsSemReqPerfMtxPiRelease_Body( RtemsSemValPerf_Context *ctx )
{
ctx->status = rtems_semaphore_release( ctx->mutex_id );
}
static void RtemsSemReqPerfMtxPiRelease_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiRelease_Body( ctx );
}
/**
* @brief Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiRelease_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc_success( ctx->status );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiRelease_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiRelease_Teardown( ctx, delta, tic, toc, retry );
}
/**
* @brief Let one task wait on the mutex.
*/
static void RtemsSemReqPerfMtxPiReleaseOne_Setup(
RtemsSemValPerf_Context *ctx
)
{
ObtainMutex( ctx->mutex_id );
Send( ctx, EVENT_OBTAIN );
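/*
 * The worker waits for the mutex now.  Lower its priority so that the
 * release in the measurement body unblocks it without a preemption.
 */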
SetPriority( ctx->worker_id, PRIO_LOW );
Send( ctx, EVENT_RELEASE );
}
static void RtemsSemReqPerfMtxPiReleaseOne_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleaseOne_Setup( ctx );
}
/**
* @brief Release the mutex.
*/
static void RtemsSemReqPerfMtxPiReleaseOne_Body( RtemsSemValPerf_Context *ctx )
{
ctx->status = rtems_semaphore_release( ctx->mutex_id );
}
static void RtemsSemReqPerfMtxPiReleaseOne_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleaseOne_Body( ctx );
}
/**
* @brief Restore the worker priority. Discard samples interrupted by a clock
* tick.
*/
static bool RtemsSemReqPerfMtxPiReleaseOne_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
SetPriority( ctx->worker_id, PRIO_HIGH );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiReleaseOne_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiReleaseOne_Teardown(
ctx,
delta,
tic,
toc,
retry
);
}
#if defined(RTEMS_SMP)
/**
* @brief Move worker to scheduler B.
*/
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Prepare(
RtemsSemValPerf_Context *ctx
)
{
SetScheduler( ctx->worker_id, SCHEDULER_B_ID, PRIO_NORMAL );
}
/**
* @brief Let one task wait on the mutex.
*/
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup(
RtemsSemValPerf_Context *ctx
)
{
ObtainMutex( ctx->mutex_id );
Send( ctx, EVENT_OBTAIN | EVENT_OBTAIN_END | EVENT_RELEASE );
WaitForNextTask( 1, ctx->worker_id );
}
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup( ctx );
}
/**
* @brief Release the mutex.
*/
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Body(
RtemsSemValPerf_Context *ctx
)
{
ctx->begin = T_tick();
ctx->status = rtems_semaphore_release( ctx->mutex_id );
}
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleaseOtherCpu_Body( ctx );
}
/**
* @brief Make sure the worker waits for the next event. Set the measured
* runtime. Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
WaitForNextTask( 1, ctx->worker_id );
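/* Use the end time point recorded by the worker on the other processor */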
*delta = ctx->end - ctx->begin;
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown(
ctx,
delta,
tic,
toc,
retry
);
}
/**
* @brief Move worker to scheduler A.
*/
static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Cleanup(
RtemsSemValPerf_Context *ctx
)
{
SetScheduler( ctx->worker_id, SCHEDULER_A_ID, PRIO_HIGH );
}
#endif
/**
* @brief Let one task wait on the mutex.
*/
static void RtemsSemReqPerfMtxPiReleasePreempt_Setup(
RtemsSemValPerf_Context *ctx
)
{
ObtainMutex( ctx->mutex_id );
Send( ctx, EVENT_OBTAIN | EVENT_OBTAIN_END | EVENT_RELEASE );
}
static void RtemsSemReqPerfMtxPiReleasePreempt_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleasePreempt_Setup( ctx );
}
/**
* @brief Release the mutex.
*/
static void RtemsSemReqPerfMtxPiReleasePreempt_Body(
RtemsSemValPerf_Context *ctx
)
{
ctx->begin = T_tick();
ctx->status = rtems_semaphore_release( ctx->mutex_id );
}
static void RtemsSemReqPerfMtxPiReleasePreempt_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiReleasePreempt_Body( ctx );
}
/**
* @brief Set the measured runtime. Discard samples interrupted by a clock
* tick.
*/
static bool RtemsSemReqPerfMtxPiReleasePreempt_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
*delta = ctx->end - ctx->begin;
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiReleasePreempt_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiReleasePreempt_Teardown(
ctx,
delta,
tic,
toc,
retry
);
}
/**
* @brief Make the mutex unavailable.
*/
static void RtemsSemReqPerfMtxPiTry_Prepare( RtemsSemValPerf_Context *ctx )
{
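/* The worker obtains the mutex and keeps it for the measurements */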
Send( ctx, EVENT_OBTAIN );
}
/**
* @brief Try to obtain the unavailable mutex.
*/
static void RtemsSemReqPerfMtxPiTry_Body( RtemsSemValPerf_Context *ctx )
{
ctx->status = rtems_semaphore_obtain( ctx->mutex_id, RTEMS_NO_WAIT, 0 );
}
static void RtemsSemReqPerfMtxPiTry_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiTry_Body( ctx );
}
/**
* @brief Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiTry_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_UNSATISFIED );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiTry_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiTry_Teardown( ctx, delta, tic, toc, retry );
}
/**
* @brief Make the mutex available.
*/
static void RtemsSemReqPerfMtxPiTry_Cleanup( RtemsSemValPerf_Context *ctx )
{
Send( ctx, EVENT_RELEASE );
}
/**
* @brief Make the mutex unavailable.
*/
static void RtemsSemReqPerfMtxPiWaitForever_Setup(
RtemsSemValPerf_Context *ctx
)
{
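/*
 * Let the worker own the mutex and lower its priority.  Once the runner
 * blocks in the measurement body, the worker records the end time point and
 * releases the mutex.
 */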
Send( ctx, EVENT_OBTAIN );
SetPriority( ctx->worker_id, PRIO_LOW );
Send( ctx, EVENT_END | EVENT_RELEASE );
}
static void RtemsSemReqPerfMtxPiWaitForever_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiWaitForever_Setup( ctx );
}
/**
* @brief Obtain the unavailable mutex and wait forever.
*/
static void RtemsSemReqPerfMtxPiWaitForever_Body(
RtemsSemValPerf_Context *ctx
)
{
ctx->begin = T_tick();
ctx->status = rtems_semaphore_obtain(
ctx->mutex_id,
RTEMS_WAIT,
RTEMS_NO_TIMEOUT
);
}
static void RtemsSemReqPerfMtxPiWaitForever_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiWaitForever_Body( ctx );
}
/**
* @brief Set the measured runtime. Restore the worker priority. Release the
* mutex. Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiWaitForever_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
*delta = ctx->end - ctx->begin;
SetPriority( ctx->worker_id, PRIO_HIGH );
ReleaseMutex( ctx->mutex_id );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiWaitForever_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiWaitForever_Teardown(
ctx,
delta,
tic,
toc,
retry
);
}
/**
* @brief Make the mutex unavailable.
*/
static void RtemsSemReqPerfMtxPiWaitTimed_Setup( RtemsSemValPerf_Context *ctx )
{
Send( ctx, EVENT_OBTAIN );
SetPriority( ctx->worker_id, PRIO_LOW );
Send( ctx, EVENT_END | EVENT_RELEASE );
}
static void RtemsSemReqPerfMtxPiWaitTimed_Setup_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiWaitTimed_Setup( ctx );
}
/**
* @brief Obtain the unavailable mutex and wait with a timeout.
*/
static void RtemsSemReqPerfMtxPiWaitTimed_Body( RtemsSemValPerf_Context *ctx )
{
ctx->begin = T_tick();
ctx->status = rtems_semaphore_obtain(
ctx->mutex_id,
RTEMS_WAIT,
UINT32_MAX
);
}
static void RtemsSemReqPerfMtxPiWaitTimed_Body_Wrap( void *arg )
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
RtemsSemReqPerfMtxPiWaitTimed_Body( ctx );
}
/**
* @brief Set the measured runtime. Restore the worker priority. Release the
* mutex. Discard samples interrupted by a clock tick.
*/
static bool RtemsSemReqPerfMtxPiWaitTimed_Teardown(
RtemsSemValPerf_Context *ctx,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
*delta = ctx->end - ctx->begin;
SetPriority( ctx->worker_id, PRIO_HIGH );
ReleaseMutex( ctx->mutex_id );
return tic == toc;
}
static bool RtemsSemReqPerfMtxPiWaitTimed_Teardown_Wrap(
void *arg,
T_ticks *delta,
uint32_t tic,
uint32_t toc,
unsigned int retry
)
{
RtemsSemValPerf_Context *ctx;
ctx = arg;
return RtemsSemReqPerfMtxPiWaitTimed_Teardown( ctx, delta, tic, toc, retry );
}
/**
* @fn void T_case_body_RtemsSemValPerf( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemValPerf, &RtemsSemValPerf_Fixture )
{
RtemsSemValPerf_Context *ctx;
ctx = T_fixture_context();
ctx->request.name = "RtemsSemReqPerfMtxPiObtain";
ctx->request.setup = NULL;
ctx->request.body = RtemsSemReqPerfMtxPiObtain_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiObtain_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
ctx->request.name = "RtemsSemReqPerfMtxPiRelease";
ctx->request.setup = RtemsSemReqPerfMtxPiRelease_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiRelease_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiRelease_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
ctx->request.name = "RtemsSemReqPerfMtxPiReleaseOne";
ctx->request.setup = RtemsSemReqPerfMtxPiReleaseOne_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiReleaseOne_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiReleaseOne_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
#if defined(RTEMS_SMP)
RtemsSemReqPerfMtxPiReleaseOtherCpu_Prepare( ctx );
ctx->request.name = "RtemsSemReqPerfMtxPiReleaseOtherCpu";
ctx->request.setup = RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiReleaseOtherCpu_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
RtemsSemReqPerfMtxPiReleaseOtherCpu_Cleanup( ctx );
#endif
ctx->request.name = "RtemsSemReqPerfMtxPiReleasePreempt";
ctx->request.setup = RtemsSemReqPerfMtxPiReleasePreempt_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiReleasePreempt_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiReleasePreempt_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
RtemsSemReqPerfMtxPiTry_Prepare( ctx );
ctx->request.name = "RtemsSemReqPerfMtxPiTry";
ctx->request.setup = NULL;
ctx->request.body = RtemsSemReqPerfMtxPiTry_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiTry_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
RtemsSemReqPerfMtxPiTry_Cleanup( ctx );
ctx->request.name = "RtemsSemReqPerfMtxPiWaitForever";
ctx->request.setup = RtemsSemReqPerfMtxPiWaitForever_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiWaitForever_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiWaitForever_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
ctx->request.name = "RtemsSemReqPerfMtxPiWaitTimed";
ctx->request.setup = RtemsSemReqPerfMtxPiWaitTimed_Setup_Wrap;
ctx->request.body = RtemsSemReqPerfMtxPiWaitTimed_Body_Wrap;
ctx->request.teardown = RtemsSemReqPerfMtxPiWaitTimed_Teardown_Wrap;
T_measure_runtime( ctx->context, &ctx->request );
}
/** @} */

View File

@@ -0,0 +1,613 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemReqRelease
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <string.h>
#include "tr-mtx-surrender.h"
#include "tr-sem-surrender.h"
#include "tx-support.h"
#include "tx-thread-queue.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemReqRelease spec:/rtems/sem/req/release
*
* @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
*
* @{
*/
typedef enum {
RtemsSemReqRelease_Pre_Class_Counting,
RtemsSemReqRelease_Pre_Class_Simple,
RtemsSemReqRelease_Pre_Class_Binary,
RtemsSemReqRelease_Pre_Class_PrioCeiling,
RtemsSemReqRelease_Pre_Class_PrioInherit,
RtemsSemReqRelease_Pre_Class_MrsP,
RtemsSemReqRelease_Pre_Class_NA
} RtemsSemReqRelease_Pre_Class;
typedef enum {
RtemsSemReqRelease_Pre_Discipline_FIFO,
RtemsSemReqRelease_Pre_Discipline_Priority,
RtemsSemReqRelease_Pre_Discipline_NA
} RtemsSemReqRelease_Pre_Discipline;
typedef enum {
RtemsSemReqRelease_Pre_Id_Valid,
RtemsSemReqRelease_Pre_Id_Invalid,
RtemsSemReqRelease_Pre_Id_NA
} RtemsSemReqRelease_Pre_Id;
typedef enum {
RtemsSemReqRelease_Post_Action_InvId,
RtemsSemReqRelease_Post_Action_BinarySurrender,
RtemsSemReqRelease_Post_Action_CountingSurrender,
RtemsSemReqRelease_Post_Action_MtxSurrender,
RtemsSemReqRelease_Post_Action_InheritMtxSurrender,
RtemsSemReqRelease_Post_Action_CeilingMtxSurrender,
RtemsSemReqRelease_Post_Action_MrsPMtxSurrender,
RtemsSemReqRelease_Post_Action_NA
} RtemsSemReqRelease_Post_Action;
typedef struct {
uint8_t Skip : 1;
uint8_t Pre_Class_NA : 1;
uint8_t Pre_Discipline_NA : 1;
uint8_t Pre_Id_NA : 1;
uint8_t Post_Action : 3;
} RtemsSemReqRelease_Entry;
/**
* @brief Test context for spec:/rtems/sem/req/release test case.
*/
typedef struct {
/**
* @brief This member contains the thread queue test context.
*/
union {
TQContext tq_ctx;
TQMtxContext tq_mtx_ctx;
TQSemContext tq_sem_ctx;
};
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
struct {
/**
* @brief This member defines the pre-condition states for the next action.
*/
size_t pcs[ 3 ];
/**
* @brief If this member is true, then the test action loop is executed.
*/
bool in_action_loop;
/**
* @brief This member contains the next transition map index.
*/
size_t index;
/**
* @brief This member contains the current transition map entry.
*/
RtemsSemReqRelease_Entry entry;
/**
* @brief If this member is true, then the current transition variant
* should be skipped.
*/
bool skip;
} Map;
} RtemsSemReqRelease_Context;
static RtemsSemReqRelease_Context
RtemsSemReqRelease_Instance;
static const char * const RtemsSemReqRelease_PreDesc_Class[] = {
"Counting",
"Simple",
"Binary",
"PrioCeiling",
"PrioInherit",
"MrsP",
"NA"
};
static const char * const RtemsSemReqRelease_PreDesc_Discipline[] = {
"FIFO",
"Priority",
"NA"
};
static const char * const RtemsSemReqRelease_PreDesc_Id[] = {
"Valid",
"Invalid",
"NA"
};
static const char * const * const RtemsSemReqRelease_PreDesc[] = {
RtemsSemReqRelease_PreDesc_Class,
RtemsSemReqRelease_PreDesc_Discipline,
RtemsSemReqRelease_PreDesc_Id,
NULL
};
#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
typedef RtemsSemReqRelease_Context Context;
static void RtemsSemReqRelease_Pre_Class_Prepare(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Pre_Class state
)
{
switch ( state ) {
case RtemsSemReqRelease_Pre_Class_Counting: {
/*
* While the semaphore object is a counting semaphore.
*/
ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
break;
}
case RtemsSemReqRelease_Pre_Class_Simple: {
/*
* While the semaphore object is a simple binary semaphore.
*/
ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
break;
}
case RtemsSemReqRelease_Pre_Class_Binary: {
/*
* While the semaphore object is a binary semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
break;
}
case RtemsSemReqRelease_Pre_Class_PrioCeiling: {
/*
* While the semaphore object is a priority ceiling semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
break;
}
case RtemsSemReqRelease_Pre_Class_PrioInherit: {
/*
* While the semaphore object is a priority inheritance semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
break;
}
case RtemsSemReqRelease_Pre_Class_MrsP: {
/*
* While the semaphore object is a MrsP semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
break;
}
case RtemsSemReqRelease_Pre_Class_NA:
break;
}
}
static void RtemsSemReqRelease_Pre_Discipline_Prepare(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Pre_Discipline state
)
{
switch ( state ) {
case RtemsSemReqRelease_Pre_Discipline_FIFO: {
/*
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
ctx->tq_ctx.discipline = TQ_FIFO;
break;
}
case RtemsSemReqRelease_Pre_Discipline_Priority: {
/*
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
ctx->tq_ctx.discipline = TQ_PRIORITY;
break;
}
case RtemsSemReqRelease_Pre_Discipline_NA:
break;
}
}
static void RtemsSemReqRelease_Pre_Id_Prepare(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Pre_Id state
)
{
switch ( state ) {
case RtemsSemReqRelease_Pre_Id_Valid: {
/*
* While the ``id`` parameter is associated with the semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqRelease_Pre_Id_Invalid: {
/*
* While the ``id`` parameter is not associated with a semaphore.
*/
/* Nothing to prepare */
break;
}
case RtemsSemReqRelease_Pre_Id_NA:
break;
}
}
static void RtemsSemReqRelease_Post_Action_Check(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Post_Action state
)
{
rtems_status_code sc;
switch ( state ) {
case RtemsSemReqRelease_Post_Action_InvId: {
/*
* The return status of rtems_semaphore_release() shall be
* RTEMS_INVALID_ID.
*/
sc = rtems_semaphore_release( 0xffffffff );
T_rsc( sc, RTEMS_INVALID_ID );
break;
}
case RtemsSemReqRelease_Post_Action_BinarySurrender: {
/*
* The calling task shall surrender the binary semaphore as specified by
* /score/sem/req/surrender.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_ctx.get_owner = NULL;
ctx->tq_sem_ctx.variant = TQ_SEM_BINARY;
ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_CountingSurrender: {
/*
* The calling task shall surrender the counting semaphore as specified
* by /score/sem/req/surrender.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_ctx.get_owner = NULL;
ctx->tq_sem_ctx.variant = TQ_SEM_COUNTING;
ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_MtxSurrender: {
/*
* The calling task shall surrender the mutex as specified by
* /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
* allowed, the owner is checked, and no locking protocol is used.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_InheritMtxSurrender: {
/*
* The calling task shall surrender the mutex as specified by
* /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
* allowed, the owner is checked, and a priority inheritance protocol is
* used.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_INHERIT;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_CeilingMtxSurrender: {
/*
* The calling task shall surrender the mutex as specified by
* /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
* allowed, the owner is checked, and a priority ceiling is used.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_MrsPMtxSurrender: {
/*
* The calling task shall surrender the mutex as specified by
* /score/mtx/req/surrender where an enqueue is sticky, a recursive seize
* returns an error status, the owner is checked, and a priority ceiling
* is used.
*/
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
case RtemsSemReqRelease_Post_Action_NA:
break;
}
}
static void RtemsSemReqRelease_Setup( RtemsSemReqRelease_Context *ctx )
{
SetSelfPriority( PRIO_NORMAL );
memset( ctx, 0, sizeof( *ctx ) );
ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
ctx->tq_ctx.surrender = TQSurrenderClassicSem;
ctx->tq_ctx.convert_status = TQConvertStatusClassic;
TQInitialize( &ctx->tq_ctx );
}
static void RtemsSemReqRelease_Setup_Wrap( void *arg )
{
RtemsSemReqRelease_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqRelease_Setup( ctx );
}
static void RtemsSemReqRelease_Teardown( RtemsSemReqRelease_Context *ctx )
{
TQDestroy( &ctx->tq_ctx );
RestoreRunnerPriority();
}
static void RtemsSemReqRelease_Teardown_Wrap( void *arg )
{
RtemsSemReqRelease_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqRelease_Teardown( ctx );
}
static void RtemsSemReqRelease_Prepare( RtemsSemReqRelease_Context *ctx )
{
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
}
static void RtemsSemReqRelease_Action( RtemsSemReqRelease_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_create(
NAME,
1,
ctx->attribute_set,
PRIO_VERY_HIGH,
&ctx->tq_ctx.thread_queue_id
);
T_rsc_success( sc );
if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
rtems_task_priority prio;
sc = rtems_semaphore_set_priority(
ctx->tq_ctx.thread_queue_id,
SCHEDULER_B_ID,
PRIO_VERY_HIGH,
&prio
);
T_rsc_success( sc );
}
}
static void RtemsSemReqRelease_Cleanup( RtemsSemReqRelease_Context *ctx )
{
rtems_status_code sc;
sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
T_rsc_success( sc );
}
static const RtemsSemReqRelease_Entry
RtemsSemReqRelease_Entries[] = {
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId },
{ 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CountingSurrender },
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_BinarySurrender },
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MtxSurrender },
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CeilingMtxSurrender },
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InheritMtxSurrender },
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MrsPMtxSurrender },
#else
{ 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
#endif
#if defined(RTEMS_SMP)
{ 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId }
#else
{ 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA }
#endif
};
static const uint8_t
RtemsSemReqRelease_Map[] = {
2, 0, 2, 0, 3, 0, 3, 0, 4, 0, 4, 0, 1, 1, 5, 0, 1, 1, 6, 0, 1, 1, 7, 8
};
static size_t RtemsSemReqRelease_Scope( void *arg, char *buf, size_t n )
{
RtemsSemReqRelease_Context *ctx;
ctx = arg;
if ( ctx->Map.in_action_loop ) {
return T_get_scope( RtemsSemReqRelease_PreDesc, buf, n, ctx->Map.pcs );
}
return 0;
}
static T_fixture RtemsSemReqRelease_Fixture = {
.setup = RtemsSemReqRelease_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemReqRelease_Teardown_Wrap,
.scope = RtemsSemReqRelease_Scope,
.initial_context = &RtemsSemReqRelease_Instance
};
static inline RtemsSemReqRelease_Entry RtemsSemReqRelease_PopEntry(
RtemsSemReqRelease_Context *ctx
)
{
size_t index;
index = ctx->Map.index;
ctx->Map.index = index + 1;
return RtemsSemReqRelease_Entries[
RtemsSemReqRelease_Map[ index ]
];
}
static void RtemsSemReqRelease_TestVariant( RtemsSemReqRelease_Context *ctx )
{
RtemsSemReqRelease_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqRelease_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
RtemsSemReqRelease_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
RtemsSemReqRelease_Action( ctx );
RtemsSemReqRelease_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
}
/**
* @fn void T_case_body_RtemsSemReqRelease( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemReqRelease, &RtemsSemReqRelease_Fixture )
{
RtemsSemReqRelease_Context *ctx;
ctx = T_fixture_context();
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
for (
ctx->Map.pcs[ 0 ] = RtemsSemReqRelease_Pre_Class_Counting;
ctx->Map.pcs[ 0 ] < RtemsSemReqRelease_Pre_Class_NA;
++ctx->Map.pcs[ 0 ]
) {
for (
ctx->Map.pcs[ 1 ] = RtemsSemReqRelease_Pre_Discipline_FIFO;
ctx->Map.pcs[ 1 ] < RtemsSemReqRelease_Pre_Discipline_NA;
++ctx->Map.pcs[ 1 ]
) {
for (
ctx->Map.pcs[ 2 ] = RtemsSemReqRelease_Pre_Id_Valid;
ctx->Map.pcs[ 2 ] < RtemsSemReqRelease_Pre_Id_NA;
++ctx->Map.pcs[ 2 ]
) {
ctx->Map.entry = RtemsSemReqRelease_PopEntry( ctx );
if ( ctx->Map.entry.Skip ) {
continue;
}
RtemsSemReqRelease_Prepare( ctx );
RtemsSemReqRelease_TestVariant( ctx );
RtemsSemReqRelease_Cleanup( ctx );
}
}
}
}
/** @} */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,478 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemValSmp
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <rtems/score/threaddispatch.h>
#include "ts-config.h"
#include "tx-support.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemValSmp spec:/rtems/sem/val/smp
*
* @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
*
* @brief Tests SMP-specific semaphore behaviour.
*
* This test case performs the following actions:
*
* - Create a worker thread and a MrsP mutex. Use the mutex and the worker to
* perform a bad sticky thread queue enqueue.
*
* - Create two worker threads, a MrsP mutex, and a priority inheritance mutex.
* Use the mutexes and the workers to raise the current priority to a higher
* priority than the ceiling priority of the mutex while one of the workers
* waits on the mutex.
*
* - Let the first worker try to obtain the MrsP mutex. Check that it
* acquired the ceiling priority.
*
* - Let the second worker try to obtain the priority inheritance mutex.
* Check that the first worker inherited the priority from the second
* worker.
*
* - Set the real priority of the first worker. Check that it defines the
* current priority.
*
* - Release the MrsP mutex so that the first worker can obtain it. It
* will replace a temporary priority node which is the maximum priority
* node. This is the first scenario we want to test.
*
* - Obtain the MrsP mutex for the runner thread to start the second scenario
* we would like to test.
*
* - Let the first worker try to obtain the MrsP mutex. Check that it
* acquired the ceiling priority.
*
* - Let the second worker try to obtain the priority inheritance mutex.
* Check that the first worker inherited the priority from the second
* worker.
*
* - Lower the priority of the second worker. Check that the inherited
* priority of the first worker reflects this priority change.
*
* - Change the real priority of the first worker so that it defines its
* current priority.
*
* - Release the MrsP mutex so that the first worker can obtain it. It
* will replace a temporary priority node which is between the minimum and
* maximum priority node. This is the second scenario we want to test.
*
* - Clean up all used resources.
*
* @{
*/
/**
* @brief Test context for spec:/rtems/sem/val/smp test case.
*/
typedef struct {
/**
* @brief This member contains the mutex identifier.
*/
rtems_id mutex_id;
/**
* @brief This member contains the second mutex identifier.
*/
rtems_id mutex_2_id;
/**
* @brief If this member is true, then the worker is done.
*/
volatile bool done;
/**
* @brief If this member is true, then the second worker is done.
*/
volatile bool done_2;
} RtemsSemValSmp_Context;
static RtemsSemValSmp_Context
RtemsSemValSmp_Instance;
typedef RtemsSemValSmp_Context Context;
static void BadEnqueueFatal(
rtems_fatal_source source,
rtems_fatal_code code,
void *arg
)
{
Per_CPU_Control *cpu_self;
Context *ctx;
T_eq_int( source, INTERNAL_ERROR_CORE );
T_eq_ulong(
code,
INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
);
SetFatalHandler( NULL, NULL );
cpu_self = _Per_CPU_Get();
_Thread_Dispatch_unnest( cpu_self );
_Thread_Dispatch_unnest( cpu_self );
ctx = arg;
ctx->done = true;
SuspendSelf();
}
static void BadEnqueueTask( rtems_task_argument arg )
{
Context *ctx;
ctx = (Context *) arg;
(void) _Thread_Dispatch_disable();
ObtainMutex( ctx->mutex_id );
}
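/*
 * The worker owns the priority inheritance mutex while it waits for the MrsP
 * mutex.  Progress is signalled to the runner through the done flag.
 */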
static void ObtainReleaseMrsPTask( rtems_task_argument arg )
{
Context *ctx;
ctx = (Context *) arg;
ObtainMutex( ctx->mutex_2_id );
ctx->done = true;
ObtainMutex( ctx->mutex_id );
ReleaseMutex( ctx->mutex_id );
ReleaseMutex( ctx->mutex_2_id );
ctx->done = true;
SuspendSelf();
}
static void ObtainRelease2Task( rtems_task_argument arg )
{
Context *ctx;
ctx = (Context *) arg;
ctx->done_2 = true;
ObtainMutex( ctx->mutex_2_id );
ReleaseMutex( ctx->mutex_2_id );
ctx->done_2 = true;
SuspendSelf();
}
static void RtemsSemValSmp_Setup( RtemsSemValSmp_Context *ctx )
{
SetSelfPriority( PRIO_NORMAL );
}
static void RtemsSemValSmp_Setup_Wrap( void *arg )
{
RtemsSemValSmp_Context *ctx;
ctx = arg;
RtemsSemValSmp_Setup( ctx );
}
static void RtemsSemValSmp_Teardown( RtemsSemValSmp_Context *ctx )
{
RestoreRunnerPriority();
}
static void RtemsSemValSmp_Teardown_Wrap( void *arg )
{
RtemsSemValSmp_Context *ctx;
ctx = arg;
RtemsSemValSmp_Teardown( ctx );
}
static T_fixture RtemsSemValSmp_Fixture = {
.setup = RtemsSemValSmp_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemValSmp_Teardown_Wrap,
.scope = NULL,
.initial_context = &RtemsSemValSmp_Instance
};
/**
* @brief Create a worker thread and a MrsP mutex. Use the mutex and the
* worker to perform a bad sticky thread queue enqueue.
*/
static void RtemsSemValSmp_Action_0( RtemsSemValSmp_Context *ctx )
{
rtems_status_code sc;
rtems_id worker_id;
rtems_id scheduler_b_id;
ctx->done = false;
sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &scheduler_b_id );
T_rsc_success( sc );
sc = rtems_semaphore_create(
rtems_build_name( 'M', 'U', 'T', 'X' ),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
PRIO_HIGH,
&ctx->mutex_id
);
T_rsc_success( sc );
worker_id = CreateTask( "WORK", PRIO_NORMAL );
SetScheduler( worker_id, scheduler_b_id, PRIO_NORMAL );
ObtainMutex( ctx->mutex_id );
SetFatalHandler( BadEnqueueFatal, ctx );
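/*
 * The worker disables thread dispatching and then tries to obtain the
 * already owned MrsP mutex.  This performs the bad sticky enqueue which is
 * checked by the fatal error handler.
 */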
StartTask( worker_id, BadEnqueueTask, ctx );
while ( !ctx->done ) {
/* Wait */
}
DeleteTask( worker_id );
ReleaseMutex( ctx->mutex_id );
DeleteMutex( ctx->mutex_id );
}
/**
* @brief Create two worker threads, a MrsP mutex, and a priority inheritance
* mutex. Use the mutexes and the workers to raise the current priority to a
* higher priority than the ceiling priority of the mutex while one of the
* workers waits on the mutex.
*/
static void RtemsSemValSmp_Action_1( RtemsSemValSmp_Context *ctx )
{
rtems_status_code sc;
rtems_id worker_id;
rtems_id worker_2_id;
rtems_id scheduler_b_id;
rtems_task_priority prio;
sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &scheduler_b_id );
T_rsc_success( sc );
sc = rtems_semaphore_create(
rtems_build_name( 'M', 'U', 'T', 'X' ),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
PRIO_HIGH,
&ctx->mutex_id
);
T_rsc_success( sc );
sc = rtems_semaphore_set_priority(
ctx->mutex_id,
scheduler_b_id,
PRIO_HIGH,
&prio
);
T_rsc_success( sc );
ctx->mutex_2_id = CreateMutex();
worker_id = CreateTask( "WORK", PRIO_NORMAL );
SetScheduler( worker_id, scheduler_b_id, PRIO_NORMAL );
worker_2_id = CreateTask( "WRK2", PRIO_NORMAL );
SetScheduler( worker_2_id, scheduler_b_id, PRIO_VERY_HIGH );
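/*
 * Both workers run under scheduler B.  The second worker has a priority
 * above the ceiling priority of the MrsP mutex on this scheduler.
 */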
/*
* Let the first worker try to obtain the MrsP mutex. Check that it acquired
* the ceiling priority.
*/
ObtainMutex( ctx->mutex_id );
ctx->done = false;
StartTask( worker_id, ObtainReleaseMrsPTask, ctx );
while ( !ctx->done ) {
/* Wait */
}
ctx->done = false;
WaitForIntendToBlock( worker_id );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_HIGH );
/*
* Let the second worker try to obtain the priority inheritance mutex. Check
* that the first worker inherited the priority from the second worker.
*/
ctx->done_2 = false;
StartTask( worker_2_id, ObtainRelease2Task, ctx );
while ( !ctx->done_2 ) {
/* Wait */
}
ctx->done_2 = false;
WaitForExecutionStop( worker_2_id );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_VERY_HIGH );
/*
* Set the real priority of the first worker. Check that it defines the
* current priority.
*/
SetPriority( worker_id, PRIO_ULTRA_HIGH );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_ULTRA_HIGH );
/*
* Release the MrsP mutex so that the first worker can obtain it. It will
* replace a temporary priority node which is the maximum priority node.
* This is the first scenario we want to test.
*/
ReleaseMutex( ctx->mutex_id );
while ( !ctx->done || !ctx->done_2 ) {
/* Wait */
}
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_ULTRA_HIGH );
/*
* Obtain the MrsP mutex for the runner thread to start the second scenario
* we would like to test.
*/
ObtainMutex( ctx->mutex_id );
/*
* Let the first worker try to obtain the MrsP mutex. Check that it acquired
* the ceiling priority.
*/
ctx->done = false;
sc = rtems_task_restart( worker_id, (rtems_task_argument) ctx );
T_rsc_success( sc );
while ( !ctx->done ) {
/* Wait */
}
ctx->done = false;
WaitForIntendToBlock( worker_id );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_HIGH );
/*
* Let the second worker try to obtain the priority inheritance mutex. Check
* that the first worker inherited the priority from the second worker.
*/
ctx->done_2 = false;
sc = rtems_task_restart( worker_2_id, (rtems_task_argument) ctx );
T_rsc_success( sc );
while ( !ctx->done_2 ) {
/* Wait */
}
ctx->done_2 = false;
WaitForExecutionStop( worker_2_id );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_VERY_HIGH );
/*
* Lower the priority of the second worker. Check that the inherited
* priority of the first worker reflects this priority change.
*/
SetPriority( worker_2_id, PRIO_LOW );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_HIGH );
/*
* Change the real priority of the first worker so that it defines its
* current priority.
*/
SetPriority( worker_id, PRIO_ULTRA_HIGH );
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_ULTRA_HIGH );
/*
* Release the MrsP mutex so that the first worker can obtain it. It will
* replace a temporary priority node which is between the minimum and maximum
* priority node. This is the second scenario we want to test.
*/
ReleaseMutex( ctx->mutex_id );
while ( !ctx->done || !ctx->done_2 ) {
/* Wait */
}
prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
T_eq_u32( prio, PRIO_ULTRA_HIGH );
/*
* Clean up all used resources.
*/
DeleteTask( worker_id );
DeleteTask( worker_2_id );
DeleteMutex( ctx->mutex_id );
DeleteMutex( ctx->mutex_2_id );
}
/**
* @fn void T_case_body_RtemsSemValSmp( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemValSmp, &RtemsSemValSmp_Fixture )
{
RtemsSemValSmp_Context *ctx;
ctx = T_fixture_context();
RtemsSemValSmp_Action_0( ctx );
RtemsSemValSmp_Action_1( ctx );
}
/** @} */

View File

@@ -0,0 +1,461 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemReqTimeout
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <string.h>
#include "tr-tq-timeout-mrsp.h"
#include "tr-tq-timeout-priority-inherit.h"
#include "tr-tq-timeout.h"
#include "tx-support.h"
#include "tx-thread-queue.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemReqTimeout spec:/rtems/sem/req/timeout
*
* @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
*
* @{
*/
typedef enum {
RtemsSemReqTimeout_Pre_Class_Counting,
RtemsSemReqTimeout_Pre_Class_Simple,
RtemsSemReqTimeout_Pre_Class_Binary,
RtemsSemReqTimeout_Pre_Class_PrioCeiling,
RtemsSemReqTimeout_Pre_Class_PrioInherit,
RtemsSemReqTimeout_Pre_Class_MrsP,
RtemsSemReqTimeout_Pre_Class_NA
} RtemsSemReqTimeout_Pre_Class;
typedef enum {
RtemsSemReqTimeout_Pre_Discipline_FIFO,
RtemsSemReqTimeout_Pre_Discipline_Priority,
RtemsSemReqTimeout_Pre_Discipline_NA
} RtemsSemReqTimeout_Pre_Discipline;
typedef enum {
RtemsSemReqTimeout_Post_Action_Timeout,
RtemsSemReqTimeout_Post_Action_TimeoutMrsP,
RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit,
RtemsSemReqTimeout_Post_Action_NA
} RtemsSemReqTimeout_Post_Action;
typedef struct {
uint8_t Skip : 1;
uint8_t Pre_Class_NA : 1;
uint8_t Pre_Discipline_NA : 1;
uint8_t Post_Action : 2;
} RtemsSemReqTimeout_Entry;
/**
* @brief Test context for spec:/rtems/sem/req/timeout test case.
*/
typedef struct {
/**
* @brief This member contains the thread queue test context.
*/
TQContext tq_ctx;
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
struct {
/**
* @brief This member defines the pre-condition states for the next action.
*/
size_t pcs[ 2 ];
/**
* @brief If this member is true, then the test action loop is executed.
*/
bool in_action_loop;
/**
* @brief This member contains the next transition map index.
*/
size_t index;
/**
* @brief This member contains the current transition map entry.
*/
RtemsSemReqTimeout_Entry entry;
/**
* @brief If this member is true, then the current transition variant
* should be skipped.
*/
bool skip;
} Map;
} RtemsSemReqTimeout_Context;
static RtemsSemReqTimeout_Context
RtemsSemReqTimeout_Instance;
static const char * const RtemsSemReqTimeout_PreDesc_Class[] = {
"Counting",
"Simple",
"Binary",
"PrioCeiling",
"PrioInherit",
"MrsP",
"NA"
};
static const char * const RtemsSemReqTimeout_PreDesc_Discipline[] = {
"FIFO",
"Priority",
"NA"
};
static const char * const * const RtemsSemReqTimeout_PreDesc[] = {
RtemsSemReqTimeout_PreDesc_Class,
RtemsSemReqTimeout_PreDesc_Discipline,
NULL
};
static void RtemsSemReqTimeout_Pre_Class_Prepare(
RtemsSemReqTimeout_Context *ctx,
RtemsSemReqTimeout_Pre_Class state
)
{
switch ( state ) {
case RtemsSemReqTimeout_Pre_Class_Counting: {
/*
* While the semaphore object is a counting semaphore.
*/
ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
break;
}
case RtemsSemReqTimeout_Pre_Class_Simple: {
/*
* While the semaphore object is a simple binary semaphore.
*/
ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
break;
}
case RtemsSemReqTimeout_Pre_Class_Binary: {
/*
* While the semaphore object is a binary semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
break;
}
case RtemsSemReqTimeout_Pre_Class_PrioCeiling: {
/*
* While the semaphore object is a priority ceiling semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
break;
}
case RtemsSemReqTimeout_Pre_Class_PrioInherit: {
/*
* While the semaphore object is a priority inheritance semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
break;
}
case RtemsSemReqTimeout_Pre_Class_MrsP: {
/*
* While the semaphore object is a MrsP semaphore.
*/
ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
break;
}
case RtemsSemReqTimeout_Pre_Class_NA:
break;
}
}
static void RtemsSemReqTimeout_Pre_Discipline_Prepare(
RtemsSemReqTimeout_Context *ctx,
RtemsSemReqTimeout_Pre_Discipline state
)
{
switch ( state ) {
case RtemsSemReqTimeout_Pre_Discipline_FIFO: {
/*
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
ctx->tq_ctx.discipline = TQ_FIFO;
break;
}
case RtemsSemReqTimeout_Pre_Discipline_Priority: {
/*
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
ctx->tq_ctx.discipline = TQ_PRIORITY;
break;
}
case RtemsSemReqTimeout_Pre_Discipline_NA:
break;
}
}
static void RtemsSemReqTimeout_Post_Action_Check(
RtemsSemReqTimeout_Context *ctx,
RtemsSemReqTimeout_Post_Action state
)
{
switch ( state ) {
case RtemsSemReqTimeout_Post_Action_Timeout: {
/*
* The semaphore obtain timeout actions shall be done as specified by
* /score/tq/req/timeout.
*/
ctx->tq_ctx.wait = TQ_WAIT_TIMED;
ScoreTqReqTimeout_Run( &ctx->tq_ctx );
break;
}
case RtemsSemReqTimeout_Post_Action_TimeoutMrsP: {
/*
* The semaphore obtain timeout actions shall be done as specified by
* /score/tq/req/timeout-mrsp.
*/
ctx->tq_ctx.wait = TQ_WAIT_TIMED;
ScoreTqReqTimeoutMrsp_Run( &ctx->tq_ctx );
break;
}
case RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit: {
/*
* The semaphore obtain timeout actions shall be done as specified by
* /score/tq/req/timeout-priority-inherit.
*/
ctx->tq_ctx.wait = TQ_WAIT_FOREVER;
ScoreTqReqTimeoutPriorityInherit_Run( &ctx->tq_ctx );
break;
}
case RtemsSemReqTimeout_Post_Action_NA:
break;
}
}
static void RtemsSemReqTimeout_Setup( RtemsSemReqTimeout_Context *ctx )
{
memset( ctx, 0, sizeof( *ctx ) );
ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
ctx->tq_ctx.surrender = TQSurrenderClassicSem;
ctx->tq_ctx.convert_status = TQConvertStatusClassic;
TQInitialize( &ctx->tq_ctx );
}
static void RtemsSemReqTimeout_Setup_Wrap( void *arg )
{
RtemsSemReqTimeout_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqTimeout_Setup( ctx );
}
static void RtemsSemReqTimeout_Teardown( RtemsSemReqTimeout_Context *ctx )
{
TQDestroy( &ctx->tq_ctx );
}
static void RtemsSemReqTimeout_Teardown_Wrap( void *arg )
{
RtemsSemReqTimeout_Context *ctx;
ctx = arg;
ctx->Map.in_action_loop = false;
RtemsSemReqTimeout_Teardown( ctx );
}
static void RtemsSemReqTimeout_Prepare( RtemsSemReqTimeout_Context *ctx )
{
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
ctx->tq_ctx.thread_queue_id = 0;
}
static void RtemsSemReqTimeout_Action( RtemsSemReqTimeout_Context *ctx )
{
rtems_status_code sc;
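  /*
   * Create the semaphore variant selected by the Class and Discipline
   * preconditions with an initial count of one.  The priority argument is
   * only used by the priority ceiling and MrsP variants.
   */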
sc = rtems_semaphore_create(
OBJECT_NAME,
1,
ctx->attribute_set,
PRIO_HIGH,
&ctx->tq_ctx.thread_queue_id
);
T_rsc_success( sc );
}
static void RtemsSemReqTimeout_Cleanup( RtemsSemReqTimeout_Context *ctx )
{
  if ( ctx->tq_ctx.thread_queue_id != 0 ) {
    rtems_status_code sc;

    sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
    T_rsc_success( sc );
  }
}
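/*
 * The entry table lists the expected post-condition for each distinct
 * outcome.  The map below selects one entry for each of the twelve
 * Class/Discipline precondition combinations: combinations which pair a
 * locking protocol (priority ceiling, priority inheritance, MrsP) with the
 * FIFO discipline are marked as skipped, and the MrsP outcome is only
 * reachable in SMP configurations.
 */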
static const RtemsSemReqTimeout_Entry
RtemsSemReqTimeout_Entries[] = {
{ 0, 0, 0, RtemsSemReqTimeout_Post_Action_Timeout },
{ 1, 0, 0, RtemsSemReqTimeout_Post_Action_NA },
{ 0, 0, 0, RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit },
#if !defined(RTEMS_SMP)
{ 1, 0, 0, RtemsSemReqTimeout_Post_Action_NA }
#else
{ 0, 0, 0, RtemsSemReqTimeout_Post_Action_TimeoutMrsP }
#endif
};
static const uint8_t
RtemsSemReqTimeout_Map[] = {
0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 1, 3
};
static size_t RtemsSemReqTimeout_Scope( void *arg, char *buf, size_t n )
{
RtemsSemReqTimeout_Context *ctx;
ctx = arg;
if ( ctx->Map.in_action_loop ) {
return T_get_scope( RtemsSemReqTimeout_PreDesc, buf, n, ctx->Map.pcs );
}
return 0;
}
static T_fixture RtemsSemReqTimeout_Fixture = {
.setup = RtemsSemReqTimeout_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemReqTimeout_Teardown_Wrap,
.scope = RtemsSemReqTimeout_Scope,
.initial_context = &RtemsSemReqTimeout_Instance
};
static inline RtemsSemReqTimeout_Entry RtemsSemReqTimeout_PopEntry(
RtemsSemReqTimeout_Context *ctx
)
{
size_t index;
index = ctx->Map.index;
ctx->Map.index = index + 1;
return RtemsSemReqTimeout_Entries[
RtemsSemReqTimeout_Map[ index ]
];
}
static void RtemsSemReqTimeout_TestVariant( RtemsSemReqTimeout_Context *ctx )
{
RtemsSemReqTimeout_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqTimeout_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
RtemsSemReqTimeout_Action( ctx );
RtemsSemReqTimeout_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
}
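/*
 * The test case body below iterates over the Cartesian product of the Class
 * and Discipline precondition states, pops the corresponding entry from the
 * transition map, and runs the test variant unless the entry is skipped.
 */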
/**
* @fn void T_case_body_RtemsSemReqTimeout( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemReqTimeout, &RtemsSemReqTimeout_Fixture )
{
RtemsSemReqTimeout_Context *ctx;
ctx = T_fixture_context();
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
for (
ctx->Map.pcs[ 0 ] = RtemsSemReqTimeout_Pre_Class_Counting;
ctx->Map.pcs[ 0 ] < RtemsSemReqTimeout_Pre_Class_NA;
++ctx->Map.pcs[ 0 ]
) {
for (
ctx->Map.pcs[ 1 ] = RtemsSemReqTimeout_Pre_Discipline_FIFO;
ctx->Map.pcs[ 1 ] < RtemsSemReqTimeout_Pre_Discipline_NA;
++ctx->Map.pcs[ 1 ]
) {
ctx->Map.entry = RtemsSemReqTimeout_PopEntry( ctx );
if ( ctx->Map.entry.Skip ) {
continue;
}
RtemsSemReqTimeout_Prepare( ctx );
RtemsSemReqTimeout_TestVariant( ctx );
RtemsSemReqTimeout_Cleanup( ctx );
}
}
}
/** @} */

View File

@@ -0,0 +1,226 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSTestCaseRtemsSemValUni
*/
/*
* Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file is part of the RTEMS quality process and was automatically
* generated. If you find something that needs to be fixed or
* worded better please post a report or patch to an RTEMS mailing list
* or raise a bug report:
*
* https://www.rtems.org/bugs.html
*
* For information on updating and regenerating please refer to the How-To
* section in the Software Requirements Engineering chapter of the
* RTEMS Software Engineering manual. The manual is provided as a part of
* a release. For development sources please refer to the online
* documentation at:
*
* https://docs.rtems.org
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include "tx-support.h"
#include <rtems/test.h>
/**
* @defgroup RTEMSTestCaseRtemsSemValUni spec:/rtems/sem/val/uni
*
* @ingroup RTEMSTestSuiteTestsuitesValidationOneCpu0
*
* @brief Tests uniprocessor-specific semaphore behaviour.
*
* This test case performs the following actions:
*
 * - Create a worker thread and two MrsP mutexes. Obtain the MrsP mutexes and
 *   check that a task yield works (the owner is not sticky). We need two
 *   mutexes since the uniprocessor schedulers do not increment the sticky
 *   level in the scheduler unblock operation.
*
* - Yield and let the worker obtain the MrsP mutexes.
*
* - Yield and let the worker release the MrsP mutexes.
*
* - Clean up all used resources.
*
* @{
*/
/**
* @brief Test context for spec:/rtems/sem/val/uni test case.
*/
typedef struct {
/**
* @brief This member contains the mutex identifier.
*/
rtems_id mutex_id;
/**
* @brief This member contains the second mutex identifier.
*/
rtems_id mutex_2_id;
/**
* @brief This member contains a progress counter.
*/
uint32_t counter;
} RtemsSemValUni_Context;
static RtemsSemValUni_Context
RtemsSemValUni_Instance;
typedef RtemsSemValUni_Context Context;
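/*
 * The worker task obtains both MrsP mutexes and then yields.  On
 * uniprocessor configurations the owner of a MrsP mutex is not sticky, so
 * the yield hands the processor back to the runner.  The progress counter
 * lets the runner check which steps the worker has completed.
 */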
static void ObtainReleaseMrsPTask( rtems_task_argument arg )
{
Context *ctx;
ctx = (Context *) arg;
ObtainMutex( ctx->mutex_id );
ObtainMutex( ctx->mutex_2_id );
ctx->counter = 1;
Yield();
ReleaseMutex( ctx->mutex_2_id );
ReleaseMutex( ctx->mutex_id );
ctx->counter = 2;
(void) ReceiveAnyEvents();
}
static void RtemsSemValUni_Setup( RtemsSemValUni_Context *ctx )
{
SetSelfPriority( PRIO_NORMAL );
}
static void RtemsSemValUni_Setup_Wrap( void *arg )
{
RtemsSemValUni_Context *ctx;
ctx = arg;
RtemsSemValUni_Setup( ctx );
}
static void RtemsSemValUni_Teardown( RtemsSemValUni_Context *ctx )
{
RestoreRunnerPriority();
}
static void RtemsSemValUni_Teardown_Wrap( void *arg )
{
RtemsSemValUni_Context *ctx;
ctx = arg;
RtemsSemValUni_Teardown( ctx );
}
static T_fixture RtemsSemValUni_Fixture = {
.setup = RtemsSemValUni_Setup_Wrap,
.stop = NULL,
.teardown = RtemsSemValUni_Teardown_Wrap,
.scope = NULL,
.initial_context = &RtemsSemValUni_Instance
};
/**
 * @brief Create a worker thread and two MrsP mutexes. Obtain the MrsP mutexes
 *   and check that a task yield works (the owner is not sticky). We need two
 *   mutexes since the uniprocessor schedulers do not increment the sticky
 *   level in the scheduler unblock operation.
*/
static void RtemsSemValUni_Action_0( RtemsSemValUni_Context *ctx )
{
rtems_status_code sc;
rtems_id worker_id;
sc = rtems_semaphore_create(
rtems_build_name( 'M', 'T', 'X', '1' ),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
PRIO_NORMAL,
&ctx->mutex_id
);
T_rsc_success( sc );
sc = rtems_semaphore_create(
rtems_build_name( 'M', 'T', 'X', '2' ),
1,
RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
PRIO_NORMAL,
&ctx->mutex_2_id
);
T_rsc_success( sc );
ctx->counter = 0;
worker_id = CreateTask( "WORK", PRIO_NORMAL );
StartTask( worker_id, ObtainReleaseMrsPTask, ctx );
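  /*
   * The worker has the same priority as the runner, so it does not start to
   * run before the runner yields the processor.
   */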
/*
* Yield and let the worker obtain the MrsP mutexes.
*/
Yield();
T_eq_u32( ctx->counter, 1 );
/*
* Yield and let the worker release the MrsP mutexes.
*/
Yield();
T_eq_u32( ctx->counter, 2 );
/*
* Clean up all used resources.
*/
DeleteTask( worker_id );
DeleteMutex( ctx->mutex_2_id );
DeleteMutex( ctx->mutex_id );
}
/**
* @fn void T_case_body_RtemsSemValUni( void )
*/
T_TEST_CASE_FIXTURE( RtemsSemValUni, &RtemsSemValUni_Fixture )
{
RtemsSemValUni_Context *ctx;
ctx = T_fixture_context();
RtemsSemValUni_Action_0( ctx );
}
/** @} */