smptests: Move SMP broadcast action test case

Sebastian Huber
2019-04-17 11:05:26 +02:00
parent 317997fd1b
commit 9f52acb5e5
2 changed files with 79 additions and 51 deletions

testsuites/smptests/smpcache01/init.c

@@ -29,7 +29,6 @@ const char rtems_test_name[] = "SMPCACHE 1";
 typedef struct {
   SMP_barrier_Control barrier;
-  uint32_t count[CPU_COUNT];
   bool do_longjmp[CPU_COUNT];
   jmp_buf instruction_invalidate_return_context[CPU_COUNT];
 } test_context;
 
@@ -43,13 +42,6 @@ static void function_to_flush( void )
   /* Does nothing. Used to give a pointer to instruction address space. */
 }
 
-static void test_action( void *arg )
-{
-  rtems_test_assert(arg == &ctx);
-
-  ctx.count[rtems_scheduler_get_processor()]++;
-}
-
 typedef void ( *test_case )( void );
 
 static void test_cache_invalidate_entire_instruction( void )
@@ -80,51 +72,26 @@ static void barrier( SMP_barrier_State *bs )
   );
 }
 
-static void broadcast_test_init( void )
-{
-  ctx.count[rtems_scheduler_get_processor()] = 0;
-}
-
-static void broadcast_test_body( void )
-{
-  _SMP_Multicast_action( NULL, test_action, &ctx );
-}
-
-static void broadcast_test_fini( void )
-{
-  rtems_test_assert(
-    ctx.count[rtems_scheduler_get_processor()]
-      == rtems_scheduler_get_processor_maximum()
-  );
-}
-
 static test_case test_cases[] = {
   test_cache_invalidate_entire_instruction,
-  test_cache_invalidate_multiple_instruction_lines,
-  broadcast_test_body
+  test_cache_invalidate_multiple_instruction_lines
 };
 
 static void call_tests( SMP_barrier_State *bs )
 {
   size_t i;
 
-  broadcast_test_init();
-
   for (i = 0; i < RTEMS_ARRAY_SIZE( test_cases ); ++i) {
     barrier( bs );
     ( *test_cases[ i ] )();
     barrier( bs );
   }
-
-  broadcast_test_fini();
 }
 
 static void call_tests_isr_disabled( SMP_barrier_State *bs )
 {
   size_t i;
 
-  broadcast_test_init();
-
   for (i = 0; i < RTEMS_ARRAY_SIZE( test_cases ); ++i) {
     ISR_Level isr_level;
@@ -134,16 +101,12 @@ static void call_tests_isr_disabled( SMP_barrier_State *bs )
     _ISR_Local_enable( isr_level );
     barrier( bs );
   }
-
-  broadcast_test_fini();
 }
 
 static void call_tests_with_thread_dispatch_disabled( SMP_barrier_State *bs )
 {
   size_t i;
 
-  broadcast_test_init();
-
   for (i = 0; i < RTEMS_ARRAY_SIZE( test_cases ); ++i) {
     Per_CPU_Control *cpu_self;
@@ -153,8 +116,6 @@ static void call_tests_with_thread_dispatch_disabled( SMP_barrier_State *bs )
     barrier( bs );
     _Thread_Dispatch_enable( cpu_self );
   }
-
-  broadcast_test_fini();
 }
 
 static void cmlog( const char* str )
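
With the broadcast case gone, smpcache01's test_cases table and the three call_tests variants exercise only the cache invalidation cases, so the count[] field and the broadcast_test_init()/broadcast_test_fini() bookkeeping had no remaining users. The removed assertion relied on the test running in lockstep on all processors: each of the N online processors executed broadcast_test_body() once between barriers, every broadcast ran test_action() on all N processors, so each per-processor counter reached N, the value returned by rtems_scheduler_get_processor_maximum(). The barrier() helper whose tail appears above is presumably a wrapper along these lines, using the SMP_barrier_Control member of the test context shown in the first hunk:

#include <rtems/score/smpbarrier.h>

/* Sketch only: rendezvous of all online processors on the shared barrier */
static void barrier( SMP_barrier_State *bs )
{
  _SMP_barrier_Wait(
    &ctx.barrier,
    bs,
    rtems_scheduler_get_processor_maximum()
  );
}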

testsuites/smptests/smpmulticast01/init.c

@@ -48,11 +48,17 @@ static const T_config config = {
 };
 
 typedef struct {
-  Atomic_Uint id[CPU_COUNT];
+  rtems_test_parallel_context base;
+  Atomic_Uint id[CPU_COUNT][CPU_COUNT];
 } test_context;
 
 static test_context test_instance;
 
+static void clear_ids_by_worker(test_context *ctx, size_t worker_index)
+{
+  memset(&ctx->id[worker_index][0], 0, sizeof(ctx->id[worker_index]));
+}
+
 static void multicast_action_irq_disabled(
   const Processor_mask *targets,
   SMP_Action_handler handler,
@@ -81,16 +87,16 @@ static void multicast_action_dispatch_disabled(
 static void action(void *arg)
 {
-  test_context *ctx;
+  Atomic_Uint *id;
   uint32_t self;
   unsigned expected;
   bool success;
 
-  ctx = arg;
+  id = arg;
   self = rtems_scheduler_get_processor();
   expected = 0;
   success = _Atomic_Compare_exchange_uint(
-    &ctx->id[self],
+    &id[self],
     &expected,
     self + 1,
     ATOMIC_ORDER_RELAXED,
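
Passing a plain Atomic_Uint row instead of the whole context keeps action() agnostic of the context layout, so the parallel broadcast job below can hand each worker its own row of the now two-dimensional id array. The compare-exchange from 0 to self + 1 also doubles as a duplicate-delivery check: a second invocation on the same processor finds a non-zero value, the exchange fails, and the test flags it through the success flag.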
@@ -116,17 +122,17 @@ static void test_unicast(
     Processor_mask cpus;
     uint32_t j;
 
-    memset(ctx, 0, sizeof(*ctx));
+    clear_ids_by_worker(ctx, 0);
 
     _Processor_mask_Zero(&cpus);
     _Processor_mask_Set(&cpus, i);
-    (*multicast_action)(&cpus, action, ctx);
+    (*multicast_action)(&cpus, action, &ctx->id[0][0]);
 
     for (j = 0; j < n; ++j) {
       unsigned id;
 
       ++step;
-      id = _Atomic_Load_uint(&ctx->id[j], ATOMIC_ORDER_RELAXED);
+      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);
 
       if (j == i) {
         T_quiet_eq_uint(j + 1, id);
@@ -155,15 +161,15 @@ static void test_broadcast(
   for (i = 0; i < n; ++i) {
     uint32_t j;
 
-    memset(ctx, 0, sizeof(*ctx));
+    clear_ids_by_worker(ctx, 0);
 
-    (*multicast_action)(NULL, action, ctx);
+    (*multicast_action)(NULL, action, &ctx->id[0][0]);
 
     for (j = 0; j < n; ++j) {
       unsigned id;
 
       ++step;
-      id = _Atomic_Load_uint(&ctx->id[j], ATOMIC_ORDER_RELAXED);
+      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);
       T_quiet_eq_uint(j + 1, id);
     }
   }
@@ -171,6 +177,65 @@ static void test_broadcast(
   T_step_eq_u32(0, step, n * n);
 }
 
+static rtems_interval test_duration(void)
+{
+  return rtems_clock_get_ticks_per_second();
+}
+
+static rtems_interval test_broadcast_init(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
+{
+  return test_duration();
+}
+
+static void test_broadcast_body(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers,
+  size_t worker_index
+)
+{
+  test_context *ctx;
+
+  ctx = (test_context *) base;
+
+  while (!rtems_test_parallel_stop_job(&ctx->base)) {
+    clear_ids_by_worker(ctx, worker_index);
+    _SMP_Multicast_action(NULL, action, &ctx->id[worker_index][0]);
+  }
+}
+
+static void test_broadcast_fini(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
+{
+  /* Do nothing */
+}
+
+static const rtems_test_parallel_job test_jobs[] = {
+  {
+    .init = test_broadcast_init,
+    .body = test_broadcast_body,
+    .fini = test_broadcast_fini,
+    .cascade = true
+  }
+};
+
+T_TEST_CASE(ParallelBroadcast)
+{
+  rtems_test_parallel(
+    &test_instance.base,
+    NULL,
+    &test_jobs[0],
+    RTEMS_ARRAY_SIZE(test_jobs)
+  );
+}
+
 static void test_before_multitasking(void)
 {
   test_context *ctx;
@@ -326,7 +391,9 @@ static void fatal_extension(
 #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
 
-#define CONFIGURE_MAXIMUM_TASKS 1
+#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT
+
+#define CONFIGURE_MAXIMUM_TIMERS 1
 
 #define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT
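
The moved test plugs into the parallel framework through the embedded base member: test_context's first member is a rtems_test_parallel_context, so test_broadcast_body() can cast the base pointer back to the full context, and each worker broadcasts into its own id[worker_index] row for the duration returned by test_broadcast_init(), one second worth of clock ticks. With .cascade = true the job is rerun with a growing number of active workers. The configuration changes follow from this: one task per processor for the workers, plus one timer, presumably used by the framework to bound each job's run time. Adding a further job to the table would follow the same shape; a sketch with hypothetical my_* handler names alongside the job from the diff:

static rtems_interval my_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  /* Run the job body for roughly 100 milliseconds */
  return rtems_clock_get_ticks_per_second() / 10;
}

static void my_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  while (!rtems_test_parallel_stop_job(base)) {
    /* per-worker work goes here */
  }
}

static void my_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  /* verify per-worker results here */
}

static const rtems_test_parallel_job jobs[] = {
  {
    .init = test_broadcast_init,
    .body = test_broadcast_body,
    .fini = test_broadcast_fini,
    .cascade = true
  }, {
    .init = my_init,
    .body = my_body,
    .fini = my_fini,
    .cascade = false
  }
};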