smptests/smpmrsp01: Add and update test cases

Sebastian Huber
2014-06-24 10:00:06 +02:00
parent 5c3d250959
commit 9f228beaa6
2 changed files with 720 additions and 185 deletions

View File

@@ -21,7 +21,9 @@
 #include <rtems.h>
 #include <rtems/libcsupport.h>
+#include <rtems/score/schedulersmpimpl.h>
 #include <rtems/score/smpbarrier.h>
+#include <rtems/score/smplock.h>
 
 #define TESTS_USE_PRINTK
 #include "tmacros.h"
@@ -32,27 +34,43 @@ const char rtems_test_name[] = "SMPMRSP 1";
 #define MRSP_COUNT 32
 
+#define SWITCH_EVENT_COUNT 32
+
 typedef struct {
   uint32_t sleep;
   uint32_t timeout;
   uint32_t obtain[MRSP_COUNT];
+  uint32_t cpu[CPU_COUNT];
 } counter;
 
+typedef struct {
+  uint32_t cpu_index;
+  const Thread_Control *executing;
+  const Thread_Control *heir;
+  const Thread_Control *heir_node;
+  Priority_Control heir_priority;
+} switch_event;
+
 typedef struct {
   rtems_id main_task_id;
+  rtems_id migration_task_id;
   rtems_id counting_sem_id;
   rtems_id mrsp_ids[MRSP_COUNT];
   rtems_id scheduler_ids[CPU_COUNT];
   rtems_id worker_ids[2 * CPU_COUNT];
-  rtems_id timer_id;
   volatile bool stop_worker[CPU_COUNT];
   counter counters[2 * CPU_COUNT];
+  uint32_t migration_counters[CPU_COUNT];
   Thread_Control *worker_task;
   SMP_barrier_Control barrier;
+  SMP_lock_Control switch_lock;
+  size_t switch_index;
+  switch_event switch_events[32];
 } test_context;
 
 static test_context test_instance = {
-  .barrier = SMP_BARRIER_CONTROL_INITIALIZER
+  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
+  .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
 };
 
 static void barrier(test_context *ctx, SMP_barrier_State *bs)
@@ -60,14 +78,27 @@ static void barrier(test_context *ctx, SMP_barrier_State *bs)
   _SMP_barrier_Wait(&ctx->barrier, bs, 2);
 }
 
-static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
+static rtems_task_priority get_prio(rtems_id task_id)
 {
   rtems_status_code sc;
   rtems_task_priority prio;
 
   sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-  rtems_test_assert(prio == expected_prio);
+
+  return prio;
+}
+
+static void wait_for_prio(rtems_id task_id, rtems_task_priority prio)
+{
+  while (get_prio(task_id) != prio) {
+    /* Wait */
+  }
+}
+
+static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
+{
+  rtems_test_assert(get_prio(task_id) == expected_prio);
 }
 
 static void change_prio(rtems_id task_id, rtems_task_priority prio)
@@ -85,6 +116,78 @@ static void assert_executing_worker(test_context *ctx)
   );
 }
 
+static void switch_extension(Thread_Control *executing, Thread_Control *heir)
+{
+  test_context *ctx = &test_instance;
+  SMP_lock_Context lock_context;
+  size_t i;
+
+  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
+
+  i = ctx->switch_index;
+  if (i < SWITCH_EVENT_COUNT) {
+    switch_event *e = &ctx->switch_events[i];
+    Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node(heir);
+
+    e->cpu_index = rtems_get_current_processor();
+    e->executing = executing;
+    e->heir = heir;
+    e->heir_node = _Scheduler_Node_get_owner(&node->Base);
+    e->heir_priority = node->priority;
+
+    ctx->switch_index = i + 1;
+  }
+
+  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
+}
+
+static void reset_switch_events(test_context *ctx)
+{
+  SMP_lock_Context lock_context;
+
+  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
+  ctx->switch_index = 0;
+  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
+}
+
+static size_t get_switch_events(test_context *ctx)
+{
+  SMP_lock_Context lock_context;
+  size_t events;
+
+  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
+  events = ctx->switch_index;
+  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
+
+  return events;
+}
+
+static void print_switch_events(test_context *ctx)
+{
+  size_t n = get_switch_events(ctx);
+  size_t i;
+
+  for (i = 0; i < n; ++i) {
+    switch_event *e = &ctx->switch_events[i];
+    char ex[5];
+    char hr[5];
+    char hn[5];
+
+    rtems_object_get_name(e->executing->Object.id, sizeof(ex), &ex[0]);
+    rtems_object_get_name(e->heir->Object.id, sizeof(hr), &hr[0]);
+    rtems_object_get_name(e->heir_node->Object.id, sizeof(hn), &hn[0]);
+
+    printf(
+      "[%" PRIu32 "] %4s -> %4s (prio %3" PRIu32 ", node %4s)\n",
+      e->cpu_index,
+      &ex[0],
+      &hr[0],
+      e->heir_priority,
+      &hn[0]
+    );
+  }
+}
+
 static void obtain_and_release_worker(rtems_task_argument arg)
 {
   test_context *ctx = &test_instance;
@@ -134,9 +237,8 @@ static void obtain_and_release_worker(rtems_task_argument arg)
   rtems_test_assert(0);
 }
 
-static void test_mrsp_obtain_and_release(void)
+static void test_mrsp_obtain_and_release(test_context *ctx)
 {
-  test_context *ctx = &test_instance;
   rtems_status_code sc;
   rtems_task_priority prio;
   rtems_id scheduler_id;
@@ -144,6 +246,8 @@ static void test_mrsp_obtain_and_release(void)
   puts("test MrsP obtain and release");
 
+  change_prio(RTEMS_SELF, 2);
+
   /* Check executing task parameters */
 
   sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
@@ -151,8 +255,6 @@ static void test_mrsp_obtain_and_release(void)
   rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
 
-  assert_prio(RTEMS_SELF, 2);
-
   /* Create a MrsP semaphore object and lock it */
 
   sc = rtems_semaphore_create(
@@ -408,15 +510,6 @@ static void test_mrsp_unlock_order_error(void)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
-static void deadlock_timer(rtems_id id, void *arg)
-{
-  test_context *ctx = &test_instance;
-  rtems_status_code sc;
-
-  sc = rtems_task_suspend(ctx->worker_ids[0]);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-}
-
 static void deadlock_worker(rtems_task_argument arg)
 {
   test_context *ctx = &test_instance;
@@ -425,9 +518,6 @@ static void deadlock_worker(rtems_task_argument arg)
   sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  sc = rtems_timer_fire_after(ctx->timer_id, 2, deadlock_timer, NULL);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
   sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -444,21 +534,14 @@ static void deadlock_worker(rtems_task_argument arg)
   rtems_test_assert(0);
 }
 
-static void test_mrsp_deadlock_error(void)
+static void test_mrsp_deadlock_error(test_context *ctx)
 {
-  test_context *ctx = &test_instance;
   rtems_status_code sc;
   rtems_task_priority prio = 2;
 
   puts("test MrsP deadlock error");
 
-  assert_prio(RTEMS_SELF, prio);
-
-  sc = rtems_timer_create(
-    rtems_build_name('M', 'R', 'S', 'P'),
-    &ctx->timer_id
-  );
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+  change_prio(RTEMS_SELF, prio);
 
   sc = rtems_semaphore_create(
     rtems_build_name(' ', ' ', ' ', 'A'),
@@ -505,9 +588,6 @@ static void test_mrsp_deadlock_error(void)
   sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  sc = rtems_task_resume(ctx->worker_ids[0]);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
   sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -519,9 +599,6 @@ static void test_mrsp_deadlock_error(void)
   sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  sc = rtems_timer_delete(ctx->timer_id);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
 static void test_mrsp_multiple_obtain(void)
@@ -642,6 +719,206 @@ static void test_mrsp_multiple_obtain(void)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
+static void run_task(rtems_task_argument arg)
+{
+  volatile bool *run = (volatile bool *) arg;
+
+  while (true) {
+    *run = true;
+  }
+}
+
+static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
+{
+  rtems_status_code sc;
+  rtems_id sem_id;
+  rtems_id run_task_id;
+  volatile bool run = false;
+
+  puts("test MrsP obtain and sleep and release");
+
+  change_prio(RTEMS_SELF, 1);
+
+  reset_switch_events(ctx);
+
+  sc = rtems_task_create(
+    rtems_build_name(' ', 'R', 'U', 'N'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &run_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name('S', 'E', 'M', 'A'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    1,
+    &sem_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(!run);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(run);
+  run = false;
+
+  sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(!run);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(!run);
+
+  sc = rtems_semaphore_release(sem_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  print_switch_events(ctx);
+
+  sc = rtems_semaphore_delete(sem_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(run_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void help_task(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  while (true) {
+    /* Do nothing */
+  }
+}
+
+static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
+{
+  rtems_status_code sc;
+  rtems_id help_task_id;
+  rtems_id run_task_id;
+  volatile bool run = false;
+
+  puts("test MrsP obtain and release with help");
+
+  change_prio(RTEMS_SELF, 3);
+
+  reset_switch_events(ctx);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name('S', 'E', 'M', 'A'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    2,
+    &ctx->mrsp_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 2);
+
+  sc = rtems_task_create(
+    rtems_build_name('H', 'E', 'L', 'P'),
+    3,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &help_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(
+    help_task_id,
+    ctx->scheduler_ids[1]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(help_task_id, help_task, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_create(
+    rtems_build_name(' ', 'R', 'U', 'N'),
+    4,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &run_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  wait_for_prio(help_task_id, 2);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(!run);
+
+  change_prio(run_task_id, 1);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  while (!run) {
+    /* Wait */
+  }
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  change_prio(run_task_id, 4);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 3);
+
+  wait_for_prio(help_task_id, 3);
+
+  print_switch_events(ctx);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(help_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(run_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static uint32_t simple_random(uint32_t v)
 {
   v *= 1664525;
@@ -673,6 +950,8 @@ static void load_worker(rtems_task_argument index)
       sc = rtems_task_wake_after(1);
       rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+      ++ctx->counters[index].cpu[rtems_get_current_processor()];
     } else {
       uint32_t n = (v >> 17) % (i + 1);
       uint32_t s;
@@ -695,6 +974,8 @@ static void load_worker(rtems_task_argument index)
          break;
        }
 
+      ++ctx->counters[index].cpu[rtems_get_current_processor()];
+
       v = simple_random(v);
     }
@@ -704,6 +985,8 @@ static void load_worker(rtems_task_argument index)
       sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
       rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+      ++ctx->counters[index].cpu[rtems_get_current_processor()];
     }
   }
@@ -717,16 +1000,47 @@ static void load_worker(rtems_task_argument index)
   rtems_test_assert(0);
 }
 
+static void migration_task(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  uint32_t cpu_count = rtems_get_processor_count();
+  uint32_t v = 0xdeadbeef;
+
+  while (true) {
+    uint32_t cpu_index = (v >> 5) % cpu_count;
+
+    sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    ++ctx->migration_counters[rtems_get_current_processor()];
+
+    v = simple_random(v);
+  }
+}
+
-static void test_mrsp_load(void)
+static void test_mrsp_load(test_context *ctx)
 {
-  test_context *ctx = &test_instance;
   rtems_status_code sc;
   uint32_t cpu_count = rtems_get_processor_count();
   uint32_t index;
 
   puts("test MrsP load");
 
-  assert_prio(RTEMS_SELF, 2);
+  change_prio(RTEMS_SELF, 2);
+
+  sc = rtems_task_create(
+    rtems_build_name('M', 'I', 'G', 'R'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->migration_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(ctx->migration_task_id, migration_task, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_semaphore_create(
     rtems_build_name('S', 'Y', 'N', 'C'),
@@ -829,15 +1143,18 @@ static void test_mrsp_load(void)
   sc = rtems_semaphore_delete(ctx->counting_sem_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
+  sc = rtems_task_delete(ctx->migration_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
   for (index = 0; index < 2 * cpu_count; ++index) {
     uint32_t nest_level;
+    uint32_t cpu_index;
 
     printf(
-      "worker[%" PRIu32 "][%" PRIu32 "]\n"
+      "worker[%" PRIu32 "]\n"
       " sleep = %" PRIu32 "\n"
       " timeout = %" PRIu32 "\n",
-      index / 2,
-      index % 2,
+      index,
       ctx->counters[index].sleep,
       ctx->counters[index].timeout
     );
@@ -849,6 +1166,22 @@ static void test_mrsp_load(void)
         ctx->counters[index].obtain[nest_level]
       );
     }
+
+    for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+      printf(
+        " cpu[%" PRIu32 "] = %" PRIu32 "\n",
+        cpu_index,
+        ctx->counters[index].cpu[cpu_index]
+      );
+    }
   }
+
+  for (index = 0; index < cpu_count; ++index) {
+    printf(
+      "migrations[%" PRIu32 "] = %" PRIu32 "\n",
+      index,
+      ctx->migration_counters[index]
+    );
+  }
 }
@@ -883,10 +1216,12 @@ static void Init(rtems_task_argument arg)
   test_mrsp_initially_locked_error();
   test_mrsp_nested_obtain_error();
   test_mrsp_unlock_order_error();
-  test_mrsp_deadlock_error();
+  test_mrsp_deadlock_error(ctx);
   test_mrsp_multiple_obtain();
-  test_mrsp_obtain_and_release();
-  test_mrsp_load();
+  test_mrsp_obtain_and_sleep_and_release(ctx);
+  test_mrsp_obtain_and_release_with_help(ctx);
+  test_mrsp_obtain_and_release(ctx);
+  test_mrsp_load(ctx);
 
   rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
@@ -901,7 +1236,7 @@ static void Init(rtems_task_argument arg)
 #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
 #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
 
-#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 1)
+#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 2)
 #define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
 #define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
 #define CONFIGURE_MAXIMUM_TIMERS 1
@@ -983,8 +1318,11 @@ RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
   RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
   RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
 
-#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+#define CONFIGURE_INITIAL_EXTENSIONS \
+  { .thread_switch = switch_extension }, \
+  RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_INIT_TASK_NAME rtems_build_name('M', 'A', 'I', 'N')
 #define CONFIGURE_INIT_TASK_PRIORITY 2
 #define CONFIGURE_RTEMS_INIT_TASKS_TABLE

View File

@@ -2,146 +2,343 @@
 test MrsP flush error
 test MrsP initially locked error
 test MrsP nested obtain error
+test MrsP unlock order error
+test MrsP deadlock error
+test MrsP multiple obtain
+test MrsP obtain and sleep and release
+[0] MAIN -> RUN (prio 2, node RUN)
+[0] RUN -> MAIN (prio 1, node MAIN)
+[0] MAIN -> IDLE (prio 1, node MAIN)
+[0] IDLE -> MAIN (prio 1, node MAIN)
+test MrsP obtain and release with help
+[1] IDLE -> HELP (prio 3, node HELP)
+[0] MAIN -> IDLE (prio 2, node MAIN)
+[0] IDLE -> MAIN (prio 2, node MAIN)
+[1] HELP -> MAIN (prio 2, node HELP)
+[0] MAIN -> RUN (prio 1, node RUN)
+[1] MAIN -> HELP (prio 2, node HELP)
+[1] HELP -> MAIN (prio 2, node HELP)
+[0] RUN -> IDLE (prio 2, node MAIN)
+[1] MAIN -> HELP (prio 2, node HELP)
+[1] HELP -> MAIN (prio 2, node HELP)
+[1] MAIN -> HELP (prio 2, node HELP)
+[0] IDLE -> MAIN (prio 3, node MAIN)
 test MrsP obtain and release
 test MrsP load
-worker[0][0]
-sleep = 890
-timeout = 1455
-obtain[0] = 141069
-obtain[1] = 111062
-obtain[2] = 255631
-obtain[3] = 186559
-obtain[4] = 310707
-obtain[5] = 246838
-obtain[6] = 331853
-obtain[7] = 298938
-obtain[8] = 331989
-obtain[9] = 343041
-obtain[10] = 310191
-obtain[11] = 381001
-obtain[12] = 269001
-obtain[13] = 412849
-obtain[14] = 217768
-obtain[15] = 444036
-obtain[16] = 160721
-obtain[17] = 476211
-obtain[18] = 151929
-obtain[19] = 438664
-obtain[20] = 132708
-obtain[21] = 388090
-obtain[22] = 118166
-obtain[23] = 337468
-obtain[24] = 96676
-obtain[25] = 271392
-obtain[26] = 75445
-obtain[27] = 203259
-obtain[28] = 52933
-obtain[29] = 132769
-obtain[30] = 27856
-obtain[31] = 57014
-worker[0][1]
-sleep = 15
-timeout = 33
-obtain[0] = 2241
-obtain[1] = 1890
-obtain[2] = 4128
-obtain[3] = 3128
-obtain[4] = 5110
-obtain[5] = 3981
-obtain[6] = 5348
-obtain[7] = 4825
-obtain[8] = 5184
-obtain[9] = 5720
-obtain[10] = 4488
-obtain[11] = 6038
-obtain[12] = 4095
-obtain[13] = 6658
-obtain[14] = 3754
-obtain[15] = 6768
-obtain[16] = 2654
-obtain[17] = 7051
-obtain[18] = 2679
-obtain[19] = 6956
-obtain[20] = 2498
-obtain[21] = 6173
-obtain[22] = 2024
-obtain[23] = 5514
-obtain[24] = 1650
-obtain[25] = 4141
-obtain[26] = 1568
-obtain[27] = 3285
-obtain[28] = 812
-obtain[29] = 2317
-obtain[30] = 527
-obtain[31] = 996
-worker[1][0]
-sleep = 890
-timeout = 1581
-obtain[0] = 140732
-obtain[1] = 111655
-obtain[2] = 256936
-obtain[3] = 186534
-obtain[4] = 311714
-obtain[5] = 248065
-obtain[6] = 333155
-obtain[7] = 300734
-obtain[8] = 329675
-obtain[9] = 343832
-obtain[10] = 309112
-obtain[11] = 380452
-obtain[12] = 270156
-obtain[13] = 416600
-obtain[14] = 223484
-obtain[15] = 444991
-obtain[16] = 163750
-obtain[17] = 476096
-obtain[18] = 150317
-obtain[19] = 432827
-obtain[20] = 133946
-obtain[21] = 388441
-obtain[22] = 119760
-obtain[23] = 337033
-obtain[24] = 99153
-obtain[25] = 271558
-obtain[26] = 77535
-obtain[27] = 202607
-obtain[28] = 53225
-obtain[29] = 130801
-obtain[30] = 27321
-obtain[31] = 56239
-worker[1][1]
-sleep = 25
-timeout = 48
-obtain[0] = 2164
-obtain[1] = 1722
-obtain[2] = 4095
-obtain[3] = 3002
-obtain[4] = 4950
-obtain[5] = 4020
-obtain[6] = 5235
-obtain[7] = 4716
-obtain[8] = 5407
-obtain[9] = 5070
-obtain[10] = 5082
-obtain[11] = 6130
-obtain[12] = 4368
-obtain[13] = 6108
-obtain[14] = 3270
-obtain[15] = 6800
-obtain[16] = 2652
-obtain[17] = 7633
-obtain[18] = 2451
-obtain[19] = 7480
-obtain[20] = 2079
-obtain[21] = 6232
-obtain[22] = 1590
-obtain[23] = 5739
-obtain[24] = 1627
-obtain[25] = 4030
-obtain[26] = 1296
-obtain[27] = 2803
-obtain[28] = 969
-obtain[29] = 2253
-obtain[30] = 217
-obtain[31] = 930
+worker[0]
+sleep = 16
+timeout = 3420
+obtain[0] = 2503
+obtain[1] = 1671
+obtain[2] = 4446
+obtain[3] = 2850
+obtain[4] = 5003
+obtain[5] = 3793
+obtain[6] = 5575
+obtain[7] = 4468
+obtain[8] = 5326
+obtain[9] = 4645
+obtain[10] = 4565
+obtain[11] = 5082
+obtain[12] = 4050
+obtain[13] = 5203
+obtain[14] = 2945
+obtain[15] = 5704
+obtain[16] = 1652
+obtain[17] = 5870
+obtain[18] = 1472
+obtain[19] = 4933
+obtain[20] = 1136
+obtain[21] = 3463
+obtain[22] = 1257
+obtain[23] = 3230
+obtain[24] = 823
+obtain[25] = 2860
+obtain[26] = 736
+obtain[27] = 1270
+obtain[28] = 438
+obtain[29] = 1273
+obtain[30] = 378
+obtain[31] = 422
+cpu[0] = 116428
+cpu[1] = 16133
+cpu[2] = 32982
+cpu[3] = 32557
+worker[1]
+sleep = 1
+timeout = 4
+obtain[0] = 2
+obtain[1] = 0
+obtain[2] = 3
+obtain[3] = 8
+obtain[4] = 10
+obtain[5] = 0
+obtain[6] = 7
+obtain[7] = 0
+obtain[8] = 0
+obtain[9] = 0
+obtain[10] = 11
+obtain[11] = 0
+obtain[12] = 26
+obtain[13] = 14
+obtain[14] = 5
+obtain[15] = 0
+obtain[16] = 0
+obtain[17] = 18
+obtain[18] = 12
+obtain[19] = 0
+obtain[20] = 0
+obtain[21] = 0
+obtain[22] = 0
+obtain[23] = 0
+obtain[24] = 0
+obtain[25] = 0
+obtain[26] = 0
+obtain[27] = 0
+obtain[28] = 0
+obtain[29] = 0
+obtain[30] = 0
+obtain[31] = 0
+cpu[0] = 140
+cpu[1] = 24
+cpu[2] = 33
+cpu[3] = 36
+worker[2]
+sleep = 14
+timeout = 3513
+obtain[0] = 2474
+obtain[1] = 1793
+obtain[2] = 4551
+obtain[3] = 2833
+obtain[4] = 5293
+obtain[5] = 3681
+obtain[6] = 5309
+obtain[7] = 4565
+obtain[8] = 5270
+obtain[9] = 4610
+obtain[10] = 4817
+obtain[11] = 4760
+obtain[12] = 3858
+obtain[13] = 5919
+obtain[14] = 3172
+obtain[15] = 5286
+obtain[16] = 1968
+obtain[17] = 5800
+obtain[18] = 1768
+obtain[19] = 4629
+obtain[20] = 1446
+obtain[21] = 4109
+obtain[22] = 1046
+obtain[23] = 3119
+obtain[24] = 1012
+obtain[25] = 2338
+obtain[26] = 790
+obtain[27] = 1628
+obtain[28] = 432
+obtain[29] = 1281
+obtain[30] = 270
+obtain[31] = 418
+cpu[0] = 15589
+cpu[1] = 121473
+cpu[2] = 31797
+cpu[3] = 31645
+worker[3]
+sleep = 1
+timeout = 3
+obtain[0] = 4
+obtain[1] = 0
+obtain[2] = 9
+obtain[3] = 0
+obtain[4] = 5
+obtain[5] = 0
+obtain[6] = 0
+obtain[7] = 8
+obtain[8] = 0
+obtain[9] = 10
+obtain[10] = 0
+obtain[11] = 0
+obtain[12] = 0
+obtain[13] = 0
+obtain[14] = 0
+obtain[15] = 16
+obtain[16] = 0
+obtain[17] = 18
+obtain[18] = 0
+obtain[19] = 0
+obtain[20] = 0
+obtain[21] = 1
+obtain[22] = 0
+obtain[23] = 24
+obtain[24] = 0
+obtain[25] = 0
+obtain[26] = 0
+obtain[27] = 0
+obtain[28] = 0
+obtain[29] = 0
+obtain[30] = 0
+obtain[31] = 0
+cpu[0] = 22
+cpu[1] = 123
+cpu[2] = 7
+cpu[3] = 39
+worker[4]
+sleep = 19
+timeout = 3025
+obtain[0] = 2574
+obtain[1] = 1845
+obtain[2] = 4423
+obtain[3] = 2985
+obtain[4] = 5086
+obtain[5] = 3679
+obtain[6] = 5286
+obtain[7] = 4447
+obtain[8] = 5885
+obtain[9] = 4771
+obtain[10] = 4857
+obtain[11] = 5467
+obtain[12] = 4554
+obtain[13] = 5210
+obtain[14] = 3547
+obtain[15] = 6169
+obtain[16] = 2337
+obtain[17] = 6109
+obtain[18] = 1797
+obtain[19] = 5136
+obtain[20] = 1646
+obtain[21] = 4251
+obtain[22] = 1041
+obtain[23] = 3235
+obtain[24] = 975
+obtain[25] = 2961
+obtain[26] = 739
+obtain[27] = 1704
+obtain[28] = 554
+obtain[29] = 1344
+obtain[30] = 182
+obtain[31] = 559
+cpu[0] = 5617
+cpu[1] = 5834
+cpu[2] = 100129
+cpu[3] = 99149
+worker[5]
+sleep = 18
+timeout = 3123
+obtain[0] = 2439
+obtain[1] = 1878
+obtain[2] = 4576
+obtain[3] = 2938
+obtain[4] = 5088
+obtain[5] = 3723
+obtain[6] = 5611
+obtain[7] = 4411
+obtain[8] = 5522
+obtain[9] = 4893
+obtain[10] = 4877
+obtain[11] = 4932
+obtain[12] = 4263
+obtain[13] = 5608
+obtain[14] = 2791
+obtain[15] = 5905
+obtain[16] = 1739
+obtain[17] = 5322
+obtain[18] = 1892
+obtain[19] = 5118
+obtain[20] = 1360
+obtain[21] = 4764
+obtain[22] = 1099
+obtain[23] = 3180
+obtain[24] = 913
+obtain[25] = 2612
+obtain[26] = 807
+obtain[27] = 1588
+obtain[28] = 493
+obtain[29] = 1348
+obtain[30] = 389
+obtain[31] = 471
+cpu[0] = 5554
+cpu[1] = 6139
+cpu[2] = 97852
+cpu[3] = 95573
+worker[6]
+sleep = 1
+timeout = 11
+obtain[0] = 2
+obtain[1] = 2
+obtain[2] = 9
+obtain[3] = 4
+obtain[4] = 15
+obtain[5] = 12
+obtain[6] = 7
+obtain[7] = 16
+obtain[8] = 10
+obtain[9] = 20
+obtain[10] = 11
+obtain[11] = 5
+obtain[12] = 0
+obtain[13] = 0
+obtain[14] = 0
+obtain[15] = 16
+obtain[16] = 0
+obtain[17] = 0
+obtain[18] = 0
+obtain[19] = 20
+obtain[20] = 0
+obtain[21] = 44
+obtain[22] = 0
+obtain[23] = 0
+obtain[24] = 0
+obtain[25] = 7
+obtain[26] = 0
+obtain[27] = 0
+obtain[28] = 0
+obtain[29] = 0
+obtain[30] = 0
+obtain[31] = 0
+cpu[0] = 10
+cpu[1] = 6
+cpu[2] = 168
+cpu[3] = 217
+worker[7]
+sleep = 1
+timeout = 0
+obtain[0] = 0
+obtain[1] = 0
+obtain[2] = 0
+obtain[3] = 0
+obtain[4] = 0
+obtain[5] = 0
+obtain[6] = 0
+obtain[7] = 0
+obtain[8] = 0
+obtain[9] = 0
+obtain[10] = 0
+obtain[11] = 0
+obtain[12] = 0
+obtain[13] = 0
+obtain[14] = 0
+obtain[15] = 0
+obtain[16] = 0
+obtain[17] = 0
+obtain[18] = 0
+obtain[19] = 0
+obtain[20] = 0
+obtain[21] = 0
+obtain[22] = 0
+obtain[23] = 0
+obtain[24] = 0
+obtain[25] = 0
+obtain[26] = 0
+obtain[27] = 0
+obtain[28] = 0
+obtain[29] = 0
+obtain[30] = 0
+obtain[31] = 0
+cpu[0] = 0
+cpu[1] = 0
+cpu[2] = 1
+cpu[3] = 0
+migrations[0] = 110919
+migrations[1] = 110920
+migrations[2] = 109762
+migrations[3] = 112076
 *** END OF TEST SMPMRSP 1 ***