rtems: Add rtems_scheduler_get_processor()
Add rtems_scheduler_get_processor() as a replacement for rtems_get_current_processor(). rtems_get_current_processor() is a bit of an orphan; adopt it into the Scheduler Manager. This is in line with the glibc sched_getcpu() function. Deprecate rtems_get_current_processor(). Update #3731.
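For reviewers, a minimal call-site sketch of the renamed directive (the function below and its printk output are illustrative only, not part of this commit):

#include <inttypes.h>
#include <rtems.h>
#include <rtems/bspIo.h>

/* Illustrative only: report the processor the calling task currently
 * executes on.  rtems_scheduler_get_processor() is the new name; the old
 * rtems_get_current_processor() still compiles, but it is now marked
 * RTEMS_DEPRECATED and produces a compiler warning.
 */
static void report_processor( void )
{
  uint32_t cpu_index = rtems_scheduler_get_processor();

  printk( "executing on processor %" PRIu32 "\n", cpu_index );
}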
@@ -69,7 +69,7 @@ static void restart_interrupt(void *arg)
   _SMP_barrier_State_initialize(&bs);
   _SMP_barrier_Wait(&restart_barrier, &bs, _SMP_Processor_count);
 
-  cpu_self_index = rtems_get_current_processor();
+  cpu_self_index = rtems_scheduler_get_processor();
   thread_index = cpu_self_index % QORIQ_THREAD_COUNT;
 
   if (cpu_self_index == 0) {
@@ -48,7 +48,7 @@ void bsp_fatal_extension(
    * processor.
    */
   uint32_t max_wait = 1234567;
-  uint32_t self_cpu = rtems_get_current_processor();
+  uint32_t self_cpu = rtems_scheduler_get_processor();
   uint32_t cpu_count = rtems_get_processor_count();
   uint32_t halt_mask = 0;
   uint32_t i;
@@ -49,25 +49,6 @@ extern "C" {
  */
 uint32_t rtems_get_processor_count(void);
 
-/**
- * @brief Returns the index of the current processor.
- *
- * On uni-processor configurations a value of zero will be returned.
- *
- * On SMP configurations an architecture specific method is used to obtain the
- * index of the current processor in the system. The set of processor indices
- * is the range of integers starting with zero up to the processor count minus
- * one.
- *
- * Outside of sections with disabled thread dispatching the current processor
- * index may change after every instruction since the thread may migrate from
- * one processor to another. Sections with disabled interrupts are sections
- * with thread dispatching disabled.
- *
- * @return The index of the current processor.
- */
-uint32_t rtems_get_current_processor(void);
-
 /** @} */
 
 #ifdef __cplusplus
@@ -583,6 +583,40 @@ rtems_status_code rtems_scheduler_ident_by_processor_set(
   rtems_id *id
 );
 
+/**
+ * @brief Returns the index of the current processor.
+ *
+ * In uniprocessor configurations, a value of zero will be returned.
+ *
+ * In SMP configurations, an architecture specific method is used to obtain the
+ * index of the current processor in the system. The set of processor indices
+ * is the range of integers starting with zero up to the processor count minus
+ * one.
+ *
+ * Outside of sections with disabled thread dispatching the current processor
+ * index may change after every instruction since the thread may migrate from
+ * one processor to another. Sections with disabled interrupts are sections
+ * with thread dispatching disabled.
+ *
+ * @return The index of the current processor.
+ */
+uint32_t rtems_scheduler_get_processor( void );
+
+/**
+ * @brief Returns the index of the current processor.
+ *
+ * @return The index of the current processor.
+ *
+ * @deprecated
+ *
+ * Use rtems_scheduler_get_processor() instead.
+ */
+RTEMS_DEPRECATED RTEMS_INLINE_ROUTINE uint32_t
+rtems_get_current_processor( void )
+{
+  return rtems_scheduler_get_processor();
+}
+
 /**
  * @brief Gets the set of processors owned by the specified scheduler instance.
  *
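As the documentation block added above warns, the index is only stable while thread dispatching is disabled; otherwise the executing thread may migrate after any instruction. A minimal sketch of sampling a stable index (the helper name is hypothetical; the test updates below use the same disable/read/enable pattern):

#include <rtems.h>

/* Hypothetical helper: sample the processor index with interrupts disabled,
 * so the executing thread cannot migrate between the read and the point
 * where the caller consumes the value.
 */
static uint32_t sample_processor_index( void )
{
  rtems_interrupt_level level;
  uint32_t cpu_index;

  rtems_interrupt_local_disable( level );
  cpu_index = rtems_scheduler_get_processor();
  rtems_interrupt_local_enable( level );

  return cpu_index;
}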
@@ -356,7 +356,7 @@ void
 rtems_capture_record_lock (rtems_capture_record_lock_context* context)
 {
   rtems_capture_per_cpu_data* cpu;
-  cpu = capture_per_cpu_get (rtems_get_current_processor ());
+  cpu = capture_per_cpu_get (rtems_scheduler_get_processor ());
   rtems_interrupt_lock_interrupt_disable (&context->lock_context);
   context->lock = &cpu->lock;
   rtems_interrupt_lock_acquire_isr (&cpu->lock, &context->lock_context);
@@ -379,7 +379,7 @@ rtems_capture_record_open (rtems_tcb* tcb,
 
   size += sizeof (rtems_capture_record);
 
-  cpu = capture_per_cpu_get (rtems_get_current_processor ());
+  cpu = capture_per_cpu_get (rtems_scheduler_get_processor ());
 
   rtems_capture_record_lock (context);
 
@@ -115,7 +115,7 @@ load_worker(rtems_task_argument arg)
   chunk = lctx->chunk;
   chunk_size = ctx->chunk_size;
   cache_line_size = ctx->cache_line_size;
-  token = (unsigned int)rtems_get_current_processor();
+  token = (unsigned int)rtems_scheduler_get_processor();
 
   token = dirty_data_cache(chunk, chunk_size, cache_line_size, token);
   wakeup_master(ctx);
@@ -74,7 +74,7 @@ static void run_tests(
    */
   rtems_interrupt_local_disable(level);
   _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
-  worker_index = rtems_get_current_processor();
+  worker_index = rtems_scheduler_get_processor();
   rtems_interrupt_local_enable(level);
 
   _Assert(worker_index < ctx->worker_count);
@@ -16,10 +16,10 @@
 #include "config.h"
 #endif
 
-#include <rtems/rtems/smp.h>
+#include <rtems/rtems/tasks.h>
 #include <rtems/score/smp.h>
 
-uint32_t rtems_get_current_processor(void)
+uint32_t rtems_scheduler_get_processor(void)
 {
   return _SMP_Get_current_processor();
 }
@@ -35,7 +35,7 @@ rtems_task Init(
   rtems_status_code status;
   bool allDone;
 
-  cpu_self = rtems_get_current_processor();
+  cpu_self = rtems_scheduler_get_processor();
 
   /* XXX - Delay a bit to allow debug messages from
    * startup to print. This may need to go away when
@@ -28,7 +28,7 @@ rtems_task Test_task(
   rtems_test_assert( p != NULL );
 
   /* Get the CPU Number */
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
 
   /* Print that the task is up and running. */
   Loop();
@@ -73,7 +73,7 @@ rtems_task Init(
     &id
   );
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
   locked_printf(" CPU %" PRIu32 " start task TA%c\n", cpu_num, ch);
   status = rtems_task_start( id, Test_task, i+1 );
   directive_failed( status, str );
@@ -34,7 +34,7 @@ rtems_task Test_task(
   uint32_t cpu_num;
   rtems_status_code sc;
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
 
   do {
 
@@ -37,7 +37,7 @@ void PrintTaskInfo(
 {
   uint32_t cpu_num;
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
 
   locked_printf(" CPU %" PRIu32 " running task %s\n", cpu_num, task_name );
 }
@@ -35,7 +35,7 @@ rtems_task Test_task(
   rtems_task_argument argument
 )
 {
-  locked_printf( "Shut down from CPU %" PRIu32 "\n", rtems_get_current_processor() );
+  locked_printf( "Shut down from CPU %" PRIu32 "\n", rtems_scheduler_get_processor() );
   success();
 }
 
@@ -69,7 +69,7 @@ rtems_task Init(
   );
   directive_failed( status, "task create" );
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
   locked_printf(" CPU %" PRIu32 " start task TA%c\n", cpu_num, ch);
 
   status = rtems_task_start( id, Test_task, i+1 );
@@ -48,7 +48,7 @@ rtems_task Test_task(
   rtems_test_assert( p != NULL );
 
   /* Get the CPU Number */
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
 
   /* Print that the task is up and running. */
   locked_printf(" CPU %" PRIu32 " runnng Task %s and blocking\n", cpu_num, name);
@@ -130,7 +130,7 @@ rtems_task Init(
   );
   directive_failed( status, "task create" );
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
   locked_printf(" CPU %d start task TA1\n", cpu_num );
   status = rtems_task_start( id, Test_task, 1 );
   directive_failed( status, "task start" );
@@ -23,7 +23,7 @@ void PrintTaskInfo(
 {
   uint32_t cpu_num;
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
 
   /* Print the cpu number and task name */
   locked_printf(
@@ -66,7 +66,7 @@ rtems_task Init(
   );
   directive_failed( status, "task create" );
 
-  cpu_num = rtems_get_current_processor();
+  cpu_num = rtems_scheduler_get_processor();
   locked_printf(" CPU %" PRIu32 " start task TA%c\n", cpu_num, ch);
 
   status = rtems_task_start( id, Test_task, i+1 );
@@ -487,7 +487,7 @@ static void test_atomic_store_load_rmw_body(
    * Use the physical processor index, to observe timing differences introduced
    * by the system topology.
    */
-  cpu_self_index = rtems_get_current_processor();
+  cpu_self_index = rtems_scheduler_get_processor();
 
   /* Store release and load acquire test case */
 
@@ -638,7 +638,7 @@ static void test_single_writer_seqlock_body(
    * Use the physical processor index, to observe timing differences introduced
    * by the system topology.
    */
-  cpu_self_index = rtems_get_current_processor();
+  cpu_self_index = rtems_scheduler_get_processor();
 
   if (cpu_self_index == 0) {
     counter = 0;
@@ -700,7 +700,7 @@ static void test_multi_writer_seqlock_body(
    * Use the physical processor index, to observe timing differences introduced
    * by the system topology.
    */
-  cpu_self_index = rtems_get_current_processor();
+  cpu_self_index = rtems_scheduler_get_processor();
 
   if (cpu_self_index % 2 == 0) {
     counter = 0;
@@ -47,7 +47,7 @@ static void test_action( void *arg )
 {
   rtems_test_assert(arg == &ctx);
 
-  ctx.count[rtems_get_current_processor()]++;
+  ctx.count[rtems_scheduler_get_processor()]++;
 }
 
 typedef void ( *test_case )(
@@ -68,7 +68,7 @@ static void test_cache_invalidate_multiple_instruction_lines(
   const cpu_set_t *cpu_set
 )
 {
-  uint32_t self = rtems_get_current_processor();
+  uint32_t self = rtems_scheduler_get_processor();
 
   ctx.do_longjmp[self] = true;
 
@@ -87,7 +87,7 @@ static void barrier( SMP_barrier_State *bs )
 
 static void broadcast_test_init( void )
 {
-  ctx.count[rtems_get_current_processor()] = 0;
+  ctx.count[rtems_scheduler_get_processor()] = 0;
 }
 
 static void broadcast_test_body(
@@ -101,7 +101,7 @@ static void broadcast_test_body(
 static void broadcast_test_fini( void )
 {
   rtems_test_assert(
-    ctx.count[rtems_get_current_processor()] == rtems_get_processor_count()
+    ctx.count[rtems_scheduler_get_processor()] == rtems_get_processor_count()
   );
 }
 
@@ -169,7 +169,7 @@ static void call_tests_with_thread_dispatch_disabled( size_t set_size,
 
 static void cmlog( const char* str )
 {
-  if ( rtems_get_current_processor() == 0 )
+  if ( rtems_scheduler_get_processor() == 0 )
     printf( "%s", str );
 }
 
@@ -256,7 +256,7 @@ static void fatal_extension(
   rtems_fatal_code error
 )
 {
-  uint32_t self = rtems_get_current_processor();
+  uint32_t self = rtems_scheduler_get_processor();
 
   if (source == RTEMS_FATAL_SOURCE_EXCEPTION && ctx.do_longjmp[self]) {
     _ISR_Set_level(0);
@@ -76,7 +76,7 @@ static void task(rtems_task_argument arg)
     sc = rtems_semaphore_obtain (task_sem, RTEMS_NO_WAIT, 0);
     if (sc == RTEMS_SUCCESSFUL) {
       task_data[arg].ran = true;
-      task_data[arg].actual_cpu = rtems_get_current_processor();
+      task_data[arg].actual_cpu = rtems_scheduler_get_processor();
       rtems_semaphore_release(task_sem);
       test_delay(1);
     }
@@ -89,7 +89,7 @@ static void set_init_task(void)
 
   /* Set Init task data */
   task_data[0].ran = true;
-  task_data[0].actual_cpu = rtems_get_current_processor();
+  task_data[0].actual_cpu = rtems_scheduler_get_processor();
 
   rtems_semaphore_release(task_sem);
 }
@@ -64,7 +64,7 @@ static void timer_task(rtems_task_argument arg)
   rtems_status_code sc;
   rtems_id timer_id;
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   sc = rtems_timer_create(SCHEDULER_B, &timer_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -96,7 +96,7 @@ static void delay_clock_tick(test_context *ctx)
   const Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index(1);
   uint64_t ticks;
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   rtems_test_spin_until_next_tick();
   ticks = cpu_self->Watchdog.ticks;
@@ -48,7 +48,7 @@ static void fatal_extension(
   SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
 
   if (source == RTEMS_FATAL_SOURCE_SMP) {
-    uint32_t self = rtems_get_current_processor();
+    uint32_t self = rtems_scheduler_get_processor();
 
     assert(!always_set_to_false);
     assert(code == SMP_FATAL_SHUTDOWN);
@@ -76,7 +76,7 @@ static rtems_status_code test_driver_init(
   void *arg
 )
 {
-  uint32_t self = rtems_get_current_processor();
+  uint32_t self = rtems_scheduler_get_processor();
   uint32_t cpu_count = rtems_get_processor_count();
   uint32_t cpu;
 
@@ -46,7 +46,7 @@ static void fatal_extension(
 )
 {
   SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
-  uint32_t self = rtems_get_current_processor();
+  uint32_t self = rtems_scheduler_get_processor();
 
   assert(!always_set_to_false);
 
@@ -80,7 +80,7 @@ static rtems_status_code test_driver_init(
   void *arg
 )
 {
-  uint32_t self = rtems_get_current_processor();
+  uint32_t self = rtems_scheduler_get_processor();
   uint32_t cpu_count = rtems_get_processor_count();
   uint32_t cpu;
 
@@ -79,7 +79,7 @@ static void test_send_message_while_processing_a_message(
 )
 {
   uint32_t cpu_count = rtems_get_processor_count();
-  uint32_t cpu_index_self = rtems_get_current_processor();
+  uint32_t cpu_index_self = rtems_scheduler_get_processor();
   uint32_t cpu_index;
   SMP_barrier_State *bs = &ctx->main_barrier_state;
 
@@ -132,7 +132,7 @@ static void test_send_message_flood(
 )
 {
   uint32_t cpu_count = rtems_get_processor_count();
-  uint32_t cpu_index_self = rtems_get_current_processor();
+  uint32_t cpu_index_self = rtems_scheduler_get_processor();
   uint32_t cpu_index;
 
   _SMP_Set_test_message_handler(counter_handler);
@@ -72,7 +72,7 @@ static void runner(rtems_task_argument self)
   test_counters *next_counters = &ctx->counters[next];
 
   while (true) {
-    uint32_t current_cpu = rtems_get_current_processor();
+    uint32_t current_cpu = rtems_scheduler_get_processor();
 
     ++counters->cycles_per_cpu[current_cpu].counter;
 
@@ -57,7 +57,7 @@ static void migration_task(rtems_task_argument arg)
   uint32_t task_index = arg;
   rtems_task_priority prio = migration_task_prio(task_index);
   uint32_t cpu_count = rtems_get_processor_count();
-  uint32_t cpu_index = rtems_get_current_processor();
+  uint32_t cpu_index = rtems_scheduler_get_processor();
 
   while (true) {
     rtems_status_code sc;
@@ -73,7 +73,7 @@ static void migration_task(rtems_task_argument arg)
 
     ++ctx->counters[task_index].value;
 
-    rtems_test_assert(cpu_index == rtems_get_current_processor());
+    rtems_test_assert(cpu_index == rtems_scheduler_get_processor());
   }
 }
 
@@ -154,7 +154,7 @@ static void switch_extension(Thread_Control *executing, Thread_Control *heir)
   switch_event *e = &ctx->switch_events[i];
   Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node(heir);
 
-  e->cpu_index = rtems_get_current_processor();
+  e->cpu_index = rtems_scheduler_get_processor();
   e->executing = executing;
   e->heir = heir;
   e->heir_node = _Scheduler_Node_get_owner(&node->Base);
@@ -505,9 +505,9 @@ static void test_mrsp_obtain_and_release(test_context *ctx)
   );
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
-  while (rtems_get_current_processor() != 0) {
+  while (rtems_scheduler_get_processor() != 0) {
     /* Wait */
   }
 
@@ -654,7 +654,7 @@ static void test_mrsp_obtain_after_migration(test_context *ctx)
   sc = rtems_task_start(ctx->high_task_id[0], obtain_after_migration_high, 0);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   /* Obtain done (I) */
   _SMP_barrier_State_initialize(&barrier_state);
@@ -663,7 +663,7 @@
   sc = rtems_task_suspend(ctx->high_task_id[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   /*
    * Obtain second MrsP semaphore and ensure that we change the priority of our
@@ -675,7 +675,7 @@
 
   assert_prio(RTEMS_SELF, 1);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   sc = rtems_semaphore_release(ctx->mrsp_ids[2]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -686,7 +686,7 @@
   /* Ready to release (J) */
   barrier(ctx, &barrier_state);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   /* Prepare barrier for worker */
   barrier_init(ctx);
@@ -695,7 +695,7 @@
   sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   print_switch_events(ctx);
 
@@ -1106,7 +1106,7 @@ static void various_block_unblock(test_context *ctx)
   sc = rtems_task_resume(ctx->worker_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   /* Use node of the active rival */
 
@@ -1116,7 +1116,7 @@
   sc = rtems_task_resume(ctx->high_task_id[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   sc = rtems_task_suspend(ctx->worker_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -1152,7 +1152,7 @@
   sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   assert_prio(RTEMS_SELF, 4);
 
@@ -1409,12 +1409,12 @@ static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
   sc = rtems_task_wake_after(2);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
   rtems_test_assert(!run);
 
   change_prio(run_task_id, 1);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   while (!run) {
     /* Wait */
@@ -1423,11 +1423,11 @@
   sc = rtems_task_wake_after(2);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   change_prio(run_task_id, 4);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   /*
    * With this operation the scheduler instance 0 has now only the main and the
@@ -1436,7 +1436,7 @@
   sc = rtems_task_suspend(run_task_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   change_prio(RTEMS_SELF, 1);
   change_prio(RTEMS_SELF, 3);
@@ -1444,7 +1444,7 @@
   sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   assert_prio(RTEMS_SELF, 3);
 
@@ -1494,7 +1494,7 @@ static void load_worker(rtems_task_argument index)
       sc = rtems_task_wake_after(1);
       rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-      ++ctx->counters[index].cpu[rtems_get_current_processor()];
+      ++ctx->counters[index].cpu[rtems_scheduler_get_processor()];
     } else {
       uint32_t n = (v >> 17) % (i + 1);
       uint32_t s;
@@ -1517,7 +1517,7 @@
         break;
       }
 
-      ++ctx->counters[index].cpu[rtems_get_current_processor()];
+      ++ctx->counters[index].cpu[rtems_scheduler_get_processor()];
 
       v = simple_random(v);
     }
@@ -1529,7 +1529,7 @@
     sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-    ++ctx->counters[index].cpu[rtems_get_current_processor()];
+    ++ctx->counters[index].cpu[rtems_scheduler_get_processor()];
   }
 }
 
@@ -1556,7 +1556,7 @@ static void migration_task(rtems_task_argument arg)
     sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index], 2);
     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-    ++ctx->migration_counters[rtems_get_current_processor()];
+    ++ctx->migration_counters[rtems_scheduler_get_processor()];
 
     v = simple_random(v);
   }
@@ -84,7 +84,7 @@ static test_context test_instance;
 
 static void assert_cpu(uint32_t expected_cpu)
 {
-  rtems_test_assert(rtems_get_current_processor() == expected_cpu);
+  rtems_test_assert(rtems_scheduler_get_processor() == expected_cpu);
 }
 
 static void test_task_get_priority_not_defined(test_context *ctx)
@@ -226,7 +226,7 @@ static void request(test_context *ctx, task_id id, request_id req)
   send_event(ctx, id, req);
   clear_done(ctx);
 
-  if (rtems_get_current_processor() == 0) {
+  if (rtems_scheduler_get_processor() == 0) {
     id = H_B;
   } else {
     id = H_A;
@@ -50,7 +50,7 @@ static void *thread_b(void *arg)
 
   ctx = arg;
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -61,7 +61,7 @@
   sc = rtems_task_set_scheduler(pthread_self(), scheduler_b_id, prio);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   eno = pthread_mutex_init(&ctx->mtx_b, &ctx->mtx_attr);
   rtems_test_assert(eno == 0);
@@ -97,7 +97,7 @@ static void test(test_context *ctx)
 
   cpu_count = rtems_get_processor_count();
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   eno = pthread_mutexattr_init(&ctx->mtx_attr);
   rtems_test_assert(eno == 0);
 
@@ -82,14 +82,14 @@ static void signal_send(test_context *ctx, test_state new_state)
 static void check_consumer_processor(const test_context *ctx)
 {
   rtems_test_assert(
-    ctx->consumer_processor == rtems_get_current_processor()
+    ctx->consumer_processor == rtems_scheduler_get_processor()
   );
 }
 
 static void check_producer_processor(const test_context *ctx)
 {
   rtems_test_assert(
-    ctx->producer_processor == rtems_get_current_processor()
+    ctx->producer_processor == rtems_scheduler_get_processor()
   );
 }
 
@@ -97,7 +97,7 @@ static void *producer(void *arg)
 {
   test_context *ctx = arg;
 
-  ctx->producer_processor = rtems_get_current_processor();
+  ctx->producer_processor = rtems_scheduler_get_processor();
 
   rtems_test_assert(ctx->consumer_processor != ctx->producer_processor);
 
@@ -120,7 +120,7 @@ static void test(void)
   void *producer_status;
 
   ctx->consumer = pthread_self();
-  ctx->consumer_processor = rtems_get_current_processor();
+  ctx->consumer_processor = rtems_scheduler_get_processor();
 
   memset(&new_action, 0, sizeof(new_action));
   new_action.sa_handler = signal_handler;
@@ -66,7 +66,7 @@ static void task(rtems_task_argument arg)
   uint32_t cpu;
   cpu_set_t cpuset;
 
-  cpu = rtems_get_current_processor();
+  cpu = rtems_scheduler_get_processor();
 
   rtems_task_get_affinity( rtems_task_self(), sizeof(cpuset), &cpuset );
 
@@ -99,7 +99,7 @@ static void init_verify( int expect )
 
   test_delay(20);
 
-  cpu = rtems_get_current_processor();
+  cpu = rtems_scheduler_get_processor();
   printf( "Init: expected=%d actual=%d\n", expect, cpu);
   rtems_test_assert( expect == cpu );
 }
@@ -112,7 +112,7 @@ static void task(rtems_task_argument arg)
   while (true) {
     sc = rtems_semaphore_obtain (task_sem, RTEMS_NO_WAIT, 0);
     if (sc == RTEMS_SUCCESSFUL) {
-      task_data.actual_cpu = rtems_get_current_processor();
+      task_data.actual_cpu = rtems_scheduler_get_processor();
       task_data.ran = true;
      test_delay(25);
       rtems_semaphore_release(task_sem);
 
@@ -64,7 +64,7 @@ static void test(void)
     test_delay(100);
 
     /* Check the cpu the Init task is running on */
-    cpu = rtems_get_current_processor();
+    cpu = rtems_scheduler_get_processor();
     printf("On cpu %d\n", cpu);
     rtems_test_assert(cpu == i);
   }
@@ -60,7 +60,7 @@ static void task(rtems_task_argument arg)
     sc = rtems_semaphore_obtain (task_sem, RTEMS_NO_WAIT, 0);
     if (sc == RTEMS_SUCCESSFUL) {
       task_data.ran = true;
-      task_data.actual_cpu = rtems_get_current_processor();
+      task_data.actual_cpu = rtems_scheduler_get_processor();
       rtems_semaphore_release(task_sem);
     }
   }
@@ -115,7 +115,7 @@ static void test(void)
    * Verify the Init task is running on the max core.
    */
   printf("Verify Init task is on cpu %" PRIu32 "\n",cpu_count-1);
-  cpu = rtems_get_current_processor();
+  cpu = rtems_scheduler_get_processor();
   rtems_test_assert(cpu == (cpu_count-1));
 
   /* Walk TA1 across all of the cores */
@@ -77,7 +77,7 @@ static void task(rtems_task_argument arg)
    sc = rtems_semaphore_obtain (task_sem, RTEMS_NO_WAIT, 0);
    if (sc == RTEMS_SUCCESSFUL) {
      task_data[arg].ran = true;
-     task_data[arg].actual_cpu = rtems_get_current_processor();
+     task_data[arg].actual_cpu = rtems_scheduler_get_processor();
      rtems_semaphore_release(task_sem);
    }
  }
@@ -92,7 +92,7 @@ static void verify_tasks(void)
 
   /* Set Init task data */
   task_data[0].ran = true;
-  task_data[0].actual_cpu = rtems_get_current_processor();
+  task_data[0].actual_cpu = rtems_scheduler_get_processor();
 
   /* Verify all tasks */
   for (i = 0; i < NUM_CPUS; i++) {
@@ -46,7 +46,7 @@ static void task(rtems_task_argument arg)
 
   (void) arg;
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
   rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
   rtems_test_assert(sched_get_priority_max(SCHED_RR) == INT_MAX - 1);
 
@@ -77,7 +77,7 @@ static void sticky_task(rtems_task_argument arg)
 
   (void) arg;
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   sc = rtems_semaphore_create(
     rtems_build_name(' ', 'M', 'T', 'X'),
@@ -149,12 +149,12 @@ static void test_scheduler_add_remove_processors(void)
   sc = rtems_scheduler_add_processor(scheduler_a_id, 1);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   CPU_ZERO(&first_cpu);
   CPU_SET(0, &first_cpu);
@@ -168,7 +168,7 @@
   sc = rtems_scheduler_add_processor(scheduler_a_id, 0);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   sc = rtems_task_create(
     rtems_build_name('T', 'A', 'S', 'K'),
@@ -190,7 +190,7 @@
   sc = rtems_scheduler_remove_processor(scheduler_a_id, 1);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   sc = rtems_event_transient_send(task_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -222,7 +222,7 @@ static void test(void)
   cpu_set_t online_cpus;
   uint32_t cpu_count;
 
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   cpu_count = rtems_get_processor_count();
   main_task_id = rtems_task_self();
@@ -47,7 +47,7 @@ static void migration_task(rtems_task_argument arg)
 {
   test_context *ctx = (test_context *) arg;
   uint32_t cpu_count = rtems_get_processor_count();
-  uint32_t cpu_index = rtems_get_current_processor();
+  uint32_t cpu_index = rtems_scheduler_get_processor();
 
   while (true) {
     rtems_status_code sc;
@@ -65,7 +65,7 @@
     }
 
     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-    rtems_test_assert(cpu_index == rtems_get_current_processor());
+    rtems_test_assert(cpu_index == rtems_scheduler_get_processor());
     ++ctx->migration_counter;
   }
 }
@@ -74,7 +74,7 @@ static void scheduler_task(rtems_task_argument arg)
 {
   test_context *ctx = (test_context *) arg;
   uint32_t cpu_count = rtems_get_processor_count();
-  uint32_t cpu_index = rtems_get_current_processor();
+  uint32_t cpu_index = rtems_scheduler_get_processor();
 
   while (true) {
     rtems_status_code sc;
 
@@ -90,14 +90,14 @@ static void signal_send(test_context *ctx, test_state new_state)
 static void check_consumer_processor(const test_context *ctx)
 {
   rtems_test_assert(
-    ctx->consumer_processor == rtems_get_current_processor()
+    ctx->consumer_processor == rtems_scheduler_get_processor()
   );
 }
 
 static void check_producer_processor(const test_context *ctx)
 {
   rtems_test_assert(
-    ctx->producer_processor == rtems_get_current_processor()
+    ctx->producer_processor == rtems_scheduler_get_processor()
   );
 }
 
@@ -105,7 +105,7 @@ static void producer(rtems_task_argument arg)
 {
   test_context *ctx = (test_context *) arg;
 
-  ctx->producer_processor = rtems_get_current_processor();
+  ctx->producer_processor = rtems_scheduler_get_processor();
 
   rtems_test_assert(ctx->consumer_processor != ctx->producer_processor);
 
@@ -220,7 +220,7 @@ static void Init(rtems_task_argument arg)
   TEST_BEGIN();
 
   ctx->consumer = rtems_task_self();
-  ctx->consumer_processor = rtems_get_current_processor();
+  ctx->consumer_processor = rtems_scheduler_get_processor();
 
   test_isr_level(ctx);
 
@@ -286,18 +286,18 @@ static void test_simple_pin_unpin(test_context *ctx, int run)
   set_affinity(ctx->busy, 0);
   set_prio(ctx->busy, PRIO_IDLE);
   set_prio(RTEMS_SELF, PRIO_MIDDLE);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   cpu_self = _Thread_Dispatch_disable();
   executing = _Per_CPU_Get_executing(cpu_self);
   _Thread_Pin(executing);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   _Thread_Unpin(executing, cpu_self);
   _Thread_Dispatch_enable(cpu_self);
 
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 }
 
 static void test_pin_wait_unpin(test_context *ctx, bool blocked, int run)
@@ -309,20 +309,20 @@ static void test_pin_wait_unpin(test_context *ctx, bool blocked, int run)
   set_prio(RTEMS_SELF, PRIO_MIDDLE);
   set_prio(ctx->event, PRIO_LOW);
   set_affinity(ctx->event, 1);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   pin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   send_events(ctx->event, EVENT_WAKEUP_MASTER);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
   wait_for_events();
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   set_prio(ctx->busy, PRIO_HIGH);
   set_affinity(ctx->busy, 0);
   unpin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 }
 
 static void test_pin_preempt_unpin(test_context *ctx, bool blocked, int run)
@@ -338,10 +338,10 @@ static void test_pin_preempt_unpin(test_context *ctx, bool blocked, int run)
   set_prio(ctx->busy, PRIO_HIGH);
   set_affinity(ctx->event, 0);
   set_affinity(ctx->busy, 0);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   pin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   ctx->flag = false;
   send_events(
@@ -351,12 +351,12 @@
   );
 
   while (!ctx->flag) {
-    rtems_test_assert(rtems_get_current_processor() == 1);
+    rtems_test_assert(rtems_scheduler_get_processor() == 1);
   }
 
   set_affinity(ctx->busy, 0);
   unpin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 }
 
 static void test_pin_home_no_help_unpin(
@@ -376,16 +376,16 @@ static void test_pin_home_no_help_unpin(
   set_affinity(ctx->busy, 1);
   set_prio(ctx->busy, PRIO_IDLE);
   set_prio(RTEMS_SELF, PRIO_MIDDLE);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   pin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->sched_b, 1);
   rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
 
   rtems_mutex_lock(&ctx->mtx);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   set_affinity(ctx->event, 1);
   set_prio(ctx->event, PRIO_MIDDLE);
@@ -398,7 +398,7 @@
 
   /* Now the event task can help us */
   rtems_test_assert(ctx->mtx._Queue._heads != NULL);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   set_affinity(ctx->event_2, 0);
   set_affinity(ctx->busy, 1);
@@ -409,17 +409,17 @@
       | EVENT_SET_SELF_PRIO_TO_LOW | EVENT_SET_BUSY_PRIO_TO_IDLE
   );
   set_prio(ctx->event_2, PRIO_VERY_HIGH);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   rtems_mutex_unlock(&ctx->mtx);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   send_events(ctx->event, EVENT_WAKEUP_MASTER | EVENT_MTX_UNLOCK);
   wait_for_events();
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   unpin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 }
 
@@ -437,10 +437,10 @@ static void test_pin_foreign_no_help_unpin(
   set_affinity(ctx->busy, 1);
   set_prio(ctx->busy, PRIO_IDLE);
   set_prio(RTEMS_SELF, PRIO_MIDDLE);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   rtems_mutex_lock(&ctx->mtx);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   set_affinity(ctx->event, 1);
   set_prio(ctx->event, PRIO_MIDDLE);
@@ -452,16 +452,16 @@
 
   /* Now the event task can help us */
   rtems_test_assert(ctx->mtx._Queue._heads != NULL);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   /* Request help */
   set_affinity(ctx->busy, 0);
   set_prio(ctx->busy, PRIO_HIGH);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   /* Pin while using foreign scheduler */
   pin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   set_affinity(ctx->event_2, 1);
   send_events(
@@ -470,18 +470,18 @@
       | EVENT_SET_SELF_PRIO_TO_LOW | EVENT_SET_BUSY_PRIO_TO_IDLE
   );
   set_prio(ctx->event_2, PRIO_VERY_HIGH);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   unpin(blocked);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   set_prio(ctx->busy, PRIO_IDLE);
   rtems_mutex_unlock(&ctx->mtx);
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 
   send_events(ctx->event, EVENT_WAKEUP_MASTER | EVENT_MTX_UNLOCK);
   wait_for_events();
-  rtems_test_assert(rtems_get_current_processor() == 0);
+  rtems_test_assert(rtems_scheduler_get_processor() == 0);
 }
 
@@ -514,7 +514,7 @@ static void test(test_context *ctx)
 
   set_affinity(ctx->busy, 0);
   set_prio(ctx->busy, PRIO_IDLE);
-  rtems_test_assert(rtems_get_current_processor() == 1);
+  rtems_test_assert(rtems_scheduler_get_processor() == 1);
 
   sc = rtems_task_create(
     rtems_build_name('E', 'V', 'T', '1'),
@@ -116,7 +116,7 @@ static void load_task(rtems_task_argument arg)
   volatile int *load_data = (volatile int *) arg;
   size_t n = data_size;
   size_t clsz = cache_line_size;
-  int j = (int) rtems_get_current_processor();
+  int j = (int) rtems_scheduler_get_processor();
 
   while (true) {
     j = dirty_data_cache(load_data, n, clsz, j);