rtems: Add rtems_scheduler_get_processor_maximum()

Add rtems_scheduler_get_processor_maximum() as a replacement for
rtems_get_processor_count(). The rtems_get_processor_count() directive
is a bit orphaned, so adopt it into the Scheduler Manager. The term
"count" is also misleading, since the processor set may have gaps: the
actual count of online processors may be less than the value returned
by rtems_get_processor_count().

Update #3732.
Author: Sebastian Huber
Date:   2019-04-05 08:16:05 +02:00
parent 03c9f24061
commit f9219db2a9

66 changed files with 161 additions and 161 deletions
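To illustrate the new directive, here is a minimal usage sketch (an illustration, not part of this commit; the loop body is a placeholder). The processor maximum bounds the processor index range, so per-processor iteration takes the form below:

    #include <rtems.h>

    /* Sketch only: iterate over the whole processor index range.  A
     * processor index below the maximum is not necessarily online, so the
     * per-processor work must tolerate gaps in the processor set. */
    static void visit_processor_indices( void )
    {
      uint32_t cpu_max;
      uint32_t cpu_index;

      cpu_max = rtems_scheduler_get_processor_maximum();

      for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
        /* Per-processor work goes here. */
      }
    }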

@@ -80,7 +80,7 @@ static void grlib_tc_tick_irqmp_timestamp_init(void)
bool done =
_Atomic_Fetch_add_uint(&counter, 1, ATOMIC_ORDER_RELAXED)
-== rtems_get_processor_count() - 1;
+== rtems_scheduler_get_processor_maximum() - 1;
#else
bool done = true;
#endif

@@ -108,7 +108,7 @@ void bsp_interrupt_get_affinity(
Processor_mask *affinity
)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
_Processor_mask_Zero(affinity);
@@ -126,7 +126,7 @@ void bsp_interrupt_set_affinity(
)
{
uint32_t unmasked = 0;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {

@@ -60,7 +60,7 @@ static bsp_interrupt_server_context *bsp_interrupt_server_get_context(
}
#endif
-if (server_index >= rtems_get_processor_count()) {
+if (server_index >= rtems_scheduler_get_processor_maximum()) {
*sc = RTEMS_INVALID_ID;
return NULL;
}
@@ -504,7 +504,7 @@ rtems_status_code rtems_interrupt_server_initialize(
server_count = &dummy;
}
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
#if defined(RTEMS_SMP)
instances = calloc(cpu_count, sizeof(*instances));

@@ -76,7 +76,7 @@ static void leon3_tc_tick_irqmp_timestamp_init(void)
bool done =
_Atomic_Fetch_add_uint(&counter, 1, ATOMIC_ORDER_RELAXED)
-== rtems_get_processor_count() - 1;
+== rtems_scheduler_get_processor_maximum() - 1;
#else
bool done = true;
#endif

@@ -49,7 +49,7 @@ void bsp_fatal_extension(
*/
uint32_t max_wait = 1234567;
uint32_t self_cpu = rtems_scheduler_get_processor();
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t halt_mask = 0;
uint32_t i;

@@ -31,7 +31,7 @@ void bsp_interrupt_set_affinity(
)
{
uint32_t unmasked = 0;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
@@ -53,7 +53,7 @@ void bsp_interrupt_get_affinity(
Processor_mask *affinity
)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
_Processor_mask_Zero(affinity);

@@ -274,7 +274,6 @@ include_rtems_rtems_HEADERS += include/rtems/rtems/semmp.h
include_rtems_rtems_HEADERS += include/rtems/rtems/signal.h
include_rtems_rtems_HEADERS += include/rtems/rtems/signalimpl.h
include_rtems_rtems_HEADERS += include/rtems/rtems/signalmp.h
-include_rtems_rtems_HEADERS += include/rtems/rtems/smp.h
include_rtems_rtems_HEADERS += include/rtems/rtems/status.h
include_rtems_rtems_HEADERS += include/rtems/rtems/statusimpl.h
include_rtems_rtems_HEADERS += include/rtems/rtems/support.h

@@ -55,7 +55,6 @@
#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/rtems/mp.h>
#endif
-#include <rtems/rtems/smp.h>
#include <rtems/rtems/support.h>
#include <rtems/score/stack.h>

@@ -1,59 +0,0 @@
-/**
- * @file
- *
- * @ingroup ClassicSMP
- *
- * @brief SMP Services API
- */
-/*
- *  COPYRIGHT (c) 1989-2011.
- *  On-Line Applications Research Corporation (OAR).
- *
- *  The license and distribution terms for this file may be
- *  found in the file LICENSE in this distribution or at
- *  http://www.rtems.org/license/LICENSE.
- */
-#ifndef _RTEMS_RTEMS_SMP_H
-#define _RTEMS_RTEMS_SMP_H
-#include <stdint.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-/**
- * @defgroup ClassicSMP SMP Services
- *
- * @ingroup RTEMSAPIClassic
- *
- * This encapsulates functionality which is useful for SMP applications.
- *
- * @{
- */
-/**
- * @brief Returns the count of processors in the system.
- *
- * On uni-processor configurations a value of one will be returned.
- *
- * On SMP configurations this returns the value of a global variable set during
- * system initialization to indicate the count of utilized processors. The
- * processor count depends on the physically or virtually available processors
- * and application configuration. The value will always be less than or equal
- * to the maximum count of application configured processors.
- *
- * @return The count of processors being utilized.
- */
-uint32_t rtems_get_processor_count(void);
-/** @} */
-#ifdef __cplusplus
-}
-#endif
-#endif
-/* end of include file */

@@ -617,6 +617,39 @@ rtems_get_current_processor( void )
return rtems_scheduler_get_processor();
}
+/**
+ * @brief Returns the processor maximum supported by the system.
+ *
+ * In uniprocessor configurations, a value of one will be returned.
+ *
+ * In SMP configurations, this function returns the minimum of the processors
+ * (physically or virtually) available by the platform and the configured
+ * processor maximum. Not all processors in the range from processor index
+ * zero to the last processor index (which is the processor maximum minus one)
+ * may be configured to be used by a scheduler or online (online processors
+ * have a scheduler assigned).
+ *
+ * @return The processor maximum supported by the system.
+ *
+ * @see rtems_scheduler_add_processor() and rtems_scheduler_remove_processor().
+ */
+RTEMS_CONST uint32_t rtems_scheduler_get_processor_maximum( void );
+/**
+ * @brief Returns the processor maximum supported by the system.
+ *
+ * @return The processor maximum supported by the system.
+ *
+ * @deprecated
+ *
+ * Use rtems_scheduler_get_processor_maximum() instead.
+ */
+RTEMS_DEPRECATED RTEMS_INLINE_ROUTINE uint32_t
+rtems_get_processor_count( void )
+{
+  return rtems_scheduler_get_processor_maximum();
+}
/**
* @brief Gets the set of processors owned by the specified scheduler instance.
*

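The directives referenced by @see change which processors in the index range are online. A hypothetical sketch (the scheduler name 'SCHD' and the error handling are assumptions for illustration, not part of this commit):

    #include <rtems.h>

    /* Hypothetical sketch: bring the processor at cpu_index online by
     * adding it to an application-configured scheduler. */
    static rtems_status_code bring_processor_online( uint32_t cpu_index )
    {
      rtems_id          scheduler_id;
      rtems_status_code sc;

      sc = rtems_scheduler_ident(
        rtems_build_name( 'S', 'C', 'H', 'D' ),
        &scheduler_id
      );
      if ( sc != RTEMS_SUCCESSFUL ) {
        return sc;
      }

      /* On success the processor is online and owned by this scheduler. */
      return rtems_scheduler_add_processor( scheduler_id, cpu_index );
    }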
@@ -599,7 +599,7 @@ rtems_capture_open (uint32_t size, rtems_capture_timestamp timestamp RTEMS_UNU
return RTEMS_RESOURCE_IN_USE;
}
-count = rtems_get_processor_count();
+count = rtems_scheduler_get_processor_maximum();
if (capture_per_cpu == NULL) {
capture_per_cpu = calloc( count, sizeof( *capture_per_cpu ) );
}
@@ -686,7 +686,7 @@ rtems_capture_close (void)
}
capture_controls = NULL;
-for (cpu=0; cpu < rtems_get_processor_count(); cpu++) {
+for (cpu=0; cpu < rtems_scheduler_get_processor_maximum(); cpu++) {
if (capture_records_on_cpu(cpu).buffer)
rtems_capture_buffer_destroy( &capture_records_on_cpu(cpu) );
@@ -788,7 +788,7 @@ rtems_capture_flush (bool prime)
else
capture_flags_global &= ~RTEMS_CAPTURE_OVERFLOW;
-for (cpu=0; cpu < rtems_get_processor_count(); cpu++) {
+for (cpu=0; cpu < rtems_scheduler_get_processor_maximum(); cpu++) {
RTEMS_INTERRUPT_LOCK_REFERENCE( lock, &(capture_lock_on_cpu( cpu )) )
rtems_interrupt_lock_context lock_context_per_cpu;

@@ -268,7 +268,7 @@ rtems_capture_print_trace_records (int total, bool csv)
rtems_capture_time last_time = 0;
int i;
-cpus = rtems_get_processor_count ();
+cpus = rtems_scheduler_get_processor_maximum ();
per_cpu = calloc (cpus, sizeof(*per_cpu));
if (per_cpu == NULL)

@@ -56,7 +56,7 @@ void rtems_cpu_usage_reset( void )
_TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

@@ -477,7 +477,7 @@ void rtems_stack_checker_report_usage_with_plugin(
RTEMS_DECONST( rtems_printer *, printer )
);
-cpu_max = rtems_get_processor_count();
+cpu_max = rtems_scheduler_get_processor_maximum();
for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
Stack_check_Dump_interrupt_stack_usage(

@@ -200,7 +200,7 @@ T_measure_runtime_create(const T_measure_runtime_config *config)
chunk_size *= 2;
-load_count = rtems_get_processor_count();
+load_count = rtems_scheduler_get_processor_maximum();
load_size = load_count * sizeof(ctx->load_contexts[0]);
ctx = malloc(sizeof(*ctx) + sample_size + load_size + chunk_size +

@@ -60,8 +60,8 @@ static void run_tests(
for (i = 0; i < job_count; ++i) {
const rtems_test_parallel_job *job = &jobs[i];
-size_t n = rtems_get_processor_count();
-size_t j = job->cascade ? 0 : rtems_get_processor_count() - 1;
+size_t n = rtems_scheduler_get_processor_maximum();
+size_t j = job->cascade ? 0 : rtems_scheduler_get_processor_maximum() - 1;
while (j < n) {
size_t active_worker = j + 1;
@@ -133,7 +133,7 @@ void rtems_test_parallel(
_Atomic_Init_ulong(&ctx->stop, 0);
_SMP_barrier_Control_initialize(&ctx->barrier);
-ctx->worker_count = rtems_get_processor_count();
+ctx->worker_count = rtems_scheduler_get_processor_maximum();
ctx->worker_ids[0] = rtems_task_self();
ctx->jobs = jobs;
ctx->job_count = job_count;

@@ -147,7 +147,7 @@ void _Record_Stream_header_initialize( Record_Stream_header *header )
header->Processor_maximum.event =
RTEMS_RECORD_TIME_EVENT( 0, RTEMS_RECORD_PROCESSOR_MAXIMUM );
-header->Processor_maximum.data = rtems_get_processor_count() - 1;
+header->Processor_maximum.data = rtems_scheduler_get_processor_maximum() - 1;
header->Count.event = RTEMS_RECORD_TIME_EVENT( 0, RTEMS_RECORD_PER_CPU_COUNT );
header->Count.data = _Record_Item_count;

@@ -50,7 +50,7 @@ long sysconf(
case _SC_NPROCESSORS_CONF:
return (long) rtems_configuration_get_maximum_processors();
case _SC_NPROCESSORS_ONLN:
-return (long) rtems_get_processor_count();
+return (long) rtems_scheduler_get_processor_maximum();
case _SC_POSIX_26_VERSION:
return (long) _POSIX_26_VERSION;
#if defined(__sparc__)

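After this hunk, _SC_NPROCESSORS_ONLN is backed by rtems_scheduler_get_processor_maximum(). A minimal usage sketch (illustrative only, not part of this commit):

    #include <unistd.h>

    static long query_online_processors( void )
    {
      /* With this commit, the value equals the processor maximum. */
      return sysconf( _SC_NPROCESSORS_ONLN );
    }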
@@ -16,10 +16,10 @@
#include "config.h"
#endif
-#include <rtems/rtems/smp.h>
+#include <rtems/rtems/tasks.h>
#include <rtems/score/smp.h>
-uint32_t rtems_get_processor_count(void)
+uint32_t rtems_scheduler_get_processor_maximum(void)
{
return _SMP_Get_processor_count();
}

@@ -31,7 +31,7 @@ static void per_cpu_stats_iterate(
)
{
#ifdef RTEMS_PROFILING
-uint32_t n = rtems_get_processor_count();
+uint32_t n = rtems_scheduler_get_processor_maximum();
uint32_t i;
memset(data, 0, sizeof(*data));

@@ -79,7 +79,8 @@ void *POSIX_Init(
);
rtems_test_assert(
-sysconf( _SC_NPROCESSORS_ONLN ) == (long) rtems_get_processor_count()
+sysconf( _SC_NPROCESSORS_ONLN )
+  == (long) rtems_scheduler_get_processor_maximum()
);
#if defined(__sparc__)

@@ -48,12 +48,12 @@ rtems_task Init(
locked_print_initialize();
/* Initialize the TaskRan array */
-for ( i=0; i<rtems_get_processor_count() ; i++ ) {
+for ( i=0; i<rtems_scheduler_get_processor_maximum() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
-for ( i=0; i< rtems_get_processor_count() ; i++ ) {
+for ( i=0; i< rtems_scheduler_get_processor_maximum() ; i++ ) {
if ( i != cpu_self ) {
ch = '0' + i;
@@ -78,7 +78,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
-for ( i=0; i<rtems_get_processor_count() ; i++ ) {
+for ( i=0; i<rtems_scheduler_get_processor_maximum() ; i++ ) {
if ( i != cpu_self && TaskRan[i] == false)
allDone = false;
}

@@ -40,7 +40,7 @@ rtems_task Init(
locked_print_initialize();
-if ( rtems_get_processor_count() == 1 ) {
+if ( rtems_scheduler_get_processor_maximum() == 1 ) {
success();
}
@@ -59,7 +59,7 @@ rtems_task Init(
status = rtems_semaphore_obtain( Semaphore, RTEMS_WAIT, 0);
directive_failed( status,"rtems_semaphore_obtain of SEM1\n");
-for ( i=1; i < rtems_get_processor_count(); i++ ){
+for ( i=1; i < rtems_scheduler_get_processor_maximum(); i++ ){
/* Create and start tasks for each CPU */
ch = '0' + i;

@@ -50,20 +50,22 @@ rtems_task Init(
char ch = '0';
rtems_id id;
rtems_status_code status;
+uint32_t cpu_max;
Loop();
TEST_BEGIN();
locked_print_initialize();
+cpu_max = rtems_scheduler_get_processor_maximum();
-if ( rtems_get_processor_count() == 1 ) {
+if ( cpu_max == 1 ) {
success();
}
/* Initialize the TaskRan array */
TaskRan[0] = true;
-for ( i=1; i<rtems_get_processor_count() ; i++ ) {
+for ( i=1; i<cpu_max ; i++ ) {
TaskRan[i] = false;
}
@@ -71,7 +73,7 @@ rtems_task Init(
PrintTaskInfo( "Init" );
/* for each remaining cpu create and start a task */
-for ( i=1; i < rtems_get_processor_count(); i++ ){
+for ( i=1; i < cpu_max; i++ ){
ch = '0' + i;
@@ -104,13 +106,13 @@ rtems_task Init(
&id
);
directive_failed( status, "rtems_task_create" );
-status = rtems_task_start(id,Test_task,rtems_get_processor_count());
+status = rtems_task_start(id,Test_task,cpu_max);
directive_failed( status, "rtems_task_start" );
/* Wait on all tasks to run */
while (1) {
TestFinished = true;
-for ( i=1; i < (rtems_get_processor_count()+1) ; i++ ) {
+for ( i=1; i < (cpu_max+1) ; i++ ) {
if (TaskRan[i] == false)
TestFinished = false;
}

@@ -52,11 +52,11 @@ rtems_task Init(
locked_print_initialize();
TEST_BEGIN();
-if ( rtems_get_processor_count() == 1 ) {
+if ( rtems_scheduler_get_processor_maximum() == 1 ) {
success();
}
-for ( i=0; i<rtems_get_processor_count() ; i++ ) {
+for ( i=0; i<rtems_scheduler_get_processor_maximum() ; i++ ) {
ch = '1' + i;
status = rtems_task_create(

@@ -99,7 +99,7 @@ rtems_task Init(
locked_print_initialize();
TEST_BEGIN();
-if ( rtems_get_processor_count() == 1 ) {
+if ( rtems_scheduler_get_processor_maximum() == 1 ) {
success();
}

@@ -76,7 +76,7 @@ rtems_task Init(
/* Show that the init task is running on this cpu */
PrintTaskInfo( "Init", &time );
-for ( i=1; i <= rtems_get_processor_count() *3; i++ ) {
+for ( i=1; i <= rtems_scheduler_get_processor_maximum() *3; i++ ) {
sprintf(ch, "%02" PRIu32, i );
status = rtems_task_create(

@@ -53,7 +53,7 @@ rtems_task Init(
for ( killtime=0; killtime<1000000; killtime++ )
;
-for ( i=0; i<rtems_get_processor_count() -1; i++ ) {
+for ( i=0; i<rtems_scheduler_get_processor_maximum() -1; i++ ) {
ch = '1' + i;
status = rtems_task_create(

@@ -112,7 +112,7 @@ void Validate_affinity(void )
directive_failed( sc, "Get Affinity of Init Task" );
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
/* Fill the remaining cpus with med priority tasks */
puts( "Init - Create Medium priority tasks");

@@ -82,7 +82,11 @@ static void test_cache_invalidate_multiple_instruction_lines(
static void barrier( SMP_barrier_State *bs )
{
-_SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
+_SMP_barrier_Wait(
+  &ctx.barrier,
+  bs,
+  rtems_scheduler_get_processor_maximum()
+);
}
static void broadcast_test_init( void )
@@ -101,7 +105,8 @@ static void broadcast_test_body(
static void broadcast_test_fini( void )
{
rtems_test_assert(
-ctx.count[rtems_scheduler_get_processor()] == rtems_get_processor_count()
+ctx.count[rtems_scheduler_get_processor()]
+  == rtems_scheduler_get_processor_maximum()
);
}
@@ -175,9 +180,9 @@ static void cmlog( const char* str )
static void all_tests( void )
{
-uint32_t cpu_count = rtems_get_processor_count();
-size_t set_size = CPU_ALLOC_SIZE( rtems_get_processor_count() );
-cpu_set_t *cpu_set = CPU_ALLOC( rtems_get_processor_count() );
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
+size_t set_size = CPU_ALLOC_SIZE( cpu_count );
+cpu_set_t *cpu_set = CPU_ALLOC( cpu_count );
SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
/* Send message to all available CPUs */
@@ -217,7 +222,7 @@ static void test_smp_cache_manager( void )
{
rtems_status_code sc;
size_t worker_index;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
for (worker_index = 1; worker_index < cpu_count; ++worker_index) {
rtems_id worker_id;

@@ -103,7 +103,7 @@ static void test(void)
rtems_task_priority priority;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
if (cpu_count != 4) {
printf("Test requires a minimum of 4 cores\n");
return;

@@ -301,7 +301,7 @@ static void Init(rtems_task_argument arg)
TEST_BEGIN();
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
sc = rtems_capture_open(50000, NULL);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);

@@ -161,7 +161,7 @@ static void Init(rtems_task_argument arg)
rtems_resource_snapshot_take(&snapshot);
-if (rtems_get_processor_count() == CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
test();
}

@@ -67,7 +67,11 @@ static void fatal_extension(
}
}
-_SMP_barrier_Wait(&barrier, &barrier_state, rtems_get_processor_count());
+_SMP_barrier_Wait(
+  &barrier,
+  &barrier_state,
+  rtems_scheduler_get_processor_maximum()
+);
}
static rtems_status_code test_driver_init(
@@ -77,7 +81,7 @@ static rtems_status_code test_driver_init(
)
{
uint32_t self = rtems_scheduler_get_processor();
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu;
TEST_BEGIN();

@@ -71,7 +71,11 @@ static void fatal_extension(
assert(code == SMP_FATAL_SHUTDOWN);
}
-_SMP_barrier_Wait(&barrier, &barrier_state, rtems_get_processor_count());
+_SMP_barrier_Wait(
+  &barrier,
+  &barrier_state,
+  rtems_scheduler_get_processor_maximum()
+);
}
static rtems_status_code test_driver_init(
@@ -81,7 +85,7 @@ static rtems_status_code test_driver_init(
)
{
uint32_t self = rtems_scheduler_get_processor();
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu;
TEST_BEGIN();

@@ -78,7 +78,7 @@ static void test_send_message_while_processing_a_message(
test_context *ctx
)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index_self = rtems_scheduler_get_processor();
uint32_t cpu_index;
SMP_barrier_State *bs = &ctx->main_barrier_state;
@@ -131,7 +131,7 @@ static void test_send_message_flood(
test_context *ctx
)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index_self = rtems_scheduler_get_processor();
uint32_t cpu_index;

@@ -80,7 +80,7 @@ static void inherit_obtain_task(rtems_task_argument arg)
test_context *ctx = &test_instance;
rtems_status_code sc;
SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
rtems_counter_ticks delay = (cpu_count - 1 - arg) * ctx->inherit_obtain_delay;
while (true) {
@@ -333,7 +333,7 @@ static void test(void)
sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-for (i = 0; i < rtems_get_processor_count(); ++i) {
+for (i = 0; i < rtems_scheduler_get_processor_maximum(); ++i) {
sc = rtems_task_create(
rtems_build_name('I', 'N', 'H', 'O'),
INHERIT_OBTAIN_PRIO_BASE + i,
@@ -368,7 +368,7 @@ static void test(void)
ctx->inherit_release_counter
);
-for (i = 0; i < rtems_get_processor_count(); ++i) {
+for (i = 0; i < rtems_scheduler_get_processor_maximum(); ++i) {
printf(
"priority inheritance obtain count %2" PRIu32 ": %" PRIu64 "\n",
i,

@@ -227,7 +227,7 @@ static void Init(rtems_task_argument arg)
rtems_print_printer_fprintf_putc(&rtems_test_printer);
TEST_BEGIN();
-if (rtems_get_processor_count() >= 2) {
+if (rtems_scheduler_get_processor_maximum() >= 2) {
test();
}

@@ -56,7 +56,7 @@ static void migration_task(rtems_task_argument arg)
test_context *ctx = &test_instance;
uint32_t task_index = arg;
rtems_task_priority prio = migration_task_prio(task_index);
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index = rtems_scheduler_get_processor();
while (true) {
@@ -80,7 +80,7 @@ static void migration_task(rtems_task_argument arg)
static void test_migrations(test_context *ctx)
{
rtems_status_code sc;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t task_count = cpu_count + 1;
uint32_t task_index;
@@ -146,7 +146,7 @@ static Thread_Control *get_thread_by_id(rtems_id task_id)
static void test_double_migration(test_context *ctx)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
if (cpu_count >= 2) {
rtems_status_code sc;
@@ -250,7 +250,7 @@ static void test_double_migration(test_context *ctx)
static void init_scheduler_ids(test_context *ctx)
{
rtems_status_code sc;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {

@@ -250,7 +250,7 @@ static void create_mrsp_sema(
rtems_task_priority prio
)
{
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t index;
rtems_status_code sc;
@@ -1547,7 +1547,7 @@ static void migration_task(rtems_task_argument arg)
{
test_context *ctx = &test_instance;
rtems_status_code sc;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t v = 0xdeadbeef;
while (true) {
@@ -1565,7 +1565,7 @@ static void migration_task(rtems_task_argument arg)
static void test_mrsp_load(test_context *ctx)
{
rtems_status_code sc;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t index;
puts("test MrsP load");
@@ -1727,7 +1727,7 @@ static void Init(rtems_task_argument arg)
test_context *ctx = &test_instance;
rtems_status_code sc;
rtems_resource_snapshot snapshot;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index;
TEST_BEGIN();

@@ -516,7 +516,7 @@ static void test_init(test_context *ctx)
start_task(ctx, A_2_1, worker, 2, SCHED_A);
start_task(ctx, H_A, helper, 3, SCHED_A);
-if (rtems_get_processor_count() >= PART_COUNT) {
+if (rtems_scheduler_get_processor_maximum() >= PART_COUNT) {
start_task(ctx, B_4, worker, 4, SCHED_B);
start_task(ctx, B_5_0, worker, 5, SCHED_B);
start_task(ctx, B_5_1, worker, 5, SCHED_B);
@@ -1065,7 +1065,7 @@ static void test(test_context *ctx)
{
test_init(ctx);
-if (rtems_get_processor_count() >= PART_COUNT) {
+if (rtems_scheduler_get_processor_maximum() >= PART_COUNT) {
test_task_get_priority_not_defined(ctx);
test_simple_inheritance(ctx);
test_dequeue_order_one_scheduler_instance(ctx);

@@ -171,7 +171,7 @@ static void set_up(test_context *ctx)
rtems_status_code sc;
uint32_t i;
-ctx->cpu_count = rtems_get_processor_count();
+ctx->cpu_count = rtems_scheduler_get_processor_maximum();
ctx->mtx_count = MTX_PER_CPU * ctx->cpu_count;
sc = rtems_semaphore_create(

@@ -239,7 +239,7 @@ static uint32_t find_free_cpu(test_context *ctx)
uint32_t i;
uint32_t n;
-n = rtems_get_processor_count();
+n = rtems_scheduler_get_processor_maximum();
pthread_mutex_lock(&ctx->mtx);

@@ -111,7 +111,7 @@ void Validate_attr(void )
rtems_test_assert( priority != -1 );
-cpus = rtems_get_processor_count();
+cpus = rtems_scheduler_get_processor_maximum();
puts(
"Init - Validate pthread_attr_setaffinity_np and "
"pthread_attr_getaffinity_np"

@@ -125,7 +125,7 @@ void Validate_affinity(void )
rtems_test_assert( !sc );
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
/* Fill the remaining cpus with med priority tasks */
puts( "Init - Create Medium priority tasks");

@@ -95,7 +95,7 @@ static void test(test_context *ctx)
int prio_ceiling;
int eno;
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
rtems_test_assert(rtems_scheduler_get_processor() == 0);

@@ -165,7 +165,7 @@ static void *POSIX_Init(void *arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() >= 2) {
+if (rtems_scheduler_get_processor_maximum() >= 2) {
test();
}

@@ -84,7 +84,7 @@ static void test(void)
uint32_t cpu_count;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
size = sizeof(cpu_set_t);

@@ -128,7 +128,7 @@ static void test(void)
cpu_set_t cpuset;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
if (cpu_count < NUM_CPUS) {
printf("Error: Test requires at least 2 cpus\n");
return;

@@ -43,7 +43,7 @@ static void test(void)
cpu_set_t cpuset;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
id = rtems_task_self();

@@ -75,7 +75,7 @@ static void test(void)
cpu_set_t cpuset;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
if (cpu_count < 2) {
printf("Error: Test requires at least 2 cpus\n");
return;

@@ -133,7 +133,7 @@ static void test(void)
rtems_task_priority priority;
/* Get the number of processors that we are using. */
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
if (cpu_count != 4) {
printf("Test requires a minimum of 4 cores\n");
return;

@@ -381,7 +381,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() == CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
test();
} else {
puts("warning: wrong processor count to run the test");

@@ -57,7 +57,7 @@ static void affinity_task(rtems_task_argument arg)
uint32_t n;
v = (uint32_t) arg;
-n = rtems_get_processor_count();
+n = rtems_scheduler_get_processor_maximum();
while (true) {
rtems_status_code sc;

@@ -90,7 +90,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() == CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
test();
} else {
puts("warning: wrong processor count to run the test");

@@ -70,7 +70,7 @@ static void task(rtems_task_argument arg)
static bool is_per_cpu_state_ok(void)
{
bool ok = true;
-uint32_t n = rtems_get_processor_count();
+uint32_t n = rtems_scheduler_get_processor_maximum();
uint32_t i;
for (i = 0; i < n; ++i) {

@@ -134,7 +134,7 @@ static void test_scheduler_add_remove_processors(void)
sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
-if (rtems_get_processor_count() > 1) {
+if (rtems_scheduler_get_processor_maximum() > 1) {
rtems_id scheduler_id;
rtems_id scheduler_b_id;
rtems_id task_id;
@@ -224,7 +224,7 @@ static void test(void)
rtems_test_assert(rtems_scheduler_get_processor() == 0);
-cpu_count = rtems_get_processor_count();
+cpu_count = rtems_scheduler_get_processor_maximum();
main_task_id = rtems_task_self();
CPU_ZERO(&first_cpu);

@@ -46,7 +46,7 @@ static test_context test_instance;
static void migration_task(rtems_task_argument arg)
{
test_context *ctx = (test_context *) arg;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index = rtems_scheduler_get_processor();
while (true) {
@@ -73,7 +73,7 @@ static void migration_task(rtems_task_argument arg)
static void scheduler_task(rtems_task_argument arg)
{
test_context *ctx = (test_context *) arg;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t cpu_index = rtems_scheduler_get_processor();
while (true) {
@@ -107,7 +107,7 @@ static void test(test_context *ctx)
rtems_status_code sc;
uint32_t i;
-for (i = 0; i < rtems_get_processor_count(); ++i) {
+for (i = 0; i < rtems_scheduler_get_processor_maximum(); ++i) {
sc = rtems_scheduler_ident(i, &ctx->scheduler_ids[i]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

@@ -224,7 +224,7 @@ static void Init(rtems_task_argument arg)
test_isr_level(ctx);
-if (rtems_get_processor_count() >= 2) {
+if (rtems_scheduler_get_processor_maximum() >= 2) {
test_two_processors(ctx);
}

@@ -314,7 +314,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() == CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
test();
} else {
puts("warning: wrong processor count to run the test");

@@ -225,7 +225,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() >= 2) {
+if (rtems_scheduler_get_processor_maximum() >= 2) {
test();
}

@@ -486,7 +486,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() >= CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() >= CPU_COUNT) {
test_restart();
test_delete();
test_set_life_protection(0);

@@ -570,7 +570,7 @@ static void Init(rtems_task_argument arg)
{
TEST_BEGIN();
-if (rtems_get_processor_count() == CPU_COUNT) {
+if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
test(&test_instance);
} else {
puts("warning: wrong processor count to run the test");

@@ -75,7 +75,7 @@ static void test(void)
{
rtems_status_code sc;
uint32_t test_time_in_seconds = 10;
-uint32_t cpu_count = rtems_get_processor_count();
+uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
uint32_t i;
for (i = 0; i < cpu_count; ++i) {

@@ -68,7 +68,11 @@ static void test_initial_values(void)
{
uint32_t cpu_index;
-for (cpu_index = 0; cpu_index < rtems_get_processor_count(); ++cpu_index) {
+for (
+  cpu_index = 0;
+  cpu_index < rtems_scheduler_get_processor_maximum();
+  ++cpu_index
+) {
Per_CPU_Control *cpu;
unsigned char *c;
unsigned short *s;
@@ -137,7 +141,11 @@ static void set_unique_values(unsigned int v)
{
uint32_t cpu_index;
-for (cpu_index = 0; cpu_index < rtems_get_processor_count(); ++cpu_index) {
+for (
+  cpu_index = 0;
+  cpu_index < rtems_scheduler_get_processor_maximum();
+  ++cpu_index
+) {
Per_CPU_Control *cpu;
unsigned char *c;
unsigned short *s;
@@ -196,7 +204,11 @@ static void test_unique_values(unsigned int v)
{
uint32_t cpu_index;
-for (cpu_index = 0; cpu_index < rtems_get_processor_count(); ++cpu_index) {
+for (
+  cpu_index = 0;
+  cpu_index < rtems_scheduler_get_processor_maximum();
+  ++cpu_index
+) {
Per_CPU_Control *cpu;
unsigned char *c;
unsigned short *s;

@@ -230,7 +230,7 @@ static void Init(rtems_task_argument arg)
test(false, load);
test(true, load);
-for (load = 1; load < rtems_get_processor_count(); ++load) {
+for (load = 1; load < rtems_scheduler_get_processor_maximum(); ++load) {
rtems_status_code sc;
rtems_id id;
volatile int *load_data = NULL;