score: Rename rtems_smp_get_number_of_processors()

Rename to rtems_smp_get_processor_count().  Always provide
<rtems/score/smp.h> and <rtems/rtems/smp.h>.  Add
_SMP_Get_processor_count().  On uni-processor configurations this
function is a compile-time constant defined to be one.  This allows
iterating over all processors without overhead on uni-processor
configurations.
Sebastian Huber
2013-06-14 14:26:34 +02:00
parent 5c78940d94
commit edde99bd21
28 changed files with 118 additions and 121 deletions
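To illustrate the iteration pattern this change is meant to support, here is a minimal application-level sketch (reset_task_ran() and task_ran[] are made-up illustration names and the array size is an arbitrary example; only rtems_smp_get_processor_count() comes from this commit):

#include <rtems.h>
#include <rtems/rtems/smp.h>
#include <stdbool.h>
#include <stdint.h>

static bool task_ran[ 8 ];   /* example size chosen by the application */

static void reset_task_ran( void )
{
  uint32_t cpu;

  /*
   * On uni-processor configurations rtems_smp_get_processor_count()
   * expands to the compile-time constant one, so this loop performs a
   * single iteration and no global variable has to be read.
   */
  for ( cpu = 0; cpu < rtems_smp_get_processor_count(); ++cpu ) {
    task_ran[ cpu ] = false;
  }
}

This mirrors the loops rewritten in the SMP tests further down in this commit.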

View File

@@ -130,7 +130,7 @@ void bsp_smp_broadcast_interrupt(void)
int max_cpus;
cpu = bsp_smp_processor_id();
max_cpus = rtems_smp_get_number_of_processors();
max_cpus = rtems_smp_get_processor_count();
for ( dest_cpu=0 ; dest_cpu < max_cpus ; dest_cpu++ ) {
if ( cpu == dest_cpu )

View File

@@ -39,7 +39,7 @@
}
#else
int cpu;
for ( cpu=0 ; cpu < rtems_smp_get_number_of_processors() ; cpu++ ) {
for ( cpu=0 ; cpu < rtems_smp_get_processor_count() ; cpu++ ) {
Per_CPU_Control *p = &_Per_CPU_Information[cpu];
if ( p->executing->Object.id == the_thread->Object.id ) {
*time_of_context_switch = p->time_of_last_context_switch;

View File

@@ -34,6 +34,7 @@ include_rtems_rtems_HEADERS += include/rtems/rtems/region.h
include_rtems_rtems_HEADERS += include/rtems/rtems/rtemsapi.h
include_rtems_rtems_HEADERS += include/rtems/rtems/sem.h
include_rtems_rtems_HEADERS += include/rtems/rtems/signal.h
include_rtems_rtems_HEADERS += include/rtems/rtems/smp.h
include_rtems_rtems_HEADERS += include/rtems/rtems/status.h
include_rtems_rtems_HEADERS += include/rtems/rtems/support.h
include_rtems_rtems_HEADERS += include/rtems/rtems/tasks.h
@@ -54,10 +55,6 @@ include_rtems_rtems_HEADERS += include/rtems/rtems/signalmp.h
include_rtems_rtems_HEADERS += include/rtems/rtems/taskmp.h
endif
if HAS_SMP
include_rtems_rtems_HEADERS += include/rtems/rtems/smp.h
endif
include_rtems_rtems_HEADERS += inline/rtems/rtems/asr.inl
include_rtems_rtems_HEADERS += inline/rtems/rtems/attr.inl
include_rtems_rtems_HEADERS += inline/rtems/rtems/barrier.inl

View File

@@ -73,9 +73,7 @@ extern "C" {
#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/rtems/mp.h>
#endif
#if defined(RTEMS_SMP)
#include <rtems/rtems/smp.h>
#endif
#include <rtems/rtems/support.h>

View File

@@ -1,16 +1,13 @@
/**
* @file rtems/rtems/smp.h
* @file
*
* @defgroup ClassicSMP Classic API SMP Services
* @ingroup ClassicSMP
*
* @ingroup ClassicRTEMS
* @brief SMP information and services.
*
* Most of the SMP interface is hidden from the application
* and exists between the BSP and RTEMS.
* @brief SMP Services API
*/
/* COPYRIGHT (c) 1989-2011.
/*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
@@ -28,25 +25,34 @@ extern "C" {
#include <rtems/score/smp.h>
/**
* @defgroup ClassicSMP Classic API SMP Services
* @defgroup ClassicSMP SMP Services
*
* @ingroup ClassicRTEMS
*
* This encapsulates functionality which is useful for SMP applications.
*
* This API is also available on uni-processor configurations. Here compile
* time constants are used whenever possible.
*
* @{
*/
/**@{*/
/**
* @brief Obtain Number of Cores in System
* @brief Returns the count of processors in the system.
*
* This method returns the number of CPU cores that are currently in
* the system. This will always be less than or equal to the number
* of maximum number of cores which were configured.
* On uni-processor configurations this is a compile time constant and defined
* to be one.
*
* @retval This method returns the number of cores in this system.
* On SMP configurations this returns the value of a global variable set during
* system initialization to indicate the count of processors. The processor
* count depends on the hardware and application configuration. The value will
* always be less than or equal to the maximum count of application configured
* processors.
*
* @return The count of processors in the system.
*/
#define rtems_smp_get_number_of_processors() \
(_SMP_Processor_count)
#define rtems_smp_get_processor_count() \
_SMP_Get_processor_count()
/**
* @brief Obtain Current Core Number
@@ -58,7 +64,7 @@ extern "C" {
#define rtems_smp_get_current_processor() \
bsp_smp_processor_id()
/**@}*/
/** @} */
#ifdef __cplusplus
}
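The comment above states that the runtime count never exceeds the application-configured maximum. A hedged sketch of how an application can rely on that bound (APP_MAXIMUM_PROCESSORS, per_cpu_flag[], and check_processor_count() are made-up names; the value 4 only mirrors the CONFIGURE_SMP_MAXIMUM_PROCESSORS setting used by the SMP tests):

#include <rtems.h>
#include <rtems/rtems/smp.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* The application configuration would contain, for example:
 *   #define CONFIGURE_SMP_MAXIMUM_PROCESSORS 4
 */
#define APP_MAXIMUM_PROCESSORS 4

static bool per_cpu_flag[ APP_MAXIMUM_PROCESSORS ];

static void check_processor_count( void )
{
  uint32_t cpu_count = rtems_smp_get_processor_count();

  /* The count is at least one and never larger than the configured
     maximum, so it is a safe bound for indexing per_cpu_flag[]. */
  assert( cpu_count >= 1 );
  assert( cpu_count <= APP_MAXIMUM_PROCESSORS );

  (void) cpu_count;
  (void) per_cpu_flag;
}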

View File

@@ -107,6 +107,10 @@ $(PROJECT_INCLUDE)/rtems/rtems/signal.h: include/rtems/rtems/signal.h $(PROJECT_
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/signal.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/signal.h
$(PROJECT_INCLUDE)/rtems/rtems/smp.h: include/rtems/rtems/smp.h $(PROJECT_INCLUDE)/rtems/rtems/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/smp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/smp.h
$(PROJECT_INCLUDE)/rtems/rtems/status.h: include/rtems/rtems/status.h $(PROJECT_INCLUDE)/rtems/rtems/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/status.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/status.h
@@ -168,11 +172,6 @@ $(PROJECT_INCLUDE)/rtems/rtems/taskmp.h: include/rtems/rtems/taskmp.h $(PROJECT_
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/taskmp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/taskmp.h
endif
if HAS_SMP
$(PROJECT_INCLUDE)/rtems/rtems/smp.h: include/rtems/rtems/smp.h $(PROJECT_INCLUDE)/rtems/rtems/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/smp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/smp.h
endif
$(PROJECT_INCLUDE)/rtems/rtems/asr.inl: inline/rtems/rtems/asr.inl $(PROJECT_INCLUDE)/rtems/rtems/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/rtems/asr.inl
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/rtems/asr.inl

View File

@@ -40,6 +40,7 @@ include_rtems_score_HEADERS += include/rtems/score/schedulercbs.h
include_rtems_score_HEADERS += include/rtems/score/scheduleredf.h
include_rtems_score_HEADERS += include/rtems/score/schedulerpriority.h
include_rtems_score_HEADERS += include/rtems/score/schedulersimple.h
include_rtems_score_HEADERS += include/rtems/score/smp.h
include_rtems_score_HEADERS += include/rtems/score/smplock.h
include_rtems_score_HEADERS += include/rtems/score/stack.h
include_rtems_score_HEADERS += include/rtems/score/states.h
@@ -62,9 +63,6 @@ include_rtems_score_HEADERS += include/rtems/score/basedefs.h
include_rtems_score_HEADERS += include/rtems/score/atomic.h
include_rtems_score_HEADERS += include/rtems/score/genericcpuatomic.h
include_rtems_score_HEADERS += include/rtems/score/genericatomicops.h
if HAS_SMP
include_rtems_score_HEADERS += include/rtems/score/smp.h
endif
if HAS_PTHREADS
include_rtems_score_HEADERS += include/rtems/score/corespinlock.h

View File

@@ -1,10 +1,9 @@
/**
* @file rtems/score/smp.h
* @file
*
* @brief Interface to the SuperCore SMP Support used Internally to RTEMS
* @ingroup ScoreSMP
*
* This include file defines the interface to the SuperCore
* SMP support that is used internally to RTEMS.
* @brief SuperCore SMP Support API
*/
/*
@@ -19,24 +18,22 @@
#ifndef _RTEMS_SCORE_SMP_H
#define _RTEMS_SCORE_SMP_H
#if defined (RTEMS_SMP)
#include <rtems/score/percpu.h>
/**
* @defgroup SuperCoreSMP SMP Support
*
* @ingroup Score
*
* This defines the interface of the SuperCore support
* code for SMP support.
*/
/**@{*/
#include <rtems/score/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup ScoreSMP SMP Support
*
* @ingroup Score
*
* This defines the interface of the SuperCore SMP support.
*
* @{
*/
/**
* This defines the bit which indicates the interprocessor interrupt
* has been requested so that RTEMS will reschedule on this CPU
@@ -58,14 +55,20 @@ extern "C" {
*/
#define RTEMS_BSP_SMP_SHUTDOWN 0x04
#ifndef ASM
/**
* @brief Number of CPUs in a SMP system.
*
* This variable is set during the SMP initialization sequence to
* indicate the number of CPUs in this system.
*/
SCORE_EXTERN uint32_t _SMP_Processor_count;
#if !defined( ASM )
#if defined( RTEMS_SMP )
SCORE_EXTERN uint32_t _SMP_Processor_count;
static inline uint32_t _SMP_Get_processor_count( void )
{
return _SMP_Processor_count;
}
#else
#define _SMP_Get_processor_count() ( ( uint32_t ) 1 )
#endif
#if defined( RTEMS_SMP )
/**
* @brief Sends a SMP message to a processor.
@@ -114,14 +117,15 @@ void _SMP_Request_other_cores_to_dispatch(void);
*/
void _SMP_Request_other_cores_to_shutdown(void);
#endif
#endif /* defined( RTEMS_SMP ) */
#endif /* !defined( ASM ) */
/** @} */
#ifdef __cplusplus
}
#endif
#endif
/**@}*/
#endif
/* end of include file */
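To make the compile-time effect of the conditional definition above concrete, here is a small standalone C sketch outside RTEMS (DEMO_SMP, demo_processor_count(), and demo_processor_count_value are stand-in names that follow the same #if pattern as the score header); when DEMO_SMP is undefined the loop bound is the literal one, so an optimizing compiler emits a single pass with no global load:

#include <inttypes.h>
#include <stdio.h>

#if defined( DEMO_SMP )
uint32_t demo_processor_count_value = 4; /* would be set during start-up */

static inline uint32_t demo_processor_count( void )
{
  return demo_processor_count_value;
}
#else
/* Uni-processor stand-in: the count is a compile-time constant of one. */
#define demo_processor_count() ( ( uint32_t ) 1 )
#endif

int main( void )
{
  uint32_t cpu;

  for ( cpu = 0; cpu < demo_processor_count(); ++cpu ) {
    printf( "processor %" PRIu32 "\n", cpu );
  }

  return 0;
}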

View File

@@ -143,6 +143,10 @@ $(PROJECT_INCLUDE)/rtems/score/schedulersimple.h: include/rtems/score/schedulers
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/schedulersimple.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/schedulersimple.h
$(PROJECT_INCLUDE)/rtems/score/smp.h: include/rtems/score/smp.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/smp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/smp.h
$(PROJECT_INCLUDE)/rtems/score/smplock.h: include/rtems/score/smplock.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/smplock.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/smplock.h
@@ -231,11 +235,6 @@ $(PROJECT_INCLUDE)/rtems/score/genericatomicops.h: include/rtems/score/genericat
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/genericatomicops.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/genericatomicops.h
if HAS_SMP
$(PROJECT_INCLUDE)/rtems/score/smp.h: include/rtems/score/smp.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/smp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/smp.h
endif
if HAS_PTHREADS
$(PROJECT_INCLUDE)/rtems/score/corespinlock.h: include/rtems/score/corespinlock.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/corespinlock.h

View File

@@ -126,7 +126,7 @@ void _SMP_Send_message( int cpu, uint32_t message )
void _SMP_Broadcast_message( uint32_t message )
{
int self = bsp_smp_processor_id();
int ncpus = _SMP_Processor_count;
int ncpus = _SMP_Get_processor_count();
int cpu;
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
@@ -146,7 +146,7 @@ void _SMP_Broadcast_message( uint32_t message )
void _SMP_Request_other_cores_to_perform_first_context_switch( void )
{
int self = bsp_smp_processor_id();
int ncpus = _SMP_Processor_count;
int ncpus = _SMP_Get_processor_count();
int cpu;
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
@@ -165,7 +165,7 @@ void _SMP_Request_other_cores_to_dispatch( void )
{
if ( _System_state_Is_up( _System_state_Get() ) ) {
int self = bsp_smp_processor_id();
int ncpus = _SMP_Processor_count;
int ncpus = _SMP_Get_processor_count();
int cpu;
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
@@ -185,7 +185,7 @@ void _SMP_Request_other_cores_to_dispatch( void )
void _SMP_Request_other_cores_to_shutdown( void )
{
int self = bsp_smp_processor_id();
int ncpus = _SMP_Processor_count;
int ncpus = _SMP_Get_processor_count();
int cpu;
_SMP_Broadcast_message( RTEMS_BSP_SMP_SHUTDOWN );

View File

@@ -89,7 +89,7 @@ void _Thread_Create_idle( void )
#if defined(RTEMS_SMP)
int cpu;
for ( cpu=0 ; cpu < _SMP_Processor_count ; cpu++ ) {
for ( cpu=0 ; cpu < _SMP_Get_processor_count() ; cpu++ ) {
_Thread_Create_idle_helper(
_Objects_Build_name( 'I', 'D', 'L', 'E' ),
cpu

View File

@@ -44,12 +44,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMP01 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=0; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i< rtems_smp_get_processor_count() ; i++ ) {
if ( i != cpu_self ) {
ch = '0' + i;
@@ -74,7 +74,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
if ( i != cpu_self && TaskRan[i] == false)
allDone = false;
}

View File

@@ -45,7 +45,7 @@ rtems_task Init(
status = rtems_semaphore_obtain( Semaphore, RTEMS_WAIT, 0);
directive_failed( status,"rtems_semaphore_obtain of SEM1\n");
for ( i=1; i < rtems_smp_get_number_of_processors(); i++ ){
for ( i=1; i < rtems_smp_get_processor_count(); i++ ){
/* Create and start tasks for each CPU */
ch = '0' + i;

View File

@@ -49,7 +49,7 @@ rtems_task Init(
/* Initialize the TaskRan array */
TaskRan[0] = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
@@ -57,7 +57,7 @@ rtems_task Init(
PrintTaskInfo( "Init" );
/* for each remaining cpu create and start a task */
for ( i=1; i < rtems_smp_get_number_of_processors(); i++ ){
for ( i=1; i < rtems_smp_get_processor_count(); i++ ){
ch = '0' + i;
@@ -87,12 +87,12 @@ rtems_task Init(
RTEMS_DEFAULT_ATTRIBUTES,
&id
);
status = rtems_task_start(id,Test_task,rtems_smp_get_number_of_processors());
status = rtems_task_start(id,Test_task,rtems_smp_get_processor_count());
/* Wait on all tasks to run */
while (1) {
TestFinished = true;
for ( i=1; i < (rtems_smp_get_number_of_processors()+1) ; i++ ) {
for ( i=1; i < (rtems_smp_get_processor_count()+1) ; i++ ) {
if (TaskRan[i] == false)
TestFinished = false;
}

View File

@@ -58,7 +58,7 @@ rtems_task Init(
/* Set all Tasks to not ran except for the init task */
TaskRan[0] = true;
for ( i=1; i <= rtems_smp_get_number_of_processors() ; i++ )
for ( i=1; i <= rtems_smp_get_processor_count() ; i++ )
TaskRan[i] = false;
@@ -66,7 +66,7 @@ rtems_task Init(
* For each processor create and start a task alternating
* between RTEMS_PREEMPT and RTEMS_NO_PREEMPT.
*/
for ( i=1; i < rtems_smp_get_number_of_processors() ; i++ ){
for ( i=1; i < rtems_smp_get_processor_count() ; i++ ){
/* Create and start tasks for each CPU */
ch = '0' + i;
@@ -79,7 +79,7 @@ rtems_task Init(
status = rtems_task_create(
rtems_build_name( 'T', 'A', ch, ' ' ),
CONFIGURE_INIT_TASK_PRIORITY +
(2*rtems_smp_get_number_of_processors()) - (2*i),
(2*rtems_smp_get_processor_count()) - (2*i),
RTEMS_MINIMUM_STACK_SIZE,
((i%2) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT),
RTEMS_DEFAULT_ATTRIBUTES,
@@ -106,7 +106,7 @@ rtems_task Init(
* should preempt the longest running PREEMPTABLE
* task and run on that cpu.
*/
ch = '0' + rtems_smp_get_number_of_processors() ;
ch = '0' + rtems_smp_get_processor_count() ;
locked_printf(
"Create a TA%c a %s task\n",
ch,
@@ -128,7 +128,7 @@ rtems_task Init(
status = rtems_task_start(
id,
Test_task,
rtems_smp_get_number_of_processors()
rtems_smp_get_processor_count()
);
/*
@@ -136,7 +136,7 @@ rtems_task Init(
*/
while (1) {
allDone = true;
for ( i=1; i<=rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<=rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -36,7 +36,7 @@ rtems_task Init(
locked_print_initialize();
locked_printf( "\n\n*** TEST SMP05 ***\n" );
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
ch = '1' + i;
status = rtems_task_create(

View File

@@ -54,7 +54,7 @@ rtems_task Init(
status = rtems_clock_tick();
directive_failed( status, "clock tick" );
rtems_test_assert( rtems_smp_get_number_of_processors() > 1 );
rtems_test_assert( rtems_smp_get_processor_count() > 1 );
cpu_num = bsp_smp_processor_id();

View File

@@ -73,7 +73,7 @@ rtems_task Init(
/* Show that the init task is running on this cpu */
PrintTaskInfo( "Init", &time );
for ( i=1; i <= rtems_smp_get_number_of_processors() *3; i++ ) {
for ( i=1; i <= rtems_smp_get_processor_count() *3; i++ ) {
sprintf(ch, "%02" PRId32, i );
status = rtems_task_create(

View File

@@ -42,7 +42,7 @@ rtems_task Init(
for ( killtime=0; killtime<1000000; killtime++ )
;
for ( i=0; i<rtems_smp_get_number_of_processors() -1; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() -1; i++ ) {
ch = '1' + i;
status = rtems_task_create(

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic01 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic02 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic03 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic04 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic05 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic06 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -35,12 +35,12 @@ rtems_task Init(
locked_printf( "\n\n*** SMPatomic07 TEST ***\n" );
/* Initialize the TaskRan array */
for ( i=0; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=0; i<rtems_smp_get_processor_count() ; i++ ) {
TaskRan[i] = false;
}
/* Create and start tasks for each processor */
for ( i=1; i< rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i< rtems_smp_get_processor_count() ; i++ ) {
ch = '0' + i;
status = rtems_task_create(
@@ -60,7 +60,7 @@ rtems_task Init(
/* Wait on the all tasks to run */
while (1) {
allDone = true;
for ( i=1; i<rtems_smp_get_number_of_processors() ; i++ ) {
for ( i=1; i<rtems_smp_get_processor_count() ; i++ ) {
if (TaskRan[i] == false)
allDone = false;
}

View File

@@ -298,7 +298,7 @@ static void run_tests(
static void task(rtems_task_argument arg)
{
global_context *ctx = (global_context *) arg;
int cpu_count = (int) rtems_smp_get_number_of_processors();
int cpu_count = (int) rtems_smp_get_processor_count();
int cpu_self = rtems_smp_get_current_processor();
rtems_status_code sc;
barrier_state bs = BARRIER_STATE_INITIALIZER;
@@ -312,7 +312,7 @@ static void task(rtems_task_argument arg)
static void test(void)
{
global_context *ctx = &context;
int cpu_count = (int) rtems_smp_get_number_of_processors();
int cpu_count = (int) rtems_smp_get_processor_count();
int cpu_self = rtems_smp_get_current_processor();
int cpu;
int test;

View File

@@ -341,11 +341,7 @@ uninitialized =
/*partmp.h*/ 0 +
#endif
#if defined(RTEMS_SMP)
/*percpu.h*/ (_SMP_Processor_count * sizeof(Per_CPU_Control)) +
#else
/*percpu.h*/ (sizeof (Per_CPU_Control) ) +
#endif
/*percpu.h*/ (_SMP_Get_processor_count() * sizeof(Per_CPU_Control)) +
/*ratemon.h*/ (sizeof _Rate_monotonic_Information) +