@@ -239,7 +239,7 @@ extern "C" {
 /* macros */

 #define Shm_Is_master_node() \
-  ( SHM_MASTER == _Configuration_MP_table->node )
+  ( SHM_MASTER == rtems_object_get_local_node() )

 #define Shm_Free_envelope( ecb ) \
   Shm_Locked_queue_Add( FREE_ENV_CB, (ecb) )

@@ -41,7 +41,7 @@ Shm_Print_statistics(void)
   packets_per_second++;

   printk( "\n\nSHMDR STATISTICS (NODE %" PRId32 ")\n",
-    Multiprocessing_configuration.node );
+    rtems_object_get_local_node() );
   printk( "TICKS SINCE BOOT = %" PRId32 "\n", ticks );
   printk( "TICKS PER SECOND = %" PRId32 "\n", ticks_per_second );
   printk( "ISRs=%" PRId32 "\n", Shm_Interrupt_count );

@@ -46,7 +46,7 @@ rtems_mpci_entry Shm_Initialization( void )
   uint32_t remaining_memory;
   uint32_t local_node;

-  local_node = _Configuration_MP_table->node;
+  local_node = rtems_object_get_local_node();

   Shm_Get_configuration( local_node, &Shm_Configuration );

@@ -46,7 +46,7 @@ rtems_mpci_entry Shm_Send_packet(
   }
   else {
     for( nnum = SHM_FIRST_NODE ; nnum <= SHM_MAXIMUM_NODES ; nnum++ )
-      if ( _Configuration_MP_table->node != nnum ) {
+      if ( rtems_object_get_local_node() != nnum ) {
         struct pkt_cpy *pkt;

         tmp_ecb = Shm_Allocate_envelope();
@@ -357,6 +357,16 @@ rtems_status_code rtems_object_get_class_information(
   rtems_object_api_class_information *info
 );

+/**
+ * @brief Get the local MPCI node number.
+ *
+ * @return The local MPCI node number.
+ */
+RTEMS_INLINE_ROUTINE uint16_t rtems_object_get_local_node( void )
+{
+  return _Objects_Local_node;
+}
+
 #ifdef __cplusplus
 }
 #endif
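The hunk above is the only new public declaration in this change; every other hunk simply switches callers from the internal variables over to the new directive. As a quick illustration, here is a minimal, hypothetical application fragment (not part of this patch) that uses it; the tests further down treat node 1 as the master node, and on single-processor configurations the directive simply returns 1 because _Objects_Local_node is then defined as ((uint16_t) 1).

#include <rtems.h>
#include <inttypes.h>
#include <stdio.h>

/* Hypothetical helper: report which MPCI node this image is running on. */
static void report_local_node( void )
{
  uint16_t local_node = rtems_object_get_local_node();

  printf(
    "running on node %" PRIu16 " (%s)\n",
    local_node,
    local_node == 1 ? "master" : "slave"
  );
}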
@@ -338,6 +338,15 @@ RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_index(
 #define _Objects_Maximum_per_allocation( maximum ) \
   ((Objects_Maximum) ((maximum) & ~OBJECTS_UNLIMITED_OBJECTS))

+/**
+ * @brief The local MPCI node number.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+extern uint16_t _Objects_Local_node;
+#else
+#define _Objects_Local_node ((uint16_t) 1)
+#endif
+
 /** @} */
 /** @} */
 /** @} */

@@ -63,15 +63,6 @@ typedef enum {
   OBJECTS_FAKE_OBJECTS_SCHEDULERS = 1
 } Objects_Fake_objects_API;

-/**
- * The following is referenced to the node number of the local node.
- */
-#if defined(RTEMS_MULTIPROCESSING)
-extern uint16_t _Objects_Local_node;
-#else
-#define _Objects_Local_node ((uint16_t)1)
-#endif
-
 /**
  * The following is referenced to the number of nodes in the system.
  */
@@ -51,7 +51,7 @@ int rtems_verror(

 #if defined(RTEMS_MULTIPROCESSING)
   if (_System_state_Is_multiprocessing)
-    fprintf(stderr, "[%" PRIu32 "] ", _Configuration_MP_table->node);
+    fprintf(stderr, "[%" PRIu16 "] ", rtems_object_get_local_node());
 #endif

   chars_written += vfprintf(stderr, printf_format, arglist);

@@ -3,7 +3,7 @@
 #endif

 #include <unistd.h>
-#include <rtems/score/objectimpl.h>
+#include <rtems.h>

 /*
  * 4.3.1 Get Process Group IDs, P1003.1b-1993, p. 89

@@ -16,5 +16,5 @@ pid_t getpgrp( void )
    * this will always be the local node;
    */

-  return _Objects_Local_node;
+  return rtems_object_get_local_node();
 }
@@ -11,7 +11,7 @@

 #include <unistd.h>

-#include <rtems/score/objectimpl.h>
+#include <rtems.h>
 #include <rtems/seterr.h>

 /**

@@ -19,7 +19,7 @@
  */
 pid_t getpid( void )
 {
-  return _Objects_Local_node;
+  return rtems_object_get_local_node();
 }

 #if defined(RTEMS_NEWLIB) && !defined(HAVE__GETPID_R)

@@ -23,7 +23,7 @@
 #include <sys/utsname.h>
 #include <inttypes.h>

-#include <rtems/score/objectimpl.h>
+#include <rtems.h>
 #include <rtems/seterr.h>

 /*

@@ -47,7 +47,11 @@ int uname(

   strncpy( name->sysname, "RTEMS", sizeof(name->sysname) );

-  snprintf( name->nodename, sizeof(name->nodename), "Node %" PRId16, _Objects_Local_node );
+  snprintf(
+    name->nodename,
+    sizeof(name->nodename),
+    "Node %" PRId16, rtems_object_get_local_node()
+  );

   strncpy( name->release, RTEMS_VERSION, sizeof(name->release) );
@@ -102,8 +102,7 @@ uint32_t is_configured_multiprocessing(void)

 uint32_t get_node(void)
 {
-  /* XXX HACK -- use public API */
-  return _Objects_Local_node;
+  return rtems_object_get_local_node();
 }

 typedef struct {
@@ -36,10 +36,10 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 1 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

-  if ( Multiprocessing_configuration.node != 1 ) c = 'S';
+  if ( rtems_object_get_local_node() != 1 ) c = 'S';
   else c = 'M';

   Task_name[ 1 ] = rtems_build_name( c, 'A', '1', ' ' );

@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 3 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[ Multiprocessing_configuration.node ],
+    Task_name[ rtems_object_get_local_node() ],
     1,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_NO_PREEMPT,
@@ -20,7 +20,7 @@ static void Test_Task_Support(
   rtems_event_set events;
   rtems_status_code status;

-  if ( Multiprocessing_configuration.node == node ) {
+  if ( rtems_object_get_local_node() == node ) {

     for ( ; ; ) {

@@ -98,7 +98,7 @@ rtems_task Test_task(
   directive_failed( status, "rtems_task_ident" );

   puts( "Getting TID of remote task" );
-  remote_node = (Multiprocessing_configuration.node == 1) ? 2 : 1;
+  remote_node = (rtems_object_get_local_node() == 1) ? 2 : 1;
   printf( "Remote task's name is : " );
   put_name( Task_name[ remote_node ], TRUE );

@@ -130,7 +130,7 @@ rtems_task Test_task(
   );
   directive_failed( status, "rtems_timer_fire_after" );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     status = rtems_task_wake_after( 2 * rtems_clock_get_ticks_per_second() );
     directive_failed( status, "rtems_task_wake_after" );
   }
@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 4 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,8 +42,8 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[ Multiprocessing_configuration.node ],
-    Multiprocessing_configuration.node,
+    Task_name[ rtems_object_get_local_node() ],
+    rtems_object_get_local_node(),
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_DEFAULT_MODES,
     RTEMS_GLOBAL,

@@ -22,8 +22,6 @@
 #include "system.h"
 #include "tmacros.h"

-extern rtems_multiprocessing_table Multiprocessing_configuration;
-
 rtems_task Test_task(
   rtems_task_argument argument
 )

@@ -39,7 +37,7 @@ rtems_task Test_task(
   directive_failed( status, "rtems_task_ident" );

   puts( "Getting TID of remote task" );
-  remote_node = (Multiprocessing_configuration.node == 1) ? 2 : 1;
+  remote_node = (rtems_object_get_local_node() == 1) ? 2 : 1;
   puts_nocr( "Remote task's name is : " );
   put_name( Task_name[ remote_node ], TRUE );

@@ -55,7 +53,7 @@ rtems_task Test_task(

   status = rtems_task_set_priority(
     remote_tid,
-    Multiprocessing_configuration.node,
+    rtems_object_get_local_node(),
     &previous_priority
   );
   directive_failed( status, "rtems_task_set_priority" );
@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 5 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[Multiprocessing_configuration.node],
+    Task_name[rtems_object_get_local_node()],
     1,
     RTEMS_MINIMUM_STACK_SIZE * 2,
     RTEMS_TIMESLICE,

@@ -50,7 +50,7 @@ rtems_task Test_task(
   status = rtems_signal_catch( Process_asr, RTEMS_NO_ASR|RTEMS_NO_PREEMPT );
   directive_failed( status, "rtems_signal_catch" );

-  if (Multiprocessing_configuration.node == 1) {
+  if (rtems_object_get_local_node() == 1) {
     remote_node = 2;
     remote_signal = RTEMS_SIGNAL_18;
     expected_signal = RTEMS_SIGNAL_17;

@@ -81,7 +81,7 @@ rtems_task Test_task(
   );
   directive_failed( status, "rtems_timer_fire_after" );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Sending signal to remote task" );
     do {
       status = rtems_signal_send( remote_tid, remote_signal );
@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 6 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[Multiprocessing_configuration.node],
+    Task_name[rtems_object_get_local_node()],
     1,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_DEFAULT_MODES,

@@ -80,7 +80,7 @@ rtems_task Test_task(

   Stop_Test = false;

-  remote_node = (Multiprocessing_configuration.node == 1) ? 2 : 1;
+  remote_node = (rtems_object_get_local_node() == 1) ? 2 : 1;
   puts_nocr( "Remote task's name is : " );
   put_name( Task_name[ remote_node ], TRUE );

@@ -94,7 +94,7 @@ rtems_task Test_task(
   } while ( status != RTEMS_SUCCESSFUL );
   directive_failed( status, "rtems_task_ident FAILED!!" );

-  if ( Multiprocessing_configuration.node == 1 )
+  if ( rtems_object_get_local_node() == 1 )
     puts( "Sending events to remote task" );
   else
     puts( "Receiving events from remote task" );

@@ -115,7 +115,7 @@ rtems_task Test_task(

     event_for_this_iteration = Event_set_table[ count % 32 ];

-    if ( Multiprocessing_configuration.node == 1 ) {
+    if ( rtems_object_get_local_node() == 1 ) {
       status = rtems_event_send( remote_tid, event_for_this_iteration );
       directive_failed( status, "rtems_event_send" );
@@ -129,7 +129,7 @@ rtems_task Test_task(
       &event_out
     );
     if ( rtems_are_statuses_equal( status, RTEMS_TIMEOUT ) ) {
-      if ( Multiprocessing_configuration.node == 2 )
+      if ( rtems_object_get_local_node() == 2 )
         puts( "\nCorrect behavior if the other node exitted." );
       else
         puts( "\nERROR... node 1 died" );

@@ -146,7 +146,7 @@ rtems_task Test_task(

   putchar( '\n' );

-  if ( Multiprocessing_configuration.node == 2 ) {
+  if ( rtems_object_get_local_node() == 2 ) {
     /* Flush events */
     puts( "Flushing RTEMS_EVENT_16" );
     (void) rtems_event_receive(RTEMS_EVENT_16, RTEMS_NO_WAIT, 0, &event_out);

@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 7 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[Multiprocessing_configuration.node],
+    Task_name[rtems_object_get_local_node()],
     1,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,
@@ -45,7 +45,7 @@ rtems_task Test_task(

   Stop_Test = false;

-  remote_node = (Multiprocessing_configuration.node == 1) ? 2 : 1;
+  remote_node = (rtems_object_get_local_node() == 1) ? 2 : 1;
   puts_nocr( "Remote task's name is : " );
   put_name( Task_name[ remote_node ], TRUE );

@@ -58,7 +58,7 @@ rtems_task Test_task(
     );
   } while ( !rtems_is_status_successful( status ) );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Sending first event to remote task" );
     status = rtems_event_send( remote_tid, RTEMS_EVENT_16 );
     directive_failed( status, "rtems_event_send" );

@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 8 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   Semaphore_name[ 1 ] = rtems_build_name( 'S', 'E', 'M', '\0' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Creating Sempahore (Global)" );
     status = rtems_semaphore_create(
       Semaphore_name[ 1 ],

@@ -56,7 +56,7 @@ rtems_task Init(

   puts( "Creating Test_task (Global)" );
   status = rtems_task_create(
-    Task_name[ Multiprocessing_configuration.node ],
+    Task_name[ rtems_object_get_local_node() ],
     1,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,
@@ -39,7 +39,7 @@ rtems_task Test_task(
     );
   } while ( !rtems_is_status_successful( status ) );

-  if ( Multiprocessing_configuration.node == 2 ) {
+  if ( rtems_object_get_local_node() == 2 ) {
     status = rtems_semaphore_delete( Semaphore_id[ 1 ] );
     fatal_directive_status(
       status,

@@ -70,7 +70,7 @@ rtems_task Test_task(
     rtems_test_exit( 0 );
   }

-  if ( Multiprocessing_configuration.node == 1 && ++count == 1000 ) {
+  if ( rtems_object_get_local_node() == 1 && ++count == 1000 ) {
     status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() );
     directive_failed( status, "rtems_task_wake_after" );

@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 9 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -42,7 +42,7 @@ rtems_task Init(

   Queue_name[ 1 ] = rtems_build_name( 'M', 'S', 'G', ' ' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Creating Message Queue (Global)" );
     status = rtems_message_queue_create(
       Queue_name[ 1 ],

@@ -56,7 +56,7 @@ rtems_task Init(

   puts( "Creating Test_task (local)" );
   status = rtems_task_create(
-    Task_name[Multiprocessing_configuration.node],
+    Task_name[rtems_object_get_local_node()],
     1,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,
@@ -49,7 +49,7 @@ rtems_task Test_task(
     );
   } while ( !rtems_is_status_successful( status ) );

-  if ( Multiprocessing_configuration.node == 2 ) {
+  if ( rtems_object_get_local_node() == 2 ) {
     status = rtems_message_queue_delete( Queue_id[ 1 ] );
     fatal_directive_status(
       status,

@@ -40,7 +40,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 10 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( 'T', 'A', '1', ' ' );

@@ -51,7 +51,7 @@ rtems_task Init(

   Semaphore_name[ 1 ] = rtems_build_name( 'S', 'E', 'M', ' ' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Creating Message Queue (Global)" );
     status = rtems_message_queue_create(
       Queue_name[ 1 ],

@@ -40,7 +40,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 11 -- NODE %" PRIu32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -50,7 +50,7 @@ rtems_task Init(

   Semaphore_name[ 1 ] = rtems_build_name( 'S', 'E', 'M', ' ' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Attempting to create Test_task (Global)" );
     status = rtems_task_create(
       Task_name[ 1 ],
@@ -42,7 +42,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 12 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -52,7 +52,7 @@ rtems_task Init(

   puts( "Got to initialization task" );

-  if ( Multiprocessing_configuration.node == 2 ) {
+  if ( rtems_object_get_local_node() == 2 ) {
     status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() );
     directive_failed( status, "rtems_task_wake_after" );

@@ -34,7 +34,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 13 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Task_name[ 1 ] = rtems_build_name( '1', '1', '1', ' ' );

@@ -44,7 +44,7 @@ rtems_task Init(

   Semaphore_name[ 1 ] = rtems_build_name( 'S', 'E', 'M', ' ' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Creating Message Queue (Global)" );
     status = rtems_message_queue_create(
       Queue_name[ 1 ],

@@ -103,7 +103,7 @@ rtems_task Init(
   status = rtems_task_start( Task_id[ 2 ], Test_task2, 0 );
   directive_failed( status, "rtems_task_start" );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     status = rtems_task_wake_after( 5 * rtems_clock_get_ticks_per_second() );
     directive_failed( status, "rtems_task_wake_after" );
@@ -41,7 +41,7 @@ rtems_task Test_task1(
     );
   } while ( !rtems_is_status_successful( status ) );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Receiving message ..." );
     status = rtems_message_queue_receive(
       Queue_id[ 1 ],

@@ -40,7 +40,7 @@ rtems_task Test_task2(

   directive_failed( status, "rtems_semaphore_ident" );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() );
     directive_failed( status, "rtems_task_wake_after" );

@@ -23,8 +23,6 @@

 #include "system.h"

-extern rtems_multiprocessing_table Multiprocessing_configuration;
-
 rtems_task Test_task(
   rtems_task_argument argument
 )

@@ -35,7 +33,7 @@ rtems_task Test_task(
   rtems_id remote_tid;
   rtems_event_set event_out;

-  remote_node = ((Multiprocessing_configuration.node == 1) ? 2 : 1);
+  remote_node = ((rtems_object_get_local_node() == 1) ? 2 : 1);

   puts( "About to go to sleep!" );
   status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() );

@@ -59,7 +57,7 @@ rtems_task Test_task(
     rtems_task_wake_after(2);
   }

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Sending events to remote task" );
     while ( Stop_Test == false ) {
       for ( count=EVENT_TASK_DOT_COUNT; Stop_Test == false && count; count-- ) {
@@ -51,7 +51,7 @@ rtems_task Init(

   printf(
     "\n\n*** TEST 14 -- NODE %" PRId32 " ***\n",
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );

   Stop_Test = false;

@@ -90,7 +90,7 @@ rtems_task Init(

   Timer_name[ 1 ] = rtems_build_name( 'T', 'M', 'R', ' ' );

-  if ( Multiprocessing_configuration.node == 1 ) {
+  if ( rtems_object_get_local_node() == 1 ) {
     puts( "Creating Semaphore (Global)" );
     status = rtems_semaphore_create(
       Semaphore_name[ 1 ],

@@ -125,7 +125,7 @@ rtems_task Init(

   puts( "Creating Event task (Global)" );
   status = rtems_task_create(
-    Task_name[ Multiprocessing_configuration.node ],
+    Task_name[ rtems_object_get_local_node() ],
     2,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,

@@ -140,7 +140,7 @@ rtems_task Init(

   puts( "Creating Semaphore task (Global)" );
   status = rtems_task_create(
-    Semaphore_task_name[ Multiprocessing_configuration.node ],
+    Semaphore_task_name[ rtems_object_get_local_node() ],
     2,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,

@@ -155,7 +155,7 @@ rtems_task Init(

   puts( "Creating Message Queue task (Global)" );
   status = rtems_task_create(
-    Queue_task_name[ Multiprocessing_configuration.node ],
+    Queue_task_name[ rtems_object_get_local_node() ],
     2,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_TIMESLICE,

@@ -171,7 +171,7 @@ rtems_task Init(

   puts( "Creating Partition task (Global)" );
   status = rtems_task_create(
-    Partition_task_name[ Multiprocessing_configuration.node ],
+    Partition_task_name[ rtems_object_get_local_node() ],
     2,
     RTEMS_MINIMUM_STACK_SIZE * 2,
     RTEMS_TIMESLICE,
@@ -53,7 +53,7 @@ rtems_task Message_queue_task(
       rtems_task_wake_after(2);
     }

-    if ( Multiprocessing_configuration.node == 1 ) {
+    if ( rtems_object_get_local_node() == 1 ) {
       status = rtems_message_queue_send(
         Queue_id[ 1 ],
         (long (*)[4])Msg_buffer[ index ],

@@ -94,7 +94,7 @@ rtems_task Message_queue_task(
     directive_failed( status, "rtems_message_queue_send" );

     if (Stop_Test == false)
-      if ( Multiprocessing_configuration.node == 1 && --yield_count == 0 ) {
+      if ( rtems_object_get_local_node() == 1 && --yield_count == 0 ) {
         status = rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
         directive_failed( status, "rtems_task_wake_after" );

@@ -22,8 +22,6 @@

 #include "system.h"

-extern rtems_multiprocessing_table Multiprocessing_configuration;
-
 rtems_task Partition_task(
   rtems_task_argument argument
 )

@@ -59,7 +57,7 @@ rtems_task Partition_task(
   directive_failed( status, "rtems_partition_return_buffer" );

   if (Stop_Test == false)
-    if ( Multiprocessing_configuration.node == 1 && --yield_count == 0 ) {
+    if ( rtems_object_get_local_node() == 1 && --yield_count == 0 ) {
       status = rtems_task_wake_after( 1 );
       directive_failed( status, "rtems_task_wake_after" );
       yield_count = 100;
@@ -21,8 +21,6 @@

 #include "system.h"

-extern rtems_multiprocessing_table Multiprocessing_configuration;
-
 rtems_task Semaphore_task(
   rtems_task_argument argument
 )

@@ -60,7 +58,7 @@ rtems_task Semaphore_task(
   directive_failed( status, "rtems_semaphore_release" );

   if ( Stop_Test == false )
-    if ( Multiprocessing_configuration.node == 1 && --yield_count == 0 ) {
+    if ( rtems_object_get_local_node() == 1 && --yield_count == 0 ) {
      status = rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
      directive_failed( status, "rtems_task_wake_after" );

@@ -104,8 +104,6 @@ TEST_EXTERN rtems_name Timer_name[ 4 ]; /* event timer names */

 TEST_EXTERN uint32_t Msg_buffer[ 4 ][ 4 ];

-extern rtems_multiprocessing_table Multiprocessing_configuration;
-
 TEST_EXTERN volatile bool Stop_Test;
 TEST_EXTERN rtems_id timer_id;

@@ -40,7 +40,7 @@ rtems_task Init(
   status = rtems_task_start(
     tid,
     Application_task,
-    Multiprocessing_configuration.node
+    rtems_object_get_local_node()
   );
   rtems_test_assert(status == RTEMS_SUCCESSFUL);