smp: Add and use _Per_CPU_Get()

Add and use _Per_CPU_Get_by_index() and _Per_CPU_Get_index().  Add
_Per_CPU_Send_interrupt().  This avoids direct access to
_Per_CPU_Information.
This commit is contained in:
Sebastian Huber
2013-07-26 14:10:25 +02:00
parent ff63d2dbf9
commit fe52e7c07c
10 changed files with 81 additions and 49 deletions

View File

@@ -269,7 +269,7 @@ boot_cpu(imps_processor *proc)
);
reset[1] = (uint32_t)secondary_cpu_initialize;
reset[2] = (uint32_t)_Per_CPU_Information[apicid].interrupt_stack_high;
reset[2] = (uint32_t)_Per_CPU_Get_by_index(apicid)->interrupt_stack_high;
/*
* Generic CPU startup sequence starts here.

View File

@@ -87,6 +87,7 @@ static void mmu_config_undo(void)
static void release_core_1(void)
{
const Per_CPU_Control *second_cpu = _Per_CPU_Get_by_index(1);
uboot_spin_table *spin_table = (uboot_spin_table *) SPIN_TABLE;
qoriq_mmu_context mmu_context;
@@ -96,7 +97,7 @@ static void release_core_1(void)
qoriq_mmu_write_to_tlb1(&mmu_context, TLB_BEGIN);
spin_table->pir = 1;
spin_table->r3_lower = (uint32_t) _Per_CPU_Information[1].interrupt_stack_high;
spin_table->r3_lower = (uint32_t) second_cpu->interrupt_stack_high;
spin_table->addr_upper = 0;
rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
ppc_synchronize_data();
@@ -108,13 +109,15 @@ static void release_core_1(void)
void qoriq_secondary_cpu_initialize(void)
{
const Per_CPU_Control *second_cpu = _Per_CPU_Get_by_index(1);
/* Disable decrementer */
PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(BOOKE_TCR, BOOKE_TCR_DIE);
/* Initialize exception handler */
ppc_exc_initialize_with_vector_base(
PPC_INTERRUPT_DISABLE_MASK_DEFAULT,
(uintptr_t) _Per_CPU_Information[1].interrupt_stack_low,
(uintptr_t) second_cpu->interrupt_stack_low,
rtems_configuration_get_interrupt_stack_size(),
bsp_exc_vector_base
);

View File

@@ -86,12 +86,13 @@ uint32_t bsp_smp_initialize( uint32_t configured_cpu_count )
return 1;
for ( cpu=1 ; cpu < found_cpus ; cpu++ ) {
const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
#if defined(RTEMS_DEBUG)
printk( "Waking CPU %d\n", cpu );
#endif
bsp_ap_stack = _Per_CPU_Information[cpu].interrupt_stack_high -
bsp_ap_stack = per_cpu->interrupt_stack_high -
CPU_MINIMUM_STACK_FRAME_SIZE;
bsp_ap_entry = leon3_secondary_cpu_initialize;
@@ -101,9 +102,8 @@ uint32_t bsp_smp_initialize( uint32_t configured_cpu_count )
printk(
"CPU %d is %s\n",
cpu,
_Per_CPU_Information[cpu].state
== PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING ?
"online" : "offline"
per_cpu->state == PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING ?
"online" : "offline"
);
#endif
}

View File

@@ -47,7 +47,7 @@ void rtems_cpu_usage_reset( void )
processor_count = rtems_smp_get_processor_count();
for ( processor = 0 ; processor < processor_count ; ++processor ) {
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ processor ];
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
per_cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
}

View File

@@ -233,7 +233,31 @@ typedef struct {
*/
extern Per_CPU_Control _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
#if defined( RTEMS_SMP )
/*
 * Returns the per-CPU control of the processor executing this code,
 * looked up via _SMP_Get_current_processor().
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
return &_Per_CPU_Information[ _SMP_Get_current_processor() ];
}
#else
/* Uniprocessor build: the table has exactly one entry, at index 0. */
#define _Per_CPU_Get() ( &_Per_CPU_Information[ 0 ] )
#endif
/*
 * Returns the per-CPU control for the processor with the given index.
 * No range check is performed; the caller must supply a valid index.
 */
static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
{
  return _Per_CPU_Information + index;
}
/*
 * Maps a per-CPU control back to its processor index; this is the
 * inverse of _Per_CPU_Get_by_index().
 */
static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *per_cpu )
{
  const Per_CPU_Control *table_begin = &_Per_CPU_Information[ 0 ];

  return ( uint32_t ) ( per_cpu - table_begin );
}
#if defined(RTEMS_SMP)
/*
 * Sends an inter-processor interrupt to the processor that owns the
 * given per-CPU control, by translating the control back to its index
 * and delegating to the port-specific _CPU_SMP_Send_interrupt().
 */
static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *per_cpu )
{
_CPU_SMP_Send_interrupt( _Per_CPU_Get_index( per_cpu ) );
}
/**
* @brief Set of Pointers to Per CPU Core Information
*
@@ -280,19 +304,19 @@ void _Per_CPU_Wait_for_state(
* Thus when built for non-SMP, there should be no performance penalty.
*/
#define _Thread_Heir \
_Per_CPU_Information[_SMP_Get_current_processor()].heir
_Per_CPU_Get()->heir
#define _Thread_Executing \
_Per_CPU_Information[_SMP_Get_current_processor()].executing
_Per_CPU_Get()->executing
#define _ISR_Nest_level \
_Per_CPU_Information[_SMP_Get_current_processor()].isr_nest_level
_Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
_Per_CPU_Information[_SMP_Get_current_processor()].interrupt_stack_low
_Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
_Per_CPU_Information[_SMP_Get_current_processor()].interrupt_stack_high
_Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
_Per_CPU_Information[_SMP_Get_current_processor()].dispatch_necessary
_Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
_Per_CPU_Information[_SMP_Get_current_processor()].time_of_last_context_switch
_Per_CPU_Get()->time_of_last_context_switch
#endif /* ASM */

View File

@@ -39,10 +39,10 @@
/*
* Initialize per cpu pointer table
*/
_Per_CPU_Information_p[0] = &_Per_CPU_Information[0];
_Per_CPU_Information_p[0] = _Per_CPU_Get_by_index( 0 );
for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
Per_CPU_Control *p = &_Per_CPU_Information[cpu];
Per_CPU_Control *p = _Per_CPU_Get_by_index( cpu );
_Per_CPU_Information_p[cpu] = p;
@@ -68,8 +68,10 @@
_SMP_Processor_count = max_cpus;
for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
_Per_CPU_Wait_for_state(
&_Per_CPU_Information[ cpu ],
per_cpu,
PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING
);
}

View File

@@ -85,8 +85,8 @@ void _Scheduler_default_Tick( void )
uint32_t processor;
for ( processor = 0 ; processor < processor_count ; ++processor ) {
_Scheduler_default_Tick_for_executing(
_Per_CPU_Information[ processor ].executing
);
const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
_Scheduler_default_Tick_for_executing( per_cpu->executing );
}
}

View File

@@ -30,19 +30,18 @@
void rtems_smp_secondary_cpu_initialize( void )
{
uint32_t self = _SMP_Get_current_processor();
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ self ];
Per_CPU_Control *self_cpu = _Per_CPU_Get();
Thread_Control *heir;
#if defined(RTEMS_DEBUG)
printk( "Made it to %d -- ", self );
printk( "Made it to %d -- ", _Per_CPU_Get_index( self_cpu ) );
#endif
_Per_CPU_Change_state( per_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );
_Per_CPU_Change_state( self_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );
_Per_CPU_Wait_for_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
_Per_CPU_Wait_for_state( self_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
_Per_CPU_Change_state( per_cpu, PER_CPU_STATE_UP );
_Per_CPU_Change_state( self_cpu, PER_CPU_STATE_UP );
/*
* The Scheduler will have selected the heir thread for each CPU core.
@@ -50,11 +49,11 @@ void rtems_smp_secondary_cpu_initialize( void )
* force a switch to the designated heir and make it executing on
* THIS core.
*/
heir = per_cpu->heir;
heir = self_cpu->heir;
heir->is_executing = true;
per_cpu->executing->is_executing = false;
per_cpu->executing = heir;
per_cpu->dispatch_necessary = false;
self_cpu->executing->is_executing = false;
self_cpu->executing = heir;
self_cpu->dispatch_necessary = false;
/*
* Threads begin execution in the _Thread_Handler() function. This function
@@ -67,24 +66,28 @@ void rtems_smp_secondary_cpu_initialize( void )
void rtems_smp_process_interrupt( void )
{
uint32_t self = _SMP_Get_current_processor();
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ self ];
Per_CPU_Control *self_cpu = _Per_CPU_Get();
if ( per_cpu->message != 0 ) {
if ( self_cpu->message != 0 ) {
uint32_t message;
ISR_Level level;
_Per_CPU_Lock_acquire( per_cpu, level );
message = per_cpu->message;
per_cpu->message = 0;
_Per_CPU_Lock_release( per_cpu, level );
_Per_CPU_Lock_acquire( self_cpu, level );
message = self_cpu->message;
self_cpu->message = 0;
_Per_CPU_Lock_release( self_cpu, level );
#if defined(RTEMS_DEBUG)
{
void *sp = __builtin_frame_address(0);
if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) {
printk( "ISR on CPU %d -- (0x%02x) (0x%p)\n", self, message, sp );
printk(
"ISR on CPU %d -- (0x%02x) (0x%p)\n",
_Per_CPU_Get_index( self_cpu ),
message,
sp
);
if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
printk( "signal to self\n" );
if ( message & RTEMS_BSP_SMP_SHUTDOWN )
@@ -99,9 +102,9 @@ void rtems_smp_process_interrupt( void )
_Thread_Dispatch_set_disable_level( 0 );
_Per_CPU_Change_state( per_cpu, PER_CPU_STATE_SHUTDOWN );
_Per_CPU_Change_state( self_cpu, PER_CPU_STATE_SHUTDOWN );
_CPU_Fatal_halt( self );
_CPU_Fatal_halt( _Per_CPU_Get_index( self_cpu ) );
/* does not continue past here */
}
}
@@ -109,7 +112,7 @@ void rtems_smp_process_interrupt( void )
void _SMP_Send_message( uint32_t cpu, uint32_t message )
{
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
ISR_Level level;
#if defined(RTEMS_DEBUG)
@@ -132,7 +135,7 @@ void _SMP_Broadcast_message( uint32_t message )
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
if ( cpu != self ) {
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
ISR_Level level;
_Per_CPU_Lock_acquire( per_cpu, level );
@@ -151,7 +154,7 @@ void _SMP_Request_other_cores_to_perform_first_context_switch( void )
uint32_t cpu;
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
if ( cpu != self ) {
_Per_CPU_Change_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
@@ -194,7 +197,7 @@ void _SMP_Request_other_cores_to_shutdown( void )
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
if ( cpu != self ) {
_Per_CPU_Wait_for_state(
&_Per_CPU_Information[ cpu ],
_Per_CPU_Get_by_index( cpu ),
PER_CPU_STATE_SHUTDOWN
);
}

View File

@@ -73,7 +73,7 @@ void _Thread_Create_idle( void )
uint32_t processor;
for ( processor = 0 ; processor < processor_count ; ++processor ) {
Per_CPU_Control *per_cpu = &_Per_CPU_Information[ processor ];
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
_Thread_Create_idle_for_cpu( per_cpu );
}

View File

@@ -71,12 +71,12 @@ static bool is_per_cpu_state_ok(void)
uint32_t i;
for (i = 0; i < n; ++i) {
const Thread_Control *thread = _Per_CPU_Information[i].executing;
const Thread_Control *thread = _Per_CPU_Get_by_index(i)->executing;
uint32_t count = 0;
uint32_t j;
for (j = 0; j < n; ++j) {
const Per_CPU_Control *cpu = &_Per_CPU_Information[j];
const Per_CPU_Control *cpu = _Per_CPU_Get_by_index(j);
const Thread_Control *executing = cpu->executing;
const Thread_Control *heir = cpu->heir;