2011-08-22 Jennifer Averett <Jennifer.Averett@OARcorp.com>

PR 1876
	* score/Makefile.am, score/include/rtems/score/isr.h, score/src/isr.c,
	score/src/smp.c, score/src/smplock.c, score/src/threaddispatch.c,
	score/src/threaddispatchdisablelevel.c: Add smp isr support.
	* score/src/isrsmp.c: New file.
This commit is contained in:
Jennifer Averett
2011-08-22 18:26:08 +00:00
parent 3f1545b851
commit dad36c52b8
9 changed files with 500 additions and 92 deletions

View File

@@ -1,3 +1,11 @@
2011-08-22 Jennifer Averett <Jennifer.Averett@OARcorp.com>
PR 1876
* score/Makefile.am, score/include/rtems/score/isr.h, score/src/isr.c,
score/src/smp.c, score/src/smplock.c, score/src/threaddispatch.c,
score/src/threaddispatchdisablelevel.c: Add smp isr support.
* score/src/isrsmp.c: New file.
2011-08-21 Petr Benes <benesp16@fel.cvut.cz>
PR 1886/cpukit

View File

@@ -133,7 +133,7 @@ libscore_a_SOURCES += src/mpci.c src/objectmp.c src/threadmp.c
endif
if HAS_SMP
libscore_a_SOURCES += src/smp.c src/smplock.c \
libscore_a_SOURCES += src/isrsmp.c src/smp.c src/smplock.c \
src/schedulersimplesmpblock.c src/schedulersimplesmpschedule.c \
src/schedulersimplesmpunblock.c src/schedulersimplesmptick.c
endif

View File

@@ -90,7 +90,7 @@ SCORE_EXTERN ISR_Handler_entry *_ISR_Vector_table;
void _ISR_Handler_initialization ( void );
/**
* @brief Disable Interrupts
* @brief Disable Interrupts on This Core
*
* This routine disables all interrupts so that a critical section
* of code can be executing without being interrupted.
@@ -98,30 +98,30 @@ void _ISR_Handler_initialization ( void );
* @return The argument @a _level will contain the previous interrupt
* mask level.
*/
#define _ISR_Disable( _level ) \
#define _ISR_Disable_on_this_core( _level ) \
do { \
_CPU_ISR_Disable( _level ); \
RTEMS_COMPILER_MEMORY_BARRIER(); \
} while (0)
/**
* @brief Enable Interrupts
* @brief Enable Interrupts on This Core
*
* This routine enables interrupts to the previous interrupt mask
* LEVEL. It is used at the end of a critical section of code to
* enable interrupts so they can be processed again.
*
* @param[in] level contains the interrupt level mask level
* previously returned by @ref _ISR_Disable_on_core.
* previously returned by @ref _ISR_Disable_on_this_core.
*/
#define _ISR_Enable( _level ) \
#define _ISR_Enable_on_this_core( _level ) \
do { \
RTEMS_COMPILER_MEMORY_BARRIER(); \
_CPU_ISR_Enable( _level ); \
} while (0)
/**
* @brief Temporarily Enable Interrupts
* @brief Temporarily Enable Interrupts on This Core
*
* This routine temporarily enables interrupts to the previous
* interrupt mask level and then disables all interrupts so that
@@ -137,15 +137,139 @@ void _ISR_Handler_initialization ( void );
* properly protects itself.
*
* @param[in] level contains the interrupt level mask level
* previously returned by @ref _ISR_Disable_on_core.
* previously returned by @ref _ISR_Disable_on_this_core.
*/
#define _ISR_Flash( _level ) \
#define _ISR_Flash_on_this_core( _level ) \
do { \
RTEMS_COMPILER_MEMORY_BARRIER(); \
_CPU_ISR_Flash( _level ); \
RTEMS_COMPILER_MEMORY_BARRIER(); \
} while (0)
#if defined(RTEMS_SMP)
/**
* @brief Initialize SMP Interrupt Critical Section Support
*
* This method initializes the variables required by the SMP implementation
* of interrupt critical section management.
*/
void _ISR_SMP_Initialize(void);
/**
* @brief Enter Interrupt Critical Section on SMP System
*
* This method is used to enter an interrupt critical section that
* is honored across all cores in an SMP system.
*
* @return This method returns the previous interrupt mask level.
*/
ISR_Level _ISR_SMP_Disable(void);
/**
* @brief Exit Interrupt Critical Section on SMP System
*
* This method is used to exit an interrupt critical section that
* is honored across all cores in an SMP system.
*
* @param[in] level contains the interrupt level mask level
* previously returned by @ref _ISR_SMP_Disable.
*/
void _ISR_SMP_Enable(ISR_Level level);
/**
* @brief Temporarily Exit Interrupt Critical Section on SMP System
*
* This method is used to temporarily exit an interrupt critical section
* that is honored across all cores in an SMP system.
*
* @param[in] level contains the interrupt level mask level
* previously returned by @ref _ISR_SMP_Disable.
*/
void _ISR_SMP_Flash(ISR_Level level);
/**
* @brief Enter SMP interrupt code
*
* This method is used to enter the SMP interrupt section.
*
* @return This method returns the isr level.
*/
int _ISR_SMP_Enter(void);
/**
* @brief Exit SMP interrupt code
*
* This method is used to exit the SMP interrupt.
*
* @return This method returns 0 on a simple return and returns 1 on a
* dispatching return.
*/
int _ISR_SMP_Exit(void);
#endif
/**
* @brief Enter Interrupt Disable Critical Section
*
* This routine enters an interrupt disable critical section. When
* in an SMP configuration, this involves obtaining a spinlock to ensure
* that only one core is inside an interrupt disable critical section.
* When on a single core system, this only involves disabling local
* CPU interrupts.
*
* @return The argument @a _level will contain the previous interrupt
* mask level.
*/
#if defined(RTEMS_SMP)
#define _ISR_Disable( _level ) \
_level = _ISR_SMP_Disable();
#else
#define _ISR_Disable( _level ) \
_ISR_Disable_on_this_core( _level );
#endif
/**
* @brief Exits Interrupt Disable Critical Section
*
* This routine exits an interrupt disable critical section. When
* in an SMP configuration, this involves releasing a spinlock.
* When on a single core system, this only involves disabling local
* CPU interrupts.
*
* @return The argument @a _level will contain the previous interrupt
* mask level.
*/
#if defined(RTEMS_SMP)
#define _ISR_Enable( _level ) \
_ISR_SMP_Enable( _level );
#else
#define _ISR_Enable( _level ) \
_ISR_Enable_on_this_core( _level );
#endif
/**
* @brief Temporarily Exit Interrupt Disable Critical Section
*
* This routine is used to temporarily enable interrupts
* during a long critical section. It is used in long sections of
* critical code when a point is reached at which interrupts can
* be temporarily enabled. Deciding where to flash interrupts
* in a long critical section is often difficult and the point
* must be selected with care to ensure that the critical section
* properly protects itself.
*
* @return The argument @a _level will contain the previous interrupt
* mask level.
*/
#if defined(RTEMS_SMP)
#define _ISR_Flash( _level ) \
_ISR_SMP_Flash( _level );
#else
#define _ISR_Flash( _level ) \
_ISR_Flash_on_this_core( _level );
#endif
/**
* @brief Install Interrupt Handler Vector
*
@@ -201,7 +325,7 @@ void _ISR_Handler_initialization ( void );
* ensure that the necessary thread scheduling operations are
* performed when the outermost interrupt service routine exits.
*
* @note Implemented in assembly language.
* @note Typically implemented in assembly language.
*/
void _ISR_Handler( void );

View File

@@ -80,4 +80,7 @@ void _ISR_Handler_initialization( void )
_CPU_Install_interrupt_stack();
#endif
#if defined(RTEMS_SMP)
_ISR_SMP_Initialize();
#endif
}

104
cpukit/score/src/isrsmp.c Normal file
View File

@@ -0,0 +1,104 @@
/*
* ISR Enable/Disable for SMP Configurations
*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
* $Id$
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/thread.h>
#include <rtems/score/smp.h>
/*
 *  _ISR_SMP_Initialize
 *
 *  Initialize SMP interrupt critical section support.  There is currently
 *  no state to set up; this exists so _ISR_Handler_initialization() has a
 *  single SMP initialization hook to call.
 */
void _ISR_SMP_Initialize(void)
{
/* Intentionally empty -- no SMP ISR state to initialize yet. */
}
/*
 *  Enter an interrupt critical section.  In this implementation only
 *  interrupts on the executing core are masked; the prior mask is
 *  returned so the caller can hand it back to _ISR_SMP_Enable().
 */
ISR_Level _ISR_SMP_Disable(void)
{
  ISR_Level previous;

  _ISR_Disable_on_this_core( previous );

  return previous;
}
/*
 *  Leave an interrupt critical section by restoring the interrupt mask
 *  previously returned by _ISR_SMP_Disable().
 */
void _ISR_SMP_Enable(ISR_Level level)
{
  _ISR_Enable_on_this_core( level );
}
/*
 *  Momentarily open the interrupt critical section: restore the caller's
 *  previous mask, then immediately close the section again.  The level
 *  produced by the re-disable is deliberately discarded -- the caller
 *  keeps using its original level.
 */
void _ISR_SMP_Flash(ISR_Level level)
{
  ISR_Level discarded;

  _ISR_SMP_Enable( level );
  discarded = _ISR_SMP_Disable();
  (void) discarded;
}
/*
 *  _ISR_SMP_Enter
 *
 *  Enter an interrupt on this core: increment the ISR nest level and
 *  disable thread dispatching.  Returns the nest level from BEFORE the
 *  increment, so 0 means this is the outermost interrupt.
 *
 *  NOTE(review): interrupts are left masked on return -- "level" is not
 *  restored here.  Presumably the interrupt exit path (_ISR_SMP_Exit())
 *  re-enables via its own disable/enable pair; confirm against the
 *  BSP interrupt glue.
 */
int _ISR_SMP_Enter(void)
{
uint32_t isr_nest_level;
ISR_Level level;
/* Mask this core's interrupts before touching the nest level. */
_ISR_Disable_on_this_core( level );
isr_nest_level = _ISR_Nest_level++;
/* Balanced by _Thread_Dispatch_decrement_disable_level() in
 * _ISR_SMP_Exit(). */
_Thread_Disable_dispatch();
return isr_nest_level;
}
/*
 *  _ISR_SMP_Exit
 *
 *  Leave an interrupt on this core and decide whether the interrupt
 *  return path must perform a thread dispatch.
 *
 *  Return values:
 *     0 - simple return
 *     1 - dispatching return
 */
int _ISR_SMP_Exit(void)
{
ISR_Level level;
int retval;
retval = 0;
_ISR_Disable_on_this_core( level );
_ISR_Nest_level--;
/* Only the outermost interrupt may dispatch, and only when a dispatch
 * is necessary and the sole remaining disable level is the one taken
 * by _ISR_SMP_Enter() itself (presumably level 1 -- see Enter). */
if ( _ISR_Nest_level == 0 ) {
if ( _Thread_Dispatch_necessary ) {
if ( _Thread_Dispatch_get_disable_level() == 1 ) {
retval = 1;
}
}
}
/*
 * SPARC has special support to avoid some nasty recursive type behaviour.
 * When dispatching in a thread and we want to return to it then it needs
 * to finish.
 */
#if defined(__sparc__)
if ( _CPU_ISR_Dispatch_disable )
retval = 0;
#endif
_ISR_Enable_on_this_core( level );
/* Undo the _Thread_Disable_dispatch() performed in _ISR_SMP_Enter(). */
_Thread_Dispatch_decrement_disable_level();
/* Not dispatching locally: give the other cores a chance to run any
 * heir threads selected for them. */
if ( retval == 0 )
_SMP_Request_other_cores_to_dispatch();
return retval;
}

View File

@@ -18,10 +18,6 @@
#include <rtems/score/smp.h>
#include <rtems/score/thread.h>
#if defined(RTEMS_SMP)
#define RTEMS_DEBUG
#endif
#if defined(RTEMS_DEBUG)
#include <rtems/bspIo.h>
#endif
@@ -32,6 +28,9 @@
void rtems_smp_run_first_task(int cpu)
{
Thread_Control *heir;
ISR_Level level;
_ISR_Disable_on_this_core( level );
/*
* The Scheduler will have selected the heir thread for each CPU core.
@@ -50,16 +49,14 @@ void rtems_smp_run_first_task(int cpu)
*/
void rtems_smp_secondary_cpu_initialize(void)
{
int cpu;
int cpu;
ISR_Level level;
cpu = bsp_smp_processor_id();
_ISR_Disable_on_this_core( level );
bsp_smp_secondary_cpu_initialize(cpu);
#if defined(RTEMS_DEBUG)
printk( "Made it to %d -- ", cpu );
#endif
/*
* Inform the primary CPU that this secondary CPU is initialized
* and ready to dispatch to the first thread it is supposed to
@@ -67,20 +64,31 @@ void rtems_smp_secondary_cpu_initialize(void)
*/
_Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_INITIALIZED;
#if defined(RTEMS_DEBUG)
printk( "Made it to %d -- ", cpu );
#endif
/*
* With this secondary core out of reset, we can wait for the
* request to switch to the first task.
*
* XXX When SMP ISR code is complete, do we want interrupts on
* XXX or off at this point?
*/
_ISR_Set_level( 0 );
while(1) {
uint32_t message;
bsp_smp_wait_for(
(volatile unsigned int *)&_Per_CPU_Information[cpu].message,
RTEMS_BSP_SMP_FIRST_TASK,
10000
);
level = _SMP_lock_spinlock_simple_Obtain( &_Per_CPU_Information[cpu].lock );
message = _Per_CPU_Information[cpu].message;
if ( message & RTEMS_BSP_SMP_FIRST_TASK ) {
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
_ISR_Set_level( 0 );
}
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
}
}
@@ -119,33 +127,25 @@ void rtems_smp_process_interrupt(void)
#endif
if ( message & RTEMS_BSP_SMP_FIRST_TASK ) {
/*
* XXX Thread dispatch disable level at this point will have to be
* XXX revisited when Interrupts on SMP is addressed.
*/
_Thread_Dispatch_disable_level--; /* undo ISR code */
_Per_CPU_Information[cpu].isr_nest_level = 0;
_Per_CPU_Information[cpu].message &= ~message;
_Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_UP;
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
_Thread_Disable_dispatch();
rtems_smp_run_first_task(cpu);
/* does not return */
}
if ( message & RTEMS_BSP_SMP_SHUTDOWN ) {
/*
* XXX Thread dispatch disable level at this point will have to be
* XXX revisited when Interrupts on SMP is addressed.
*/
_Per_CPU_Information[cpu].message &= ~message;
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
_Thread_Dispatch_disable_level--; /* undo ISR code */
_Per_CPU_Information[cpu].isr_nest_level = 0;
_Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_SHUTDOWN;
_ISR_Disable( level );
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
_Thread_Enable_dispatch(); /* undo ISR code */
_ISR_Disable_on_this_core( level );
while(1)
;
/* does not continue past here */
@@ -155,10 +155,6 @@ void rtems_smp_process_interrupt(void)
#if defined(RTEMS_DEBUG)
printk( "switch needed\n" );
#endif
/*
* XXX Thread dispatch disable level at this point will have to be
* XXX revisited when Interrupts on SMP is addressed.
*/
_Per_CPU_Information[cpu].message &= ~message;
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
}
@@ -174,6 +170,11 @@ void _SMP_Send_message(
{
ISR_Level level;
#if defined(RTEMS_DEBUG)
if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
printk( "Send 0x%x to %d\n", message, cpu );
#endif
level = _SMP_lock_spinlock_simple_Obtain( &_Per_CPU_Information[cpu].lock );
_Per_CPU_Information[cpu].message |= message;
_SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );
@@ -210,15 +211,9 @@ void _SMP_Request_other_cores_to_perform_first_context_switch(void)
{
int cpu;
_Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_UP;
for (cpu=1 ; cpu < _SMP_Processor_count ; cpu++ ) {
_SMP_Send_message( cpu, RTEMS_BSP_SMP_FIRST_TASK );
while (_Per_CPU_Information[cpu].state != RTEMS_BSP_SMP_CPU_UP ) {
bsp_smp_wait_for(
(volatile unsigned int *)&_Per_CPU_Information[cpu].state,
RTEMS_BSP_SMP_CPU_UP,
10000
);
}
}
}
@@ -243,11 +238,6 @@ void _SMP_Request_other_cores_to_dispatch(void)
if ( !_Per_CPU_Information[i].dispatch_necessary )
continue;
_SMP_Send_message( i, RTEMS_BSP_SMP_CONTEXT_SWITCH_NECESSARY );
bsp_smp_wait_for(
(volatile unsigned int *)&_Per_CPU_Information[i].message,
0,
10000
);
}
}
@@ -256,28 +246,32 @@ void _SMP_Request_other_cores_to_dispatch(void)
*/
/*
 *  _SMP_Request_other_cores_to_shutdown
 *
 *  Broadcast a shutdown request and wait (bounded) for every other core
 *  to report RTEMS_BSP_SMP_CPU_SHUTDOWN.  The executing core is skipped.
 *  Logs a message if any core fails to shut down before the wait limit.
 *
 *  NOTE: this span of the diff contained both the removed and the added
 *  lines interleaved (duplicate declarations, loop headers and printk
 *  calls); only the added side is kept here.
 */
void _SMP_Request_other_cores_to_shutdown(void)
{
  bool allDown;
  int  ncpus;
  int  n;
  int  cpu;

  cpu   = bsp_smp_processor_id();
  ncpus = _SMP_Processor_count;

  _SMP_Broadcast_message( RTEMS_BSP_SMP_SHUTDOWN );

  allDown = true;
  for (n=0 ; n<ncpus ; n++ ) {
    if ( n == cpu )
      continue;
    /* Bounded spin -- the iteration limit keeps a hung core from
     * wedging the shutdown path forever. */
    bsp_smp_wait_for(
      (unsigned int *)&_Per_CPU_Information[n].state,
      RTEMS_BSP_SMP_CPU_SHUTDOWN,
      10000
    );
    if ( _Per_CPU_Information[n].state != RTEMS_BSP_SMP_CPU_SHUTDOWN )
      allDown = false;
  }
  if ( !allDown )
    printk( "not all down -- timed out\n" );
#if defined(RTEMS_DEBUG)
  else
    printk( "All CPUs shutdown successfully\n" );
#endif
}

View File

@@ -16,11 +16,62 @@
#include <rtems/system.h>
#include <rtems/score/smplock.h>
#include <rtems/score/smp.h>
#include <rtems/score/isr.h>
#if defined (RTEMS_DEBUG)
/*
* Some debug stuff that is being left in, but disabled. This will keep
* a log of lock/unlock sequences that can be printed out when the
* lockcount appears to have gotten off track.
*/
/* #define SMPLOCK_DEBUG */
#if defined (SMPLOCK_DEBUG)
#include <rtems/score/thread.h>
#include <rtems/bspIo.h>
#include <rtems/score/percpu.h>
#if (0)
#define ENABLE_ONESHOT_DEBUG_LOGGING
#else
#define ENABLE_LOOPED_DEBUG_LOGGING
#endif
#define ENABLE_DEBUG_LOGGING
#endif
/*
* Prototypes and structures used in the debug lock/unlock error log.
*/
#if defined(ENABLE_DEBUG_LOGGING)
typedef struct {
char action;
char lock;
char cpu;
char count;
uint32_t nest_level;
void *ret1;
void *ret2;
void *ret3;
void *ret4;
} debug_spinlog_t;
extern void start(void);
extern void _fini(void);
#define DEBUG_SPINLOG_MAX 1024
debug_spinlog_t DEBUG_SPINLOG[DEBUG_SPINLOG_MAX];
int DEBUG_SPINLOG_INDEX = 0;
static void debug_logit(
char act,
SMP_lock_spinlock_nested_Control *lock
);
static void debug_dump_log(void);
#else
#define debug_logit( _act, _lock )
#define debug_dump_log()
#endif
/*
* SMP spinlock simple methods
*/
void _SMP_lock_spinlock_simple_Initialize(
SMP_lock_spinlock_simple_Control *lock
)
@@ -32,14 +83,16 @@ ISR_Level _SMP_lock_spinlock_simple_Obtain(
SMP_lock_spinlock_simple_Control *lock
)
{
ISR_Level level;
ISR_Level level = 0;
uint32_t value = 1;
uint32_t previous;
/* Note: Disable provides an implicit memory barrier. */
_ISR_Disable( level );
_ISR_Disable_on_this_core( level );
do {
RTEMS_COMPILER_MEMORY_BARRIER();
SMP_CPU_SWAP( lock, value, previous );
RTEMS_COMPILER_MEMORY_BARRIER();
} while (previous == 1);
return level;
@@ -51,15 +104,53 @@ void _SMP_lock_spinlock_simple_Release(
)
{
*lock = 0;
_ISR_Enable( level );
_ISR_Enable_on_this_core( level );
}
/*
* SMP spinlock nested methods.
*/
/*
 *  _SMP_lock_spinlock_nested_Initialize
 *
 *  Initialize a nested spinlock: unlocked (count 0) and owned by no
 *  core.  -1 marks "no owner" so core 0 cannot be mistaken for the
 *  owner of an unlocked lock.  (The diff residue left the superseded
 *  "cpu_id = 0" assignment in place; only the final value is kept.)
 */
void _SMP_lock_spinlock_nested_Initialize(
  SMP_lock_spinlock_nested_Control *lock
)
{
  lock->count  = 0;
  lock->cpu_id = -1;
}
/*
 *  _SMP_lock_spinlock_nested_Release
 *
 *  Release one nesting level of a nested spinlock.  On the final
 *  release the lock is marked ownerless and then freed; in all cases
 *  interrupts are restored to the level returned by the matching
 *  Obtain call.
 */
void _SMP_lock_spinlock_nested_Release(
SMP_lock_spinlock_nested_Control *lock,
ISR_Level level
)
{
#if defined (RTEMS_DEBUG) || defined(SMPLOCK_DEBUG)
/* Unbalanced release: report it and dump the debug log rather than
 * driving count negative. */
if ( lock->count == 0 ) {
printk(
"\ncpu %d lock %d Releasing spinlock when count is already "
"zero (%p from %p,%p)?!?!\n",
bsp_smp_processor_id(),
lock->cpu_id,
lock,
__builtin_return_address(0),
__builtin_return_address(1)
);
debug_dump_log();
return;
}
#endif
/* assume we actually have it */
if (lock->count == 1) {
/* Final release: clear the owner first, then publish count = 0.
 * The barrier keeps the owner clear from being reordered past the
 * store that actually unlocks the lock. */
lock->cpu_id = -1;
debug_logit( 'U', lock );
RTEMS_COMPILER_MEMORY_BARRIER();
lock->count = 0;
} else {
/* Still nested on this core -- just drop one level. */
debug_logit( 'u', lock );
lock->count--;
}
_ISR_Enable_on_this_core( level );
}
ISR_Level _SMP_lock_spinlock_nested_Obtain(
@@ -72,36 +163,121 @@ ISR_Level _SMP_lock_spinlock_nested_Obtain(
int cpu_id;
/* Note: Disable provides an implicit memory barrier. */
_ISR_Disable( level );
_ISR_Disable_on_this_core( level );
cpu_id = bsp_smp_processor_id();
/* Deal with nested calls from one cpu */
if ( (lock->count > 0) && (cpu_id == lock->cpu_id) ) {
lock->count++;
return level;
/*
* Attempt to obtain the lock. If we do not get it immediately, then
* do a single "monitor" iteration. This should allow the loop to back
* off the bus a bit and allow the other core to finish sooner.
*/
while (1) {
RTEMS_COMPILER_MEMORY_BARRIER();
SMP_CPU_SWAP( &lock->count, value, previous );
RTEMS_COMPILER_MEMORY_BARRIER();
if ( previous == 0 ) {
/* was not locked */
break;
}
/* Deal with nested calls from one cpu */
if (cpu_id == lock->cpu_id) {
lock->count++;
debug_logit( 'l', lock );
return level;
}
}
do {
SMP_CPU_SWAP( lock, value, previous );
} while (previous == 1);
lock->count++;
lock->cpu_id = cpu_id;
debug_logit( 'L', lock );
return level;
}
void _SMP_lock_spinlock_nested_Release(
SMP_lock_spinlock_nested_Control *lock,
ISR_Level level
)
{
#if defined(RTEMS_DEBUG)
if ( lock->count == 0 )
printk ("Releasing spinlock when count is already zero?!?!\n");
#endif
lock->count--;
/*
* Debug log for debugging nested lock/unlock problems.
*/
#if defined(ENABLE_DEBUG_LOGGING)
/*
 *  debug_logit
 *
 *  Append one lock/unlock event to the in-memory debug log.  "act" is a
 *  single-character event code ('L'/'U' outer lock/unlock, 'l'/'u'
 *  nested).  Fixes vs. the committed text: the element type is
 *  debug_spinlog_t (the "debug_debug_spinlog_t" name was a typo in dead
 *  code), stray diff-residue statements from the removed Release body
 *  are dropped, and the return-address range checks cast start/_fini to
 *  void * so the comparisons are well-formed.
 */
static void debug_logit(
  char act,
  SMP_lock_spinlock_nested_Control *lock
)
{
  debug_spinlog_t *sp;

  if ( DEBUG_SPINLOG_INDEX == DEBUG_SPINLOG_MAX )
#if defined (ENABLE_LOOPED_DEBUG_LOGGING)
    DEBUG_SPINLOG_INDEX = 0;   /* wrap and keep logging */
#else
    return;                    /* one-shot log is full */
#endif

  sp = &DEBUG_SPINLOG[ DEBUG_SPINLOG_INDEX++ ];
  sp->action = act;
#if 0
  if ( lock == &_ISR_SMP_Lock )
    sp->lock = 'I';
  else
#endif
  if ( lock == &_Thread_Dispatch_disable_level_lock )
    sp->lock = 'D';
  sp->cpu   = bsp_smp_processor_id() + '0';
  sp->count = lock->count;
#if 0
  if ( sp->lock == 'I' ) {
    if ( _Thread_Dispatch_smp_spin_lock.id == 0 )
      printk( "not nested %p from %p %p %p %p\n", sp,
        __builtin_return_address(0), __builtin_return_address(1),
        __builtin_return_address(2), __builtin_return_address(3)
      );
  }
#endif
  sp->nest_level = _ISR_Nest_level;
  sp->ret1 = 0;
  sp->ret2 = 0;
  sp->ret3 = 0;
  sp->ret4 = 0;
  /*
   * Capture up to four caller return addresses, stopping at the first
   * one that falls outside the program text ([start, _fini]) since
   * __builtin_return_address() on deeper frames is unreliable there.
   */
  sp->ret1 = __builtin_return_address(0);
  if ( sp->ret1 >= (void *)start && sp->ret1 <= (void *)_fini ) {
    sp->ret2 = __builtin_return_address(1);
    if ( sp->ret2 >= (void *)start && sp->ret2 <= (void *)_fini ) {
      sp->ret3 = __builtin_return_address(2);
      if ( sp->ret3 >= (void *)start && sp->ret3 <= (void *)_fini ) {
        sp->ret4 = __builtin_return_address(3);
      }
    }
  }
}
static void debug_dump_log(void)
{
debug_debug_spinlog_t *sp;
int index;
bool done =false;
#if defined (ENABLE_ONESHOT_DEBUG_LOGGING)
index = 0;
#else
if (DEBUG_SPINLOG_INDEX >= DEBUG_SPINLOG_MAX)
index = 0;
else
index = DEBUG_SPINLOG_INDEX;
#endif
do {
sp = &DEBUG_SPINLOG[ index ];
printk("%d) act %c lock %c cpu %c count=%d nest %d (%p, %p, %p, %p)\n",
index, sp->action, sp->lock, sp->cpu, sp->count, sp->nest_level,
sp->ret1, sp->ret2, sp->ret3, sp->ret4
);
index++;
if (index == DEBUG_SPINLOG_INDEX)
break;
if (index >= DEBUG_SPINLOG_MAX)
index = 0;
} while (1);
}
#endif

View File

@@ -169,9 +169,10 @@ void _Thread_Dispatch( void )
}
post_switch:
_Thread_Dispatch_set_disable_level( 0 );
_ISR_Enable( level );
_Thread_Unnest_dispatch();
_API_extensions_Run_postswitch();
}

View File

@@ -66,8 +66,7 @@ uint32_t _Thread_Dispatch_increment_disable_level(void)
_Thread_Dispatch_disable_level++;
level = _Thread_Dispatch_disable_level;
_ISR_Enable(isr_level);
_ISR_Enable_on_this_core(isr_level);
return level;
}
@@ -79,8 +78,7 @@ uint32_t _Thread_Dispatch_decrement_disable_level(void)
/* First we must disable ISRs in order to protect
* accesses to the dispatch disable level.
*/
_ISR_Disable( isr_level );
_ISR_Disable_on_this_core( isr_level );
_Thread_Dispatch_disable_level--;
level = _Thread_Dispatch_disable_level;