score: Per-CPU thread dispatch disable level
Use a per-CPU thread dispatch disable level: instead of one global thread dispatch disable level there is now one instance per processor. This is a major performance improvement for SMP. On non-SMP configurations this may simplify the interrupt entry/exit code.

The giant lock is still present, but it is now decoupled from thread dispatching in _Thread_Dispatch(), _Thread_Handler(), _Thread_Restart_self() and the interrupt entry/exit. Access to the giant lock is now available via _Giant_Acquire() and _Giant_Release(). The giant lock is still implicitly acquired via _Thread_Dispatch_increment_disable_level() and released via _Thread_Dispatch_decrement_disable_level(). The giant lock is only acquired for high-level operations in interrupt handlers (e.g. release of a semaphore, sending of an event). As a side effect this change fixes the lost "thread dispatch necessary" indication bug in _Thread_Dispatch().

A per-CPU thread dispatch disable level greatly simplifies the SMP support for the interrupt entry/exit code, since no spin locks have to be acquired in this area. It is only necessary to get the current processor index and use it to calculate the address of the processor's own per-CPU control. This reduces the interrupt latency considerably.

All elements needed by the interrupt entry/exit code are now part of the Per_CPU_Control structure: the thread dispatch disable level, the ISR nest level, and the thread dispatch necessary flag. Nothing else is required (except CPU port specific state, as on SPARC).
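The interrupt entry/exit path this enables reduces to a few loads and stores on the current processor's own control structure. The following is a minimal C sketch of that idea only, not the actual RTEMS code; the Example_* names and the fixed four-processor array are illustrative assumptions.

  #include <stdbool.h>
  #include <stdint.h>

  /* Simplified stand-in for the relevant Per_CPU_Control members. */
  typedef struct {
    uint32_t          isr_nest_level;
    volatile uint32_t thread_dispatch_disable_level;
    volatile bool     dispatch_necessary;
  } Example_Per_CPU_Control;

  /* One instance per processor; no global lock is needed to reach it. */
  static Example_Per_CPU_Control example_per_cpu[ 4 ];

  static uint32_t example_get_current_processor( void )
  {
    return 0; /* stand-in for the port-specific current processor index */
  }

  static void example_thread_dispatch( void )
  {
    /* stand-in for the thread dispatch operation */
  }

  /* Interrupt prologue: only the own per-CPU control is touched. */
  static Example_Per_CPU_Control *example_interrupt_prologue( void )
  {
    Example_Per_CPU_Control *cpu =
      &example_per_cpu[ example_get_current_processor() ];

    ++cpu->isr_nest_level;
    ++cpu->thread_dispatch_disable_level;

    return cpu;
  }

  /* Interrupt epilogue: dispatch only if the nesting fully unwinds and a
     dispatch became necessary on this processor while the ISR ran. */
  static void example_interrupt_epilogue( Example_Per_CPU_Control *cpu )
  {
    --cpu->isr_nest_level;
    --cpu->thread_dispatch_disable_level;

    if (
      cpu->isr_nest_level == 0
        && cpu->thread_dispatch_disable_level == 0
        && cpu->dispatch_necessary
    ) {
      example_thread_dispatch();
    }
  }

Only these three per-CPU fields are consulted, which is why the entry/exit code needs no spin lock: each processor reads and writes nothing but its own Per_CPU_Control, and higher-level work done from interrupt handlers goes through the giant lock separately.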
@@ -87,7 +87,6 @@
#endif

EXTERN(_Thread_Dispatch_disable_level,4)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

@@ -239,10 +238,10 @@ _chk_int:
/*
 *  _Thread_Dispatch_disable_level++;
 */
lw t1,_Thread_Dispatch_disable_level
lw t1,THREAD_DISPATCH_DISABLE_LEVEL
NOP
add t1,t1,1
sw t1,_Thread_Dispatch_disable_level
sw t1,THREAD_DISPATCH_DISABLE_LEVEL

/* DEBUG - Add the following code to disable interrupts and clear
 *         EXL in status register, this will allow memory

@@ -302,10 +301,10 @@ _ISR_Handler_cleanup:
/*
 *  --_Thread_Dispatch_disable_level;
 */
lw t1,_Thread_Dispatch_disable_level
lw t1,THREAD_DISPATCH_DISABLE_LEVEL
NOP
add t1,t1,-1
sw t1,_Thread_Dispatch_disable_level
sw t1,THREAD_DISPATCH_DISABLE_LEVEL
/*
 *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
 *    goto the label "exit interrupt (simple case)"
@@ -176,7 +176,7 @@ PUBLIC(_ISR_Handler)
 * nest and thread dispatch disable levels are unnested.
 */

setx SYM(_Thread_Dispatch_disable_level), %o5, %g3
setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
lduw [%g3], %g5
setx ISR_NEST_LEVEL, %o5, %g6
lduw [%g6], %g4

@@ -299,7 +299,7 @@ PUBLIC(_ISR_Handler)
 * window for _ISR_Handler.
 */

setx SYM(_Thread_Dispatch_disable_level), %o5, %g3
setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
lduw [%g3],%g5
lduw [%g6],%g4
sub %g5, 1, %g5
@@ -26,15 +26,23 @@

rtems_status_code rtems_clock_tick( void )
{
#if defined( RTEMS_SMP )
  _Thread_Disable_dispatch();
#endif

  _TOD_Tickle_ticks();

  _Watchdog_Tickle_ticks();

  _Scheduler_Tick();

#if defined( RTEMS_SMP )
  _Thread_Enable_dispatch();
#else
  if ( _Thread_Is_context_switch_necessary() &&
       _Thread_Dispatch_is_enabled() )
    _Thread_Dispatch();
#endif

  return RTEMS_SUCCESSFUL;
}
@@ -34,7 +34,15 @@ rtems_status_code rtems_task_restart(

    case OBJECTS_LOCAL:
      if ( _Thread_Restart( the_thread, NULL, argument ) ) {
        if ( _Thread_Is_executing( the_thread ) ) {
          _Objects_Put_and_keep_thread_dispatch_disabled(
            &the_thread->Object
          );
          _Thread_Restart_self();
        } else {
          _Objects_Put( &the_thread->Object );
        }

        return RTEMS_SUCCESSFUL;
      }
      _Objects_Put( &the_thread->Object );
@@ -116,7 +116,6 @@ libscore_a_SOURCES += src/mpci.c src/objectmp.c src/threadmp.c
endif

if HAS_SMP
libscore_a_SOURCES += src/isrsmp.c
libscore_a_SOURCES += src/schedulersimplesmp.c
libscore_a_SOURCES += src/smp.c
endif
@@ -313,7 +313,6 @@ SYM(_CPU_Context_restore):

.globl SYM(_ISR_Handler)
SYM(_ISR_Handler):
.extern SYM(_Thread_Dispatch_disable_level)
/* all interrupts are disabled at this point */
/* the following few items are pushed onto the task stack for at
   most one interrupt; nested interrupts will be using the interrupt

@@ -338,8 +337,8 @@ SYM(_ISR_Handler):
[--sp] = r0;
noStackSwitch:
/* disable thread dispatch */
p0.h = SYM(_Thread_Dispatch_disable_level);
p0.l = SYM(_Thread_Dispatch_disable_level);
p0.h = THREAD_DISPATCH_DISABLE_LEVEL;
p0.l = THREAD_DISPATCH_DISABLE_LEVEL;
r0 = [p0];
r0 += 1;
[p0] = r0;

@@ -459,8 +458,8 @@ noStackRestore:

/* check this stuff to ensure context_switch_necessary and
   isr_signals_to_thread_executing are being handled appropriately. */
p0.h = SYM(_Thread_Dispatch_disable_level);
p0.l = SYM(_Thread_Dispatch_disable_level);
p0.h = THREAD_DISPATCH_DISABLE_LEVEL;
p0.l = THREAD_DISPATCH_DISABLE_LEVEL;
r0 = [p0];
r0 += -1;
[p0] = r0;
@@ -127,9 +127,9 @@ nested:
mov.l @ISR_NEST_LEVEL,er1
inc.l #1,er1
mov.l er1,@ISR_NEST_LEVEL
mov.l @SYM(_Thread_Dispatch_disable_level),er1
mov.l @THREAD_DISPATCH_DISABLE_LEVEL,er1
inc.l #1,er1
mov.l er1,@SYM(_Thread_Dispatch_disable_level)
mov.l er1,@THREAD_DISPATCH_DISABLE_LEVEL

/* Vector to ISR */

@@ -145,9 +145,9 @@ nested:
mov.l @ISR_NEST_LEVEL,er1
dec.l #1,er1
mov.l er1,@ISR_NEST_LEVEL
mov.l @SYM(_Thread_Dispatch_disable_level),er1
mov.l @THREAD_DISPATCH_DISABLE_LEVEL,er1
dec.l #1,er1
mov.l er1,@SYM(_Thread_Dispatch_disable_level)
mov.l er1,@THREAD_DISPATCH_DISABLE_LEVEL
bne exit

mov.b @DISPATCH_NEEDED,er1
@@ -248,7 +248,7 @@ norst:

SYM (_ISR_Handler):
| disable multitasking
addql #1,SYM (_Thread_Dispatch_disable_level)
addql #1,THREAD_DISPATCH_DISABLE_LEVEL
#if ( !defined(__mcoldfire__) )
moveml d0-d1/a0-a1,a7@- | save d0-d1,a0-a1
#else

@@ -298,7 +298,7 @@ SYM (_ISR_Handler):
movel (a7),a7 | Restore task stack pointer
1:
#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */
subql #1,SYM (_Thread_Dispatch_disable_level)
subql #1,THREAD_DISPATCH_DISABLE_LEVEL
| unnest multitasking
bne.b exit | If dispatch disabled, exit
@@ -582,8 +582,6 @@ FRAME(_CPU_Context_restore,sp,0,ra)

ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_Thread_Dispatch_disable_level,4)

.extern _Thread_Dispatch
.extern _ISR_Vector_table

@@ -892,10 +890,10 @@ _ISR_Handler_1:
/*
 *  _Thread_Dispatch_disable_level++;
 */
lw t1,_Thread_Dispatch_disable_level
lw t1,THREAD_DISPATCH_DISABLE_LEVEL
NOP
add t1,t1,1
sw t1,_Thread_Dispatch_disable_level
sw t1,THREAD_DISPATCH_DISABLE_LEVEL

/*
 *  Call the CPU model or BSP specific routine to decode the

@@ -928,10 +926,10 @@ _ISR_Handler_1:
/*
 *  --_Thread_Dispatch_disable_level;
 */
lw t1,_Thread_Dispatch_disable_level
lw t1,THREAD_DISPATCH_DISABLE_LEVEL
NOP
add t1,t1,-1
sw t1,_Thread_Dispatch_disable_level
sw t1,THREAD_DISPATCH_DISABLE_LEVEL
/*
 *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
 *    goto the label "exit interrupt (simple case)"
@@ -96,28 +96,6 @@ SCORE_EXTERN ISR_Handler_entry *_ISR_Vector_table;
 */
void _ISR_Handler_initialization ( void );

#if defined( RTEMS_SMP )
/**
 *  @brief Enter SMP interrupt code.
 *
 *  This method is used to enter the SMP interrupt section.
 *
 *  @retval This method returns the isr level.
 */
int _ISR_SMP_Enter(void);

/**
 *  @brief Exit SMP interrupt code.
 *
 *  This method is used to exit the SMP interrupt.
 *
 *  @retval This method returns 0 on a simple return and returns 1 on a
 *          dispatching return.
 */
int _ISR_SMP_Exit(void);

#endif /* defined( RTEMS_SMP ) */

/**
 *  @brief Install interrupt handler vector.
 *
@@ -879,6 +879,23 @@ RTEMS_INLINE_ROUTINE void _Objects_Put_without_thread_dispatch(
  _Thread_Unnest_dispatch();
}

/**
 * @brief Puts back an object obtained with _Objects_Get().
 *
 * The thread dispatch disable level will remain unchanged.
 *
 * On SMP configurations the Giant lock will be released.
 */
RTEMS_INLINE_ROUTINE void _Objects_Put_and_keep_thread_dispatch_disabled(
  Objects_Control *the_object
)
{
  (void) the_object;
#if defined(RTEMS_SMP)
  _Giant_Release();
#endif
}

/**
 * @brief Puts back an object obtained with _Objects_Get_isr_disable().
 */
@@ -166,6 +166,12 @@ typedef struct {
   */
  uint32_t isr_nest_level;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /** This is set to true when this CPU needs to run the dispatcher. */
  volatile bool dispatch_necessary;

@@ -267,9 +273,14 @@ extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
#if defined( RTEMS_SMP )
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  _Assert_Thread_dispatching_repressed();
  Per_CPU_Control *per_cpu =
    &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu;

  return &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu;
  _Assert(
    per_cpu->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return per_cpu;
}
#else
#define _Per_CPU_Get() ( &_Per_CPU_Information[ 0 ].per_cpu )

@@ -325,6 +336,8 @@ void _Per_CPU_Wait_for_state(
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir
#define _Thread_Executing \

@@ -373,9 +386,13 @@ void _Per_CPU_Wait_for_state(
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_END_STACK
#define PER_CPU_DISPATCH_NEEDED \
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4

#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
@@ -14,7 +14,7 @@
#ifndef _RTEMS_SCORE_THREADDISPATCH_H
#define _RTEMS_SCORE_THREADDISPATCH_H

#include <rtems/score/cpu.h>
#include <rtems/score/percpu.h>
#include <rtems/score/smplock.h>

#ifdef __cplusplus

@@ -39,13 +39,6 @@ extern "C" {
 * @{
 */

/**
 *  The following declares the dispatch critical section nesting
 *  counter which is used to prevent context switches at inopportune
 *  moments.
 */
SCORE_EXTERN volatile uint32_t _Thread_Dispatch_disable_level;

/**
 * @brief Indicates if the executing thread is inside a thread dispatch
 * critical section.

@@ -56,36 +49,64 @@ SCORE_EXTERN volatile uint32_t _Thread_Dispatch_disable_level;
 */
RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)
{
  return _Thread_Dispatch_disable_level == 0;
}
  bool enabled;

#if defined(RTEMS_SMP)
typedef struct {
  SMP_lock_Control lock;
  uint32_t owner_cpu;
  uint32_t nest_level;
} Thread_Dispatch_disable_level_lock_control;
  ISR_Level level;

/**
 *  The following declares the smp spinlock to be used to control
 *  the dispatch critical section accesses across cpus.
  _ISR_Disable( level );
#endif

  enabled = _Thread_Dispatch_disable_level == 0;

#if defined(RTEMS_SMP)
  _ISR_Enable( level );
#endif

  return enabled;
}

/**
 * @briefs Gets thread dispatch disable level.
 *
 * @return The value of the thread dispatch level.
 */
SCORE_EXTERN Thread_Dispatch_disable_level_lock_control
  _Thread_Dispatch_disable_level_lock;
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void)
{
  return _Thread_Dispatch_disable_level;
}

/**
 * @brief Initializes the thread dispatching subsystem.
/**
 *  @brief Thread dispatch initialization.
 *
 *  This routine initializes the thread dispatching subsystem.
 */
void _Thread_Dispatch_initialization(void);
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
{
  _Thread_Dispatch_disable_level = 1;
}

#if defined(RTEMS_SMP)
/**
 * @brief Acquires the giant lock.
 *
 * The giant lock is a recursive SMP lock protecting nearly all operating
 * system services.
 *
 * This lock is implicitly acquired in
 * _Thread_Dispatch_increment_disable_level().
 *
 * Thread dispatching must be disabled before this lock can be acquired.
 */
void _Giant_Acquire( void );

/**
 * @brief Returns value of the the thread dispatch level.
 * @brief Releases the giant lock.
 *
 * This routine returns value of the the thread dispatch level.
 * This lock is implicitly released in
 * _Thread_Dispatch_decrement_disable_level().
 */
uint32_t _Thread_Dispatch_get_disable_level(void);
void _Giant_Release( void );

/**
 * @brief Sets thread dispatch level to the value passed in.
@@ -109,16 +130,6 @@ RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)
 */
uint32_t _Thread_Dispatch_decrement_disable_level(void);
#else /* RTEMS_SMP */
/**
 * @brief Get thread dispatch disable level.
 *
 * This routine returns value of the the thread dispatch level.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void)
{
  return _Thread_Dispatch_disable_level;
}

/**
 * @brief Set thread dispatch disable level.
 *

@@ -160,16 +171,6 @@ RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void)

  return level;
}

/**
 * @brief Thread dispatch initialization.
 *
 * This routine initializes the thread dispatching subsystem.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
{
  _Thread_Dispatch_set_disable_level( 1 );
}
#endif /* RTEMS_SMP */

/**
@@ -527,6 +527,13 @@ RTEMS_INLINE_ROUTINE void _Thread_Unblock (

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( void )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( _Thread_Executing->fp_context != NULL )
    _Context_Restore_fp( &_Thread_Executing->fp_context );
@@ -23,6 +23,15 @@
#if defined( RTEMS_DEBUG )
void _Assert_Thread_dispatching_repressed( void )
{
  _Assert( !_Thread_Dispatch_is_enabled() || _ISR_Get_level() != 0 );
  bool dispatch_is_disabled;
  ISR_Level level;
  Per_CPU_Control *per_cpu;

  _ISR_Disable( level );
  per_cpu = _Per_CPU_Get_by_index( _SMP_Get_current_processor() );
  dispatch_is_disabled = per_cpu->thread_dispatch_disable_level != 0;
  _ISR_Enable( level );

  _Assert( dispatch_is_disabled || _ISR_Get_level() != 0 );
}
#endif
@@ -1,76 +0,0 @@
/**
 * @file
 *
 * @brief Initialize, Disable, Enable, Flash, Enter, Exit ISR Implementation
 * @ingroup ScoreISR
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/thread.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/smp.h>

int _ISR_SMP_Enter(void)
{
  uint32_t isr_nest_level;
  ISR_Level level;

  /* FIXME: Where is the corresponding _ISR_Enable()? */
  _ISR_Disable( level );

  isr_nest_level = _ISR_Nest_level++;

  _Thread_Disable_dispatch();

  return isr_nest_level;
}

int _ISR_SMP_Exit(void)
{
  ISR_Level level;
  int retval;

  retval = 0;

  _ISR_Disable( level );

  _ISR_Nest_level--;

  if ( _ISR_Nest_level == 0 ) {
    if ( _Thread_Dispatch_necessary ) {
      if ( _Thread_Dispatch_get_disable_level() == 1 ) {
        retval = 1;
      }
    }
  }

  /*
   *  SPARC has special support to avoid some nasty recursive type behaviour.
   *  When dispatching in a thread and we want to return to it then it needs
   *  to finish.
   */
#if defined(__sparc__)
  if ( _CPU_ISR_Dispatch_disable )
    retval = 0;
#endif

  _ISR_Enable( level );

  _Thread_Dispatch_decrement_disable_level();

  return retval;
}
@@ -50,6 +50,12 @@ RTEMS_STATIC_ASSERT(
  PER_CPU_ISR_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  offsetof(Per_CPU_Control, thread_dispatch_disable_level)
    == PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL,
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL
);

RTEMS_STATIC_ASSERT(
  offsetof(Per_CPU_Control, dispatch_necessary) == PER_CPU_DISPATCH_NEEDED,
  PER_CPU_DISPATCH_NEEDED
@@ -50,6 +50,8 @@ static void _Scheduler_simple_smp_Allocate_processor(
  scheduled->is_scheduled = true;
  victim->is_scheduled = false;

  _Per_CPU_Acquire( cpu_of_scheduled );

  if ( scheduled->is_executing ) {
    heir = cpu_of_scheduled->heir;
    cpu_of_scheduled->heir = scheduled;

@@ -57,10 +59,18 @@ static void _Scheduler_simple_smp_Allocate_processor(
    heir = scheduled;
  }

  _Per_CPU_Release( cpu_of_scheduled );

  if ( heir != victim ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();

    heir->cpu = cpu_of_victim;

    /*
     * FIXME: Here we need atomic store operations with a relaxed memory order.
     *        The _CPU_SMP_Send_interrupt() will ensure that the change can be
     *        observed consistently.
     */
    cpu_of_victim->heir = heir;
    cpu_of_victim->dispatch_necessary = true;
@@ -20,6 +20,7 @@

#include <rtems/score/threaddispatch.h>
#include <rtems/score/apiext.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>

@@ -28,60 +29,47 @@

void _Thread_Dispatch( void )
{
  Per_CPU_Control *per_cpu;
  Thread_Control *executing;
  Thread_Control *heir;
  ISR_Level level;

#if defined(RTEMS_SMP)
  /*
   * WARNING: The SMP sequence has severe defects regarding the real-time
   * performance.
   *
   * Consider the following scenario.  We have three tasks L (lowest
   * priority), M (middle priority), and H (highest priority).  Now let a
   * thread dispatch from M to L happen.  An interrupt occurs in
   * _Thread_Dispatch() here:
   *
   * void _Thread_Dispatch( void )
   * {
   *   [...]
   *
   * post_switch:
   *
   *   _ISR_Enable( level );
   *
   *   <-- INTERRUPT
   *   <-- AFTER INTERRUPT
   *
   *   _Thread_Unnest_dispatch();
   *
   *   _API_extensions_Run_post_switch();
   * }
   *
   * The interrupt event makes task H ready.  The interrupt code will see
   * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
   * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".  This
   * means task L executes now although task H is ready!  Task H will execute
   * once someone calls _Thread_Dispatch().
   */
  _Thread_Disable_dispatch();
#else
  _Thread_Dispatch_set_disable_level( 1 );
#endif
#if defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

  per_cpu = _Per_CPU_Get();
  _Assert( per_cpu->thread_dispatch_disable_level == 0 );
  per_cpu->thread_dispatch_disable_level = 1;

#if defined( RTEMS_SMP )
  _ISR_Enable( level );
#endif

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
#if defined(RTEMS_SMP)
  executing = per_cpu->executing;
  _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  The scheduler must obtain the per-CPU lock to check if a
   * thread is executing and to update the heir.  This ensures that a thread
   * cannot execute on more than one processor at a time.  See also
   * _Thread_Handler() since _Context_switch() may branch to this function.
   */
  if ( per_cpu->dispatch_necessary ) {
#else
  while ( per_cpu->dispatch_necessary ) {
#endif
    heir = per_cpu->heir;
    per_cpu->dispatch_necessary = false;
    per_cpu->executing = heir;
#if defined( RTEMS_SMP )
    executing->is_executing = false;
    heir->is_executing = true;
#endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;
#endif

    /*
     *  When the heir and executing are the same, then we are being
@@ -102,16 +90,18 @@ void _Thread_Dispatch( void )
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &_Thread_Time_of_last_context_switch
      &per_cpu->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
      _TOD_Get_uptime( &per_cpu->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

@@ -165,21 +155,24 @@ void _Thread_Dispatch( void )
#endif
#endif

    executing = _Thread_Executing;
    /*
     * We have to obtain these values again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    per_cpu = _Per_CPU_Get();
    executing = per_cpu->executing;

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
#ifndef RTEMS_SMP
  _Thread_Dispatch_set_disable_level( 0 );
#endif
  _Assert( per_cpu->thread_dispatch_disable_level == 1 );
  per_cpu->thread_dispatch_disable_level = 0;

  _ISR_Enable( level );

#ifdef RTEMS_SMP
  _Thread_Unnest_dispatch();
#endif
  _Per_CPU_Release_and_ISR_enable( per_cpu, level );

  _API_extensions_Run_post_switch( executing );
}
@@ -15,59 +15,69 @@
 *  http://www.rtems.com/license/LICENSE.
 */

#include <rtems/system.h>
#include <rtems/score/apiext.h>
#include <rtems/score/context.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/priority.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>

#define NO_OWNER_CPU 0xffffffffU

void _Thread_Dispatch_initialization( void )
{
  Thread_Dispatch_disable_level_lock_control *level_lock =
    &_Thread_Dispatch_disable_level_lock;
typedef struct {
  SMP_lock_Control lock;
  uint32_t owner_cpu;
  uint32_t nest_level;
} Giant_Control;

  _Thread_Dispatch_disable_level = 0;
  _SMP_lock_Initialize( &level_lock->lock );
  level_lock->owner_cpu = NO_OWNER_CPU;
  _Thread_Dispatch_set_disable_level( 1 );
static Giant_Control _Giant = {
  .lock = SMP_LOCK_INITIALIZER,
  .owner_cpu = NO_OWNER_CPU,
  .nest_level = 0
};

static void _Giant_Do_acquire( uint32_t self_cpu_index )
{
  Giant_Control *giant = &_Giant;

  if ( giant->owner_cpu != self_cpu_index ) {
    _SMP_lock_Acquire( &giant->lock );
    giant->owner_cpu = self_cpu_index;
    giant->nest_level = 1;
  } else {
    ++giant->nest_level;
  }
}

uint32_t _Thread_Dispatch_get_disable_level(void)
static void _Giant_Do_release( void )
{
  return _Thread_Dispatch_disable_level;
  Giant_Control *giant = &_Giant;

  --giant->nest_level;
  if ( giant->nest_level == 0 ) {
    giant->owner_cpu = NO_OWNER_CPU;
    _SMP_lock_Release( &giant->lock );
  }
}

uint32_t _Thread_Dispatch_increment_disable_level( void )
{
  Thread_Dispatch_disable_level_lock_control *level_lock =
    &_Thread_Dispatch_disable_level_lock;
  Giant_Control *giant = &_Giant;
  ISR_Level isr_level;
  uint32_t self_cpu;
  uint32_t self_cpu_index;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable( isr_level );

  /*
   * We must obtain the processor ID after interrupts are disabled since a
   * non-optimizing compiler may store the value on the stack and read it back.
   * We must obtain the processor ID after interrupts are disabled to prevent
   * thread migration.
   */
  self_cpu = _SMP_Get_current_processor();
  self_cpu_index = _SMP_Get_current_processor();

  if ( level_lock->owner_cpu != self_cpu ) {
    _SMP_lock_Acquire( &level_lock->lock );
    level_lock->owner_cpu = self_cpu;
    level_lock->nest_level = 1;
  } else {
    ++level_lock->nest_level;
  }
  _Giant_Do_acquire( self_cpu_index );

  disable_level = _Thread_Dispatch_disable_level;
  self_cpu = _Per_CPU_Get_by_index( self_cpu_index );
  disable_level = self_cpu->thread_dispatch_disable_level;
  ++disable_level;
  _Thread_Dispatch_disable_level = disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _ISR_Enable( isr_level );
@@ -76,22 +86,18 @@ uint32_t _Thread_Dispatch_increment_disable_level( void )

uint32_t _Thread_Dispatch_decrement_disable_level( void )
{
  Thread_Dispatch_disable_level_lock_control *level_lock =
    &_Thread_Dispatch_disable_level_lock;
  ISR_Level isr_level;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable( isr_level );

  disable_level = _Thread_Dispatch_disable_level;
  self_cpu = _Per_CPU_Get();
  disable_level = self_cpu->thread_dispatch_disable_level;
  --disable_level;
  _Thread_Dispatch_disable_level = disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  --level_lock->nest_level;
  if ( level_lock->nest_level == 0 ) {
    level_lock->owner_cpu = NO_OWNER_CPU;
    _SMP_lock_Release( &level_lock->lock );
  }
  _Giant_Do_release();

  _ISR_Enable( isr_level );

@@ -110,13 +116,20 @@ uint32_t _Thread_Dispatch_decrement_disable_level( void )

uint32_t _Thread_Dispatch_set_disable_level(uint32_t value)
{
  ISR_Level isr_level;
  uint32_t disable_level;

  _ISR_Disable( isr_level );
  disable_level = _Thread_Dispatch_disable_level;
  _ISR_Enable( isr_level );

  /*
   * If we need the dispatch level to go higher
   * call increment method the desired number of times.
   */

  while ( value > _Thread_Dispatch_disable_level ) {
    _Thread_Dispatch_increment_disable_level();
  while ( value > disable_level ) {
    disable_level = _Thread_Dispatch_increment_disable_level();
  }

  /*

@@ -124,9 +137,29 @@ uint32_t _Thread_Dispatch_set_disable_level(uint32_t value)
   * call increment method the desired number of times.
   */

  while ( value < _Thread_Dispatch_disable_level ) {
    _Thread_Dispatch_decrement_disable_level();
  while ( value < disable_level ) {
    disable_level = _Thread_Dispatch_decrement_disable_level();
  }

  return value;
}

void _Giant_Acquire( void )
{
  ISR_Level isr_level;

  _ISR_Disable( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_acquire( _SMP_Get_current_processor() );
  _ISR_Enable( isr_level );
}

void _Giant_Release( void )
{
  ISR_Level isr_level;

  _ISR_Disable( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_release();
  _ISR_Enable( isr_level );
}
@@ -19,6 +19,7 @@
#endif

#include <rtems/score/threadimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/interr.h>
#include <rtems/score/isrlevel.h>
#include <rtems/score/userextimpl.h>

@@ -46,12 +47,46 @@
#define EXECUTE_GLOBAL_CONSTRUCTORS
#endif

#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
static bool _Thread_Handler_is_constructor_execution_required(
  Thread_Control *executing
)
{
  static bool doneConstructors;
  bool doCons = false;

#if defined(RTEMS_SMP)
  static SMP_lock_Control constructor_lock = SMP_LOCK_INITIALIZER;

  if ( !doneConstructors ) {
    _SMP_lock_Acquire( &constructor_lock );
#endif

#if defined(RTEMS_MULTIPROCESSING)
    doCons = !doneConstructors
      && _Objects_Get_API( executing->Object.id ) != OBJECTS_INTERNAL_API;
    if (doCons)
      doneConstructors = true;
#else
    (void) executing;
    doCons = !doneConstructors;
    doneConstructors = true;
#endif

#if defined(RTEMS_SMP)
    _SMP_lock_Release( &constructor_lock );
  }
#endif

  return doCons;
}
#endif

void _Thread_Handler( void )
{
  ISR_Level level;
  Thread_Control *executing;
#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
  static bool doneConstructors;
  bool doCons;
#endif

@@ -64,23 +99,17 @@ void _Thread_Handler( void )
   */
  _Context_Initialization_at_thread_begin();

#if !defined(RTEMS_SMP)
  /*
   * have to put level into a register for those cpu's that use
   * inline asm here
   */
  level = executing->Start.isr_level;
  _ISR_Set_level(level);
  _ISR_Set_level( level );
#endif

#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
#if defined(RTEMS_MULTIPROCESSING)
  doCons = !doneConstructors
    && _Objects_Get_API( executing->Object.id ) != OBJECTS_INTERNAL_API;
  if (doCons)
    doneConstructors = true;
#else
  doCons = !doneConstructors;
  doneConstructors = true;
#endif
  doCons = _Thread_Handler_is_constructor_execution_required( executing );
#endif

  /*
@@ -109,7 +138,34 @@ void _Thread_Handler( void )
  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */
#if defined(RTEMS_SMP)
  {
    /*
     * On SMP we enter _Thread_Handler() with interrupts disabled and
     * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
     * release it here and set the desired interrupt level of the thread.
     */
    Per_CPU_Control *per_cpu = _Per_CPU_Get();

    _Assert( per_cpu->thread_dispatch_disable_level == 1 );
    _Assert( _ISR_Get_level() != 0 );

    per_cpu->thread_dispatch_disable_level = 0;

    _Per_CPU_Release( per_cpu );

    level = executing->Start.isr_level;
    _ISR_Set_level( level);

    /*
     * The thread dispatch level changed from one to zero.  Make sure we lose
     * no thread dispatch necessary update.
     */
    _Thread_Dispatch();
  }
#else
  _Thread_Enable_dispatch();
#endif

#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
  /*
@@ -26,6 +26,7 @@ void _Thread_Load_environment(
)
{
  bool is_fp;
  uint32_t isr_level;

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( the_thread->Start.fp_context ) {

@@ -40,11 +41,22 @@ void _Thread_Load_environment(
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

#if defined( RTEMS_SMP )
  /*
   * On SMP we have to start the threads with interrupts disabled, see also
   * _Thread_Handler() and _Thread_Dispatch().  In _Thread_Handler() the
   * _ISR_Set_level() is used to set the desired interrupt state of the thread.
   */
  isr_level = CPU_MODES_INTERRUPT_MASK;
#else
  isr_level = the_thread->Start.isr_level;
#endif

  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    isr_level,
    _Thread_Handler,
    is_fp
  );
@@ -39,9 +39,6 @@ bool _Thread_Restart(

    _User_extensions_Thread_restart( the_thread );

    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();

    return true;
  }
@@ -28,6 +28,8 @@ void _Thread_Start_multitasking( Context_Control *context )
#if defined(RTEMS_SMP)
  _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_UP );

  _Per_CPU_Acquire( self_cpu );

  self_cpu->executing->is_executing = false;
  heir->is_executing = true;
#endif

@@ -70,10 +72,11 @@ void _Thread_Start_multitasking( Context_Control *context )
#if defined(RTEMS_SMP)
  } else {
    /*
     * Threads begin execution in the _Thread_Handler() function.  This function
     * will call _Thread_Enable_dispatch().
     * Threads begin execution in the _Thread_Handler() function.  This
     * function will set the thread dispatch disable level to zero and calls
     * _Per_CPU_Release().
     */
    _Thread_Disable_dispatch();
    self_cpu->thread_dispatch_disable_level = 1;

    _CPU_Context_switch_to_first_task_smp( &heir->Registers );
  }
@@ -61,9 +61,7 @@ rtems_task Init(
  }

  locked_printf( "rtems_clock_tick - so this task has run longer\n" );
  _Thread_Disable_dispatch();
  status = rtems_clock_tick();
  _Thread_Enable_dispatch();
  directive_failed( status, "clock tick" );

  cpu_num = rtems_smp_get_current_processor();
@@ -348,7 +348,11 @@ rtems_timer_service_routine test_unblock_task(
  _Thread_Disable_dispatch();
  status = rtems_task_resume( blocked_task_id );
  _Thread_Unnest_dispatch();
#if defined( RTEMS_SMP )
  directive_failed_with_level( status, "rtems_task_resume", 1 );
#else
  directive_failed( status, "rtems_task_resume" );
#endif
}

rtems_task Init(

@@ -383,9 +387,7 @@ rtems_task Init(
  /*
   *  Test clock tick from outside ISR
   */
  _Thread_Disable_dispatch();
  status = rtems_clock_tick();
  _Thread_Enable_dispatch();
  directive_failed( status, "rtems_clock_tick" );
  puts( "clock_tick from task level" );

@@ -422,9 +424,7 @@ rtems_task Init(

  /* we expect to be preempted from this call */
  for ( i=0 ; i<100 && blocked_task_status != 3 ; i++ ) {
    _Thread_Disable_dispatch();
    status = rtems_clock_tick();
    _Thread_Enable_dispatch();
    directive_failed( status, "rtems_clock_tick" );
  }
  switch ( blocked_task_status ) {
@@ -93,9 +93,7 @@ rtems_task Low_task(
)
{
  benchmark_timer_initialize();
  _Thread_Disable_dispatch();
  (void) rtems_clock_tick();
  _Thread_Enable_dispatch();
  end_time = benchmark_timer_read();

  put_time(
@@ -127,6 +127,24 @@ static void set_thread_executing( Thread_Control *thread )
#endif
}

static void thread_disable_dispatch( void )
{
#if defined( RTEMS_SMP )
  Per_CPU_Control *self_cpu;
  rtems_interrupt_level level;

  rtems_interrupt_disable( level );
  ( void ) level;

  self_cpu = _Per_CPU_Get();
  self_cpu->thread_dispatch_disable_level = 1;

  _Per_CPU_Acquire( self_cpu );
#else
  _Thread_Disable_dispatch();
#endif
}

rtems_task null_task(
  rtems_task_argument argument
)

@@ -306,7 +324,7 @@ rtems_task Middle_task(

  set_thread_dispatch_necessary( false );

  _Thread_Disable_dispatch();
  thread_disable_dispatch();

  benchmark_timer_initialize();
  _Context_Switch(

@@ -350,7 +368,7 @@ rtems_task Low_task(

  set_thread_dispatch_necessary( false );

  _Thread_Disable_dispatch();
  thread_disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)

@@ -383,7 +401,7 @@ rtems_task Floating_point_task_1(

  set_thread_dispatch_necessary( false );

  _Thread_Disable_dispatch();
  thread_disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)

@@ -407,12 +425,6 @@ rtems_task Floating_point_task_1(
    (Thread_Control *) _Chain_First(&ready_queues[FP2_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  _Context_Save_fp( &executing->fp_context );

@@ -444,12 +456,6 @@ rtems_task Floating_point_task_2(

  FP_LOAD( 1.0 );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  _Context_Save_fp( &executing->fp_context );