score: Implement forced thread migration

The current implementation of task migration in RTEMS has some
implications for the interrupt latency. It is crucial to preserve the
system invariant that a task can execute on at most one processor in
the system at a time. This is accomplished with a boolean indicator in
the task context. The architecture-specific low-level task context
switch code marks the context of the executing task as no longer
executing and waits until the heir context has stopped execution before
it restores the heir context and resumes execution of the heir task.
Thus there is one point in time at which a processor executes no task.
This is essential to avoid cyclic dependencies in case multiple tasks
migrate at once; otherwise some supervising entity would be necessary
to prevent livelocks. Such a global supervisor would lead to
scalability problems, so this approach is not used. Currently the
thread dispatch is performed with interrupts disabled, so if the heir
task is still executing on another processor, the time with interrupts
disabled is prolonged: one processor has to wait for another processor
to make progress.

It is difficult to avoid this interrupt latency issue, since interrupts
normally store the context of the interrupted task on its stack. Once a
task is marked as not executing, we must not use its task stack to
store such an interrupt context. We also cannot use the heir stack
before the heir has stopped execution on another processor. So if we
enable interrupts during this transition, we have to provide an
alternative, task-independent stack for this time frame. This issue
needs further investigation.
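
For illustration, here is a minimal C sketch of this hand-off protocol
(hypothetical names; the real implementations are the
architecture-specific assembly hunks in this commit for SPARC, PowerPC,
ARM and i386):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
      /* ... saved registers ... */
      volatile bool is_executing; /* the boolean indicator in the task context */
    } sketch_context;

    /* Hypothetical C rendering of the low-level context switch. */
    void sketch_context_switch(sketch_context *executing, sketch_context *heir)
    {
      /* ... save the registers of the executing context ... */

      /* Release the outgoing context; another processor may now run it. */
      atomic_thread_fence(memory_order_release);
      executing->is_executing = false;

      /*
       * Busy wait until the heir context stopped execution elsewhere.
       * Here the processor is without a task.  Since every processor
       * releases its old context before it waits, simultaneous
       * migrations form a chain of waits rather than a cycle.
       */
      while (heir->is_executing) {
        /* spin */
      }
      heir->is_executing = true;
      atomic_thread_fence(memory_order_acquire);

      /* ... restore the registers of the heir context and resume it ... */
    }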
Author: Sebastian Huber
Date:   2014-05-02 10:31:09 +02:00
Parent: 58444f7795
Commit: 38b59a6d30

43 changed files with 800 additions and 153 deletions


@@ -163,6 +163,21 @@ done_flushing:
         nop
         nop

+#if defined(RTEMS_SMP)
+        ! Indicate that this context is no longer executing
+        stb     %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+
+        ! Wait for context to stop execution if necessary
+1:
+        ldub    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
+        cmp     %g1, 0
+        bne     1b
+         mov    1, %g1
+
+        ! Indicate that this context is executing
+        stb     %g1, [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
         ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
         ld      [%o1 + G7_OFFSET], %g7
@@ -202,6 +217,11 @@ done_flushing:
 SYM(_CPU_Context_restore):
         save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
         rd      %psr, %o2
+#if defined(RTEMS_SMP)
+        ! On SPARC the restore path needs also a valid executing context on SMP
+        ! to update the is executing indicator.
+        mov     %i0, %o0
+#endif
         ba      SYM(_CPU_Context_restore_heir)
         mov     %i0, %o1                      ! in the delay slot


@@ -130,6 +130,10 @@ void _CPU_Context_Initialize(
   the_ppc_context->msr = msr_value;
   the_ppc_context->lr = (uint32_t) entry_point;

+#ifdef RTEMS_SMP
+  the_ppc_context->is_executing = false;
+#endif
+
 #ifdef __ALTIVEC__
   _CPU_Context_initialize_altivec( the_ppc_context );
 #endif


@@ -326,9 +326,29 @@ PROC (_CPU_Context_switch):
 	stw	r2, PPC_CONTEXT_OFFSET_GPR2(r3)

+#ifdef RTEMS_SMP
+	/* Indicate that this context is no longer executing */
+	msync
+	li	r5, 0
+	stb	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
+#endif
+
 	/* Restore context from r4 */
 restore_context:

+#ifdef RTEMS_SMP
+	/* Wait for context to stop execution if necessary */
+1:
+	lbz	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
+	cmpwi	r5, 0
+	bne	1b
+
+	/* Indicate that this context is executing */
+	li	r5, 1
+	stb	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
+	isync
+#endif
+
 #ifdef __ALTIVEC__
 	mr	r14, r4
 	.extern	_CPU_Context_switch_altivec


@@ -43,7 +43,7 @@
   }
 #else
   /* FIXME: Locking */
-  if ( the_thread->is_executing ) {
+  if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
     *time_of_context_switch =
       _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
     return true;


@@ -569,8 +569,6 @@ rtems_status_code rtems_task_get_scheduler(
  *
  * @retval RTEMS_SUCCESSFUL Successful operation.
  * @retval RTEMS_INVALID_ID Invalid task or scheduler identifier.
- * @retval RTEMS_INCORRECT_STATE The task is in the wrong state to perform a
- *   scheduler change.
  *
  * @see rtems_scheduler_ident().
  */


@@ -30,15 +30,14 @@ rtems_status_code rtems_task_set_scheduler(
   if ( _Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
     Thread_Control *the_thread;
     Objects_Locations location;
-    bool ok;

     the_thread = _Thread_Get( id, &location );

     switch ( location ) {
       case OBJECTS_LOCAL:
-        ok = _Scheduler_Set( scheduler, the_thread );
+        _Scheduler_Set( scheduler, the_thread );
         _Objects_Put( &the_thread->Object );
-        sc = ok ? RTEMS_SUCCESSFUL : RTEMS_INCORRECT_STATE;
+        sc = RTEMS_SUCCESSFUL;
         break;
 #if defined(RTEMS_MULTIPROCESSING)
       case OBJECTS_REMOTE:


@@ -50,6 +50,14 @@
   );
 #endif

+#ifdef RTEMS_SMP
+  RTEMS_STATIC_ASSERT(
+    offsetof( Context_Control, is_executing )
+      == ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
+    ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
+  );
+#endif
+
 RTEMS_STATIC_ASSERT(
   sizeof( CPU_Exception_frame ) == ARM_EXCEPTION_FRAME_SIZE,
   ARM_EXCEPTION_FRAME_SIZE
@@ -93,6 +101,10 @@ void _CPU_Context_Initialize(
   the_context->thread_id = (uint32_t) tls_area;
 #endif

+#ifdef RTEMS_SMP
+  the_context->is_executing = false;
+#endif
+
   if ( tls_area != NULL ) {
     _TLS_TCB_at_area_begin_initialize( tls_area );
   }


@@ -67,12 +67,32 @@ DEFINE_FUNCTION_ARM(_CPU_Context_switch)
 	str	r3, [r0, #ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET]
 #endif

+#ifdef RTEMS_SMP
+	/* Indicate that this context is no longer executing */
+	dmb
+	mov	r3, #0
+	strb	r3, [r0, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
 /* Start restoring context */
 _restore:
 #ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
 	clrex
 #endif

+#ifdef RTEMS_SMP
+	/* Wait for context to stop execution if necessary */
+1:
+	ldrb	r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+	cmp	r3, #0
+	bne	1b
+
+	/* Indicate that this context is executing */
+	dmb
+	mov	r3, #1
+	strb	r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
 #ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
 	ldr	r3, [r1, #ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET]
 	mcr	p15, 0, r3, c13, c0, 3


@@ -216,6 +216,14 @@
   #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
 #endif

+#ifdef RTEMS_SMP
+  #ifdef ARM_MULTILIB_VFP_D32
+    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
+  #else
+    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
+  #endif
+#endif
+
 #define ARM_EXCEPTION_FRAME_SIZE 76

 #define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52
@@ -280,6 +288,9 @@ typedef struct {
   uint64_t register_d14;
   uint64_t register_d15;
 #endif
+#ifdef RTEMS_SMP
+  volatile bool is_executing;
+#endif
 } Context_Control;

 typedef struct {
@@ -410,6 +421,11 @@ void _CPU_Context_Initialize(
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->register_sp

+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 #define _CPU_Context_Restart_self( _the_context ) \
    _CPU_Context_restore( (_the_context) );


@@ -26,6 +26,24 @@
 #include <rtems/bspIo.h>
 #include <rtems/score/thread.h>

+#define I386_ASSERT_OFFSET(field, off) \
+  RTEMS_STATIC_ASSERT( \
+    offsetof(Context_Control, field) \
+      == I386_CONTEXT_CONTROL_ ## off ## _OFFSET, \
+    Context_Control_ ## field \
+  )
+
+I386_ASSERT_OFFSET(eflags, EFLAGS);
+I386_ASSERT_OFFSET(esp, ESP);
+I386_ASSERT_OFFSET(ebp, EBP);
+I386_ASSERT_OFFSET(ebx, EBX);
+I386_ASSERT_OFFSET(esi, ESI);
+I386_ASSERT_OFFSET(edi, EDI);
+
+#ifdef RTEMS_SMP
+  I386_ASSERT_OFFSET(is_executing, IS_EXECUTING);
+#endif
+
 void _CPU_Initialize(void)
 {
 #if CPU_HARDWARE_FP


@@ -26,13 +26,12 @@
  *  Format of i386 Register structure
  */

-.set REG_EFLAGS,  0
-.set REG_ESP,     REG_EFLAGS + 4
-.set REG_EBP,     REG_ESP + 4
-.set REG_EBX,     REG_EBP + 4
-.set REG_ESI,     REG_EBX + 4
-.set REG_EDI,     REG_ESI + 4
-.set SIZE_REGS,   REG_EDI + 4
+.set REG_EFLAGS,  I386_CONTEXT_CONTROL_EFLAGS_OFFSET
+.set REG_ESP,     I386_CONTEXT_CONTROL_ESP_OFFSET
+.set REG_EBP,     I386_CONTEXT_CONTROL_EBP_OFFSET
+.set REG_EBX,     I386_CONTEXT_CONTROL_EBX_OFFSET
+.set REG_ESI,     I386_CONTEXT_CONTROL_ESI_OFFSET
+.set REG_EDI,     I386_CONTEXT_CONTROL_EDI_OFFSET

 BEGIN_CODE
@@ -58,9 +57,25 @@ SYM (_CPU_Context_switch):
         movl      esi,REG_ESI(eax)            /* save source register      */
         movl      edi,REG_EDI(eax)            /* save destination register */

+#ifdef RTEMS_SMP
+        /* Indicate that this context is no longer executing */
+        movb      $0, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
+#endif
+
         movl      HEIRCONTEXT_ARG(esp),eax    /* eax = heir threads context */

 restore:
+#ifdef RTEMS_SMP
+        /* Wait for context to stop execution if necessary */
+1:
+        movb      I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax), bl
+        testb     bl, bl
+        jne       1b
+
+        /* Indicate that this context is executing */
+        movb      $1, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
+#endif
+
         pushl     REG_EFLAGS(eax)             /* push eflags */
         popf                                  /* restore eflags */
         movl      REG_ESP(eax),esp            /* restore stack pointer */


@@ -128,6 +128,17 @@ extern "C" {

 #define CPU_PER_CPU_CONTROL_SIZE 0

+#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
+#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
+#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
+#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
+#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
+#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
+
+#ifdef RTEMS_SMP
+  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
+#endif
+
 /* structures */

 #ifndef ASM
@@ -147,11 +158,19 @@ typedef struct {
   uint32_t    ebx;      /* extended bx register                       */
   uint32_t    esi;      /* extended source index register             */
   uint32_t    edi;      /* extended destination index flags register  */
+#ifdef RTEMS_SMP
+  volatile bool is_executing;
+#endif
 }   Context_Control;

 #define _CPU_Context_Get_SP( _context ) \
   (_context)->esp

+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 /*
  *  FP context save area for the i387 numeric coprocessors.
  */
@@ -435,6 +454,13 @@ uint32_t   _CPU_ISR_Get_level( void );
  */

+#ifdef RTEMS_SMP
+  #define _I386_Context_Initialize_is_executing( _the_context ) \
+    (_the_context)->is_executing = false
+#else
+  #define _I386_Context_Initialize_is_executing( _the_context )
+#endif
+
 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                    _isr, _entry_point, _is_fp, _tls_area ) \
   do { \
@@ -449,6 +475,7 @@ uint32_t   _CPU_ISR_Get_level( void );
     *((proc_ptr *)(_stack)) = (_entry_point); \
     (_the_context)->ebp     = (void *) 0; \
     (_the_context)->esp     = (void *) _stack; \
+    _I386_Context_Initialize_is_executing( _the_context ); \
   } while (0)

 #define _CPU_Context_Restart_self( _the_context ) \


@@ -574,6 +574,18 @@ typedef struct {
    * is the stack pointer.
    */
   uint32_t stack_pointer;
+
+#ifdef RTEMS_SMP
+  /**
+   * @brief On SMP configurations the thread context must contain a boolean
+   * indicator if this context is executing on a processor.
+   *
+   * This field must be updated during a context switch.  The context switch
+   * to the heir must wait until the heir context indicates that it is no
+   * longer executing on a processor.
+   */
+  volatile bool is_executing;
+#endif
 } Context_Control;

 /**
@@ -1582,6 +1594,12 @@ register struct Per_CPU_Control *_CPU_Per_CPU_current asm( "rX" );
 {
   __asm__ volatile ( "" : : : "memory" );
 }
+
+/**
+ * @brief Macro to return the is executing field of the thread context.
+ */
+#define _CPU_Context_Get_is_executing( _context ) \
+  ( ( _context )->is_executing )
 #endif

 #ifdef __cplusplus


@@ -53,6 +53,10 @@ PPC_ASSERT_OFFSET(gpr30, GPR30);
 PPC_ASSERT_OFFSET(gpr31, GPR31);
 PPC_ASSERT_OFFSET(gpr2, GPR2);

+#ifdef RTEMS_SMP
+  PPC_ASSERT_OFFSET(is_executing, IS_EXECUTING);
+#endif
+
 RTEMS_STATIC_ASSERT(
   sizeof(Context_Control) % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
   ppc_context_size


@@ -302,6 +302,9 @@ typedef struct {
   PPC_GPR_TYPE gpr30;
   PPC_GPR_TYPE gpr31;
   uint32_t gpr2;
+  #ifdef RTEMS_SMP
+    volatile bool is_executing;
+  #endif
   #ifdef __ALTIVEC__
     /*
      * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
@@ -327,7 +330,7 @@ typedef struct {
   ];
 } Context_Control;

-static inline ppc_context *ppc_get_context( Context_Control *context )
+static inline ppc_context *ppc_get_context( const Context_Control *context )
 {
   uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
   uintptr_t mask = clsz - 1;
@@ -338,6 +341,11 @@ static inline ppc_context *ppc_get_context( Context_Control *context )
 #define _CPU_Context_Get_SP( _context ) \
   ppc_get_context(_context)->gpr1

+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    ppc_get_context(_context)->is_executing
+#endif
+
 #endif /* ASM */

 #define PPC_CONTEXT_OFFSET_GPR1 32
@@ -368,6 +376,10 @@ static inline ppc_context *ppc_get_context( Context_Control *context )
 #define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
 #define PPC_CONTEXT_OFFSET_GPR2 PPC_CONTEXT_GPR_OFFSET( 32 )

+#ifdef RTEMS_SMP
+  #define PPC_CONTEXT_OFFSET_IS_EXECUTING (PPC_CONTEXT_GPR_OFFSET( 32 ) + 4)
+#endif
+
 #ifndef ASM
 typedef struct {
   /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over


@@ -67,6 +67,10 @@ SPARC_ASSERT_OFFSET(o7, O7);
 SPARC_ASSERT_OFFSET(psr, PSR);
 SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);

+#if defined(RTEMS_SMP)
+  SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
+#endif
+
 /*
  *  This initializes the set of opcodes placed in each trap
  *  table entry.  The routine which installs a handler is responsible
@@ -326,6 +330,10 @@ void _CPU_Context_Initialize(
    */
    the_context->isr_dispatch_disable = 0;

+#if defined(RTEMS_SMP)
+  the_context->is_executing = false;
+#endif
+
   if ( tls_area != NULL ) {
     void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );


@@ -473,6 +473,10 @@ typedef struct {
    * SPARC CPU models at high interrupt rates.
    */
   uint32_t isr_dispatch_disable;
+
+#if defined(RTEMS_SMP)
+  volatile bool is_executing;
+#endif
 } Context_Control;

 /**
@@ -483,6 +487,11 @@ typedef struct {
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->o6_sp

+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 #endif /* ASM */

 /*
@@ -538,6 +547,10 @@ typedef struct {
 /** This macro defines an offset into the context for use in assembly. */
 #define ISR_DISPATCH_DISABLE_STACK_OFFSET 0x54

+#if defined(RTEMS_SMP)
+  #define SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x58
+#endif
+
 /** This defines the size of the context area for use in assembly. */
 #define CONTEXT_CONTROL_SIZE 0x68


@@ -56,6 +56,8 @@ extern "C" {
   typedef struct Thread_Control_struct Thread_Control;
 #endif

+struct Scheduler_Context;
+
 /**
  *  @defgroup PerCPU RTEMS Per CPU Information
  *
@@ -268,13 +270,46 @@ typedef struct Per_CPU_Control {
    */
   volatile uint32_t thread_dispatch_disable_level;

-  /** This is set to true when this CPU needs to run the dispatcher. */
+  /**
+   * @brief This is set to true when this processor needs to run the
+   * dispatcher.
+   *
+   * It is volatile since interrupts may alter this flag.
+   *
+   * This field is not protected by a lock.  There are two writers after
+   * multitasking start.  The scheduler owning this processor sets this
+   * indicator to true, after it updated the heir field.  This processor sets
+   * this indicator to false, before it reads the heir.  This field is used in
+   * combination with the heir field.
+   *
+   * @see _Thread_Get_heir_and_make_it_executing().
+   */
   volatile bool dispatch_necessary;

-  /** This is the thread executing on this CPU. */
+  /**
+   * @brief This is the thread executing on this processor.
+   *
+   * This field is not protected by a lock.  The only writer is this
+   * processor.
+   *
+   * On SMP configurations a thread may be registered as executing on more
+   * than one processor in case a thread migration is in progress.  On SMP
+   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
+   * a thread context is executing on a processor.
+   */
   Thread_Control *executing;

-  /** This is the heir thread for this this CPU. */
+  /**
+   * @brief This is the heir thread for this processor.
+   *
+   * This field is not protected by a lock.  The only writer after
+   * multitasking start is the scheduler owning this processor.  This
+   * processor will set the dispatch necessary indicator to false, before it
+   * reads the heir.  This field is used in combination with the dispatch
+   * necessary indicator.
+   *
+   * A thread can be a heir on at most one processor in the system.
+   *
+   * @see _Thread_Get_heir_and_make_it_executing().
+   */
   Thread_Control *heir;

   /** This is the time of the last context switch on this CPU. */
@@ -282,11 +317,12 @@ typedef struct Per_CPU_Control {

 #if defined( RTEMS_SMP )
   /**
-   * @brief This lock protects the dispatch_necessary, executing, heir and
-   * message fields.
+   * @brief This lock protects some parts of the low-level thread dispatching.
    *
    * We must use a ticket lock here since we cannot transport a local context
    * through the context switch.
+   *
+   * @see _Thread_Dispatch().
    */
   SMP_ticket_lock_Control Lock;

@@ -309,6 +345,11 @@ typedef struct Per_CPU_Control {
    */
   Atomic_Ulong message;

+  /**
+   * @brief The scheduler context of the scheduler owning this processor.
+   */
+  const struct Scheduler_Context *scheduler_context;
+
   /**
    * @brief Indicates the current state of the CPU.
    *
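
A minimal sketch of the heir/dispatch-necessary hand-shake documented
above, assuming hypothetical names (the real counterparts added by this
commit are _Scheduler_SMP_Update_heir() and
_Thread_Get_heir_and_make_it_executing(), see below):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct thread;

    struct per_cpu {
      struct thread *volatile heir;
      volatile bool dispatch_necessary;
    };

    /* Writer: the scheduler owning the processor publishes a new heir. */
    void sketch_update_heir(struct per_cpu *cpu, struct thread *heir)
    {
      cpu->heir = heir;
      /* First the heir, then the indicator, so the reader cannot observe
       * the indicator without the new heir. */
      atomic_thread_fence(memory_order_seq_cst);
      cpu->dispatch_necessary = true;
    }

    /* Reader: the processor itself consumes the heir during dispatch. */
    struct thread *sketch_get_heir(struct per_cpu *cpu)
    {
      cpu->dispatch_necessary = false;
      /* First clear the indicator, then read the heir; paired with the
       * writer's fence this cannot miss an update. */
      atomic_thread_fence(memory_order_seq_cst);
      return cpu->heir;
    }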


@@ -148,7 +148,7 @@ typedef struct {
  * The scheduler context of a particular scheduler implementation must place
  * this structure at the begin of its context structure.
  */
-typedef struct {
+typedef struct Scheduler_Context {
 #if defined(RTEMS_SMP)
   /**
    * @brief Count of processors owned by this scheduler instance.


@@ -390,29 +390,25 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
 #endif
 }

-RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
+RTEMS_INLINE_ROUTINE void _Scheduler_Set(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread
 )
 {
-  bool ok;
-
-  if ( _States_Is_dormant( the_thread->current_state ) ) {
 #if defined(RTEMS_SMP)
+  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );
+
+  if ( current_scheduler != scheduler ) {
+    _Thread_Set_state( the_thread, STATES_MIGRATING );
     _Scheduler_Free( _Scheduler_Get( the_thread ), the_thread );
     the_thread->scheduler = scheduler;
     _Scheduler_Allocate( scheduler, the_thread );
     _Scheduler_Update( scheduler, the_thread );
-#else
-    (void) scheduler;
-#endif
-    ok = true;
-  } else {
-    ok = false;
+    _Thread_Clear_state( the_thread, STATES_MIGRATING );
   }
-
-  return ok;
+#else
+  (void) scheduler;
+#endif
 }

 RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
@@ -448,9 +444,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
     ok = ok && !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
   }

-  if ( ok ) {
-    ok = _Scheduler_Set( scheduler, the_thread );
-  }
+  _Scheduler_Set( scheduler, the_thread );

   return ok;
 }


@@ -24,9 +24,7 @@
 #define _RTEMS_SCORE_SCHEDULERSMP_H

 #include <rtems/score/chain.h>
-#include <rtems/score/percpu.h>
-#include <rtems/score/prioritybitmap.h>
-#include <rtems/score/thread.h>
+#include <rtems/score/scheduler.h>

 #ifdef __cplusplus
 extern "C" {


@@ -24,9 +24,9 @@
 #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

 #include <rtems/score/schedulersmp.h>
-#include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/score/assert.h>
 #include <rtems/score/chainimpl.h>
-#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulersimpleimpl.h>

 #ifdef __cplusplus
 extern "C" {
@@ -64,47 +64,74 @@ static inline void _Scheduler_SMP_Initialize(
   _Chain_Initialize_empty( &self->Scheduled );
 }

+static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
+  const Scheduler_SMP_Context *self,
+  const Per_CPU_Control *cpu
+)
+{
+  return cpu->scheduler_context == &self->Base;
+}
+
+static inline void _Scheduler_SMP_Update_heir(
+  Per_CPU_Control *cpu_self,
+  Per_CPU_Control *cpu_for_heir,
+  Thread_Control *heir
+)
+{
+  cpu_for_heir->heir = heir;
+
+  /*
+   * It is critical that we first update the heir and then the dispatch
+   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
+   * update.
+   */
+  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+
+  /*
+   * Only update the dispatch necessary indicator if not already set to
+   * avoid superfluous inter-processor interrupts.
+   */
+  if ( !cpu_for_heir->dispatch_necessary ) {
+    cpu_for_heir->dispatch_necessary = true;
+
+    if ( cpu_for_heir != cpu_self ) {
+      _Per_CPU_Send_interrupt( cpu_for_heir );
+    }
+  }
+}
+
 static inline void _Scheduler_SMP_Allocate_processor(
+  Scheduler_SMP_Context *self,
   Thread_Control *scheduled,
   Thread_Control *victim
 )
 {
   Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
   Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control *heir;

   scheduled->is_scheduled = true;
   victim->is_scheduled = false;

-  _Per_CPU_Acquire( cpu_of_scheduled );
+  _Assert( _ISR_Get_level() != 0 );

-  if ( scheduled->is_executing ) {
-    heir = cpu_of_scheduled->heir;
-    cpu_of_scheduled->heir = scheduled;
+  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
+    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
+      heir = cpu_of_scheduled->heir;
+      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
+    } else {
+      /* We have to force a migration to our processor set */
+      _Assert( scheduled->debug_real_cpu->heir != scheduled );
+      heir = scheduled;
+    }
   } else {
     heir = scheduled;
   }

-  _Per_CPU_Release( cpu_of_scheduled );
-
   if ( heir != victim ) {
-    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
-
     _Thread_Set_CPU( heir, cpu_of_victim );
-
-    cpu_of_victim->heir = heir;
-
-    /*
-     * It is critical that we first update the heir and then the dispatch
-     * necessary so that _Thread_Dispatch() cannot miss an update.
-     */
-    _Atomic_Fence( ATOMIC_ORDER_RELEASE );
-
-    cpu_of_victim->dispatch_necessary = true;
-
-    if ( cpu_of_victim != cpu_of_executing ) {
-      _Per_CPU_Send_interrupt( cpu_of_victim );
-    }
+    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
   }
 }

@@ -148,7 +175,7 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
     highest_ready != NULL
       && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
   ) {
-    _Scheduler_SMP_Allocate_processor( highest_ready, thread );
+    _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );

     ( *insert_ready )( self, thread );
     ( *move_from_ready_to_scheduled )( self, highest_ready );
@@ -168,7 +195,7 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
     lowest_scheduled != NULL
       && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
   ) {
-    _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled );
+    _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );

     ( *insert_scheduled )( self, thread );
     ( *move_from_scheduled_to_ready )( self, lowest_scheduled );
@@ -187,7 +214,7 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
 {
   Thread_Control *highest_ready = ( *get_highest_ready )( self );

-  _Scheduler_SMP_Allocate_processor( highest_ready, victim );
+  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );

   ( *move_from_ready_to_scheduled )( self, highest_ready );
 }


@@ -82,6 +82,8 @@ extern "C" {
 #define STATES_WAITING_FOR_TERMINATION         0x100000
 /** This macro corresponds to a task being a zombie. */
 #define STATES_ZOMBIE                          0x200000
+/** This macro corresponds to a task migration to another scheduler. */
+#define STATES_MIGRATING                       0x400000

 /** This macro corresponds to a task which is in an interruptible
  *  blocking state.


@@ -503,20 +503,6 @@ struct Thread_Control_struct {
    */
   bool                                  is_in_the_air;

-  /**
-   * @brief This field is true if the thread is executing.
-   *
-   * A thread is executing if it executes on a processor.  An executing thread
-   * executes on exactly one processor.  There are exactly processor count
-   * executing threads in the system.  An executing thread may have a heir
-   * thread and thread dispatching is necessary.  On SMP a thread dispatch on a
-   * remote processor needs help from an inter-processor interrupt, thus it
-   * will take some time to complete the state change.  A lot of things can
-   * happen in the meantime.  This field is volatile since it is polled in
-   * _Thread_Kill_zombies().
-   */
-  volatile bool                         is_executing;
-
   /**
    * @brief The scheduler of this thread.
    */
@@ -548,7 +534,18 @@ struct Thread_Control_struct {
   void                                 *scheduler_info;

 #ifdef RTEMS_SMP
+  /**
+   * @brief The processor assigned by the scheduler.
+   */
   Per_CPU_Control                      *cpu;
+
+#ifdef RTEMS_DEBUG
+  /**
+   * @brief The processor on which this thread executed the last time or is
+   * executing.
+   */
+  Per_CPU_Control                      *debug_real_cpu;
+#endif
 #endif

   /** This field contains information about the starting state of


@@ -454,6 +454,22 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
   return ( the_thread == _Thread_Executing );
 }

+#if defined(RTEMS_SMP)
+/**
+ * @brief Returns @a true in case the thread executes currently on some
+ * processor in the system, otherwise @a false.
+ *
+ * Do not confuse this with _Thread_Is_executing() which checks only the
+ * current processor.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
+  const Thread_Control *the_thread
+)
+{
+  return _CPU_Context_Get_is_executing( &the_thread->Registers );
+}
+#endif
+
 /**
  *  This function returns true if the_thread is the heir
  *  thread, and false otherwise.
@@ -491,7 +507,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )

   _Giant_Release();

-  _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
+  _ISR_Disable_without_giant( level );
   ( void ) level;
 #endif

@@ -590,7 +606,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
 )
 {
 #if defined(RTEMS_SMP)
-  if ( thread->is_executing ) {
+  if ( _Thread_Is_executing_on_a_processor( thread ) ) {
     const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
     Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
@@ -611,7 +627,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
     _Thread_Dispatch_necessary = true;
   } else {
 #if defined(RTEMS_SMP)
-    if ( thread->is_executing ) {
+    if ( _Thread_Is_executing_on_a_processor( thread ) ) {
       const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
       Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
@@ -624,6 +640,39 @@ RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
   }
 }

+/**
+ * @brief Gets the heir of the processor and makes it executing.
+ *
+ * The thread dispatch necessary indicator is cleared as a side-effect.
+ *
+ * @return The heir thread.
+ *
+ * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
+ * _Scheduler_SMP_Update_heir().
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
+  Per_CPU_Control *cpu_self
+)
+{
+  Thread_Control *heir;
+
+  cpu_self->dispatch_necessary = false;
+
+#if defined( RTEMS_SMP )
+  /*
+   * It is critical that we first update the dispatch necessary and then
+   * read the heir so that we don't miss an update by
+   * _Scheduler_SMP_Update_heir().
+   */
+  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+#endif
+
+  heir = cpu_self->heir;
+  cpu_self->executing = heir;
+
+  return heir;
+}
+
 RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
   Thread_Control *executing,
   Timestamp_Control *time_of_last_context_switch
@@ -736,6 +785,19 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
   return ( life_state & THREAD_LIFE_RESTARTING_TERMINTING ) != 0;
 }

+RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
+  Thread_Control  *the_thread,
+  Per_CPU_Control *cpu
+)
+{
+#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
+  the_thread->debug_real_cpu = cpu;
+#else
+  (void) the_thread;
+  (void) cpu;
+#endif
+}
+
 #if !defined(__DYNAMIC_REENT__)
 /**
  *  This routine returns the C library re-entrant pointer.


@@ -216,13 +216,21 @@ static inline void _User_extensions_Thread_switch(
   const Chain_Node *tail = _Chain_Immutable_tail( chain );
   const Chain_Node *node = _Chain_Immutable_first( chain );

-  while ( node != tail ) {
-    const User_extensions_Switch_control *extension =
-      (const User_extensions_Switch_control *) node;
+  if ( node != tail ) {
+    Per_CPU_Control *cpu_self = _Per_CPU_Get();

-    (*extension->thread_switch)( executing, heir );
+    _Per_CPU_Acquire( cpu_self );

-    node = _Chain_Immutable_next( node );
+    while ( node != tail ) {
+      const User_extensions_Switch_control *extension =
+        (const User_extensions_Switch_control *) node;
+
+      (*extension->thread_switch)( executing, heir );
+
+      node = _Chain_Immutable_next( node );
+    }
+
+    _Per_CPU_Release( cpu_self );
   }
 }


@@ -58,7 +58,10 @@ static void _SMP_Start_processors( uint32_t cpu_count )
     cpu->started = started;

     if ( started ) {
-      ++assignment->scheduler->context->processor_count;
+      Scheduler_Context *scheduler_context = assignment->scheduler->context;
+
+      ++scheduler_context->processor_count;
+      cpu->scheduler_context = scheduler_context;
     }
   }
 }


@@ -64,10 +64,14 @@ void _Thread_Dispatch( void )
 {
   Per_CPU_Control  *cpu_self;
   Thread_Control   *executing;
-  Thread_Control   *heir;
   ISR_Level         level;

 #if defined( RTEMS_SMP )
+  /*
+   * On SMP the complete context switch must be atomic with respect to one
+   * processor.  See also _Thread_Handler() since _Context_switch() may branch
+   * to this function.
+   */
   _ISR_Disable_without_giant( level );
 #endif

@@ -76,45 +80,21 @@ void _Thread_Dispatch( void )
   _Profiling_Thread_dispatch_disable( cpu_self, 0 );
   cpu_self->thread_dispatch_disable_level = 1;

-#if defined( RTEMS_SMP )
-  _ISR_Enable_without_giant( level );
-#endif
-
   /*
    *  Now determine if we need to perform a dispatch on the current CPU.
    */
   executing = cpu_self->executing;

-  _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
+#if !defined( RTEMS_SMP )
+  _ISR_Disable( level );
+#endif

 #if defined( RTEMS_SMP )
-  /*
-   * On SMP the complete context switch must be atomic with respect to one
-   * processor.  The scheduler must obtain the per-CPU lock to check if a
-   * thread is executing and to update the heir.  This ensures that a thread
-   * cannot execute on more than one processor at a time.  See also
-   * _Thread_Handler() since _Context_switch() may branch to this function.
-   */
   if ( cpu_self->dispatch_necessary ) {
 #else
   while ( cpu_self->dispatch_necessary ) {
 #endif
-    cpu_self->dispatch_necessary = false;
-
-#if defined( RTEMS_SMP )
-    /*
-     * It is critical that we first update the dispatch necessary and then the
-     * read the heir so that we don't miss an update by
-     * _Scheduler_SMP_Allocate_processor().
-     */
-    _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
-#endif
-
-    heir = cpu_self->heir;
-    cpu_self->executing = heir;
-
-#if defined( RTEMS_SMP )
-    executing->is_executing = false;
-    heir->is_executing = true;
-#endif
+    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

     /*
      *  When the heir and executing are the same, then we are being
@@ -207,6 +187,8 @@ void _Thread_Dispatch( void )
      */
     cpu_self = _Per_CPU_Get();

+    _Thread_Debug_set_real_processor( executing, cpu_self );
+
 #if !defined( RTEMS_SMP )
     _ISR_Disable( level );
 #endif
@@ -217,7 +199,7 @@ post_switch:
   cpu_self->thread_dispatch_disable_level = 0;
   _Profiling_Thread_dispatch_enable( cpu_self, 0 );

-  _Per_CPU_Release_and_ISR_enable( cpu_self, level );
+  _ISR_Enable_without_giant( level );

   _Thread_Run_post_switch_actions( executing );
 }


@@ -153,11 +153,11 @@ void _Thread_Handler( void )
   _Assert( cpu_self->thread_dispatch_disable_level == 1 );
   _Assert( _ISR_Get_level() != 0 );

+  _Thread_Debug_set_real_processor( executing, cpu_self );
+
   cpu_self->thread_dispatch_disable_level = 0;
   _Profiling_Thread_dispatch_enable( cpu_self, 0 );

-  _Per_CPU_Release( cpu_self );
-
   level = executing->Start.isr_level;
   _ISR_Set_level( level);


@@ -52,6 +52,7 @@ bool _Thread_Initialize(
   bool                     extension_status;
   size_t                   i;
   bool                     scheduler_allocated = false;
+  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

 #if defined( RTEMS_SMP )
   if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
@@ -182,12 +183,13 @@ bool _Thread_Initialize(
 #if defined(RTEMS_SMP)
   the_thread->is_scheduled            = false;
   the_thread->is_in_the_air           = false;
-  the_thread->is_executing            = false;
   the_thread->scheduler               = scheduler;
 #endif

+  _Thread_Debug_set_real_processor( the_thread, cpu );
+
   /* Initialize the CPU for the non-SMP schedulers */
-  _Thread_Set_CPU( the_thread, _Per_CPU_Get_by_index( 0 ) );
+  _Thread_Set_CPU( the_thread, cpu );

   the_thread->current_state           = STATES_DORMANT;
   the_thread->Wait.queue              = NULL;


@@ -107,7 +107,7 @@ static void _Thread_Wait_for_execution_stop( Thread_Control *the_thread )
    * in case the thread termination sequence is interrupted by a slow interrupt
    * service on a remote processor.
    */
-  while (the_thread->is_executing) {
+  while ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
     /* Wait */
   }
 #else


@@ -30,22 +30,12 @@ void _Thread_Start_multitasking( void )

   /*
    * Threads begin execution in the _Thread_Handler() function.   This
-   * function will set the thread dispatch disable level to zero and calls
-   * _Per_CPU_Release().
+   * function will set the thread dispatch disable level to zero.
    */
-  _Per_CPU_Acquire( cpu_self );
   cpu_self->thread_dispatch_disable_level = 1;
 #endif

-  heir = cpu_self->heir;
-
-#if defined(RTEMS_SMP)
-  cpu_self->executing->is_executing = false;
-  heir->is_executing = true;
-#endif
-
-  cpu_self->dispatch_necessary = false;
-  cpu_self->executing = heir;
+  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

   /*
    *  Get the init task(s) running.


@@ -211,6 +211,52 @@ affinity.  Although the behavior is scheduler specific, if the scheduler
 does not support affinity, it is likely to ignore all attempts to set
 affinity.

+@subsection Task Migration
+
+@cindex task migration
+@cindex thread migration
+
+With more than one processor in the system tasks can migrate from one
+processor to another.  There are three reasons why tasks migrate in RTEMS.
+
+@itemize @bullet
+@item The scheduler changes explicitly via @code{rtems_task_set_scheduler()}
+or similar directives.
+@item The task resumes execution after a blocking operation.  On a priority
+based scheduler it will evict the lowest priority task currently assigned to
+a processor in the processor set managed by the scheduler instance.
+@item The task moves temporarily to another scheduler instance due to locking
+protocols like @cite{Migratory Priority Inheritance} or the
+@cite{Multiprocessor Resource Sharing Protocol}.
+@end itemize
+
+Task migration should be avoided so that the working set of a task can stay
+on the most local cache level.
+
+The current implementation of task migration in RTEMS has some implications
+for the interrupt latency.  It is crucial to preserve the system invariant
+that a task can execute on at most one processor in the system at a time.
+This is accomplished with a boolean indicator in the task context.  The
+architecture-specific low-level task context switch code marks the context
+of the executing task as no longer executing and waits until the heir
+context has stopped execution before it restores the heir context and
+resumes execution of the heir task.  Thus there is one point in time at
+which a processor executes no task.  This is essential to avoid cyclic
+dependencies in case multiple tasks migrate at once; otherwise some
+supervising entity would be necessary to prevent livelocks.  Such a global
+supervisor would lead to scalability problems, so this approach is not used.
+Currently the thread dispatch is performed with interrupts disabled, so if
+the heir task is still executing on another processor, the time with
+interrupts disabled is prolonged: one processor has to wait for another
+processor to make progress.
+
+It is difficult to avoid this interrupt latency issue, since interrupts
+normally store the context of the interrupted task on its stack.  Once a
+task is marked as not executing, we must not use its task stack to store
+such an interrupt context.  We also cannot use the heir stack before the
+heir has stopped execution on another processor.  So if we enable interrupts
+during this transition, we have to provide an alternative, task-independent
+stack for this time frame.  This issue needs further investigation.
+
 @subsection Critical Section Techniques and SMP

 As discussed earlier, SMP systems have opportunities for true parallelism
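
For illustration, a minimal fragment that forces the calling task onto
another scheduler instance (and thus processor set); the full version is
the new smpmigration02 test added below:

    #include <rtems.h>
    #include <assert.h>

    static void migrate_self(rtems_name scheduler_name)
    {
      rtems_id scheduler_id;
      rtems_status_code sc;

      /* Look up the scheduler instance by its configuration-defined name. */
      sc = rtems_scheduler_ident(scheduler_name, &scheduler_id);
      assert(sc == RTEMS_SUCCESSFUL);

      /* Forced migration: afterwards the task executes on a processor
       * owned by the new scheduler instance. */
      sc = rtems_task_set_scheduler(RTEMS_SELF, scheduler_id);
      assert(sc == RTEMS_SUCCESSFUL);
    }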


@@ -22,6 +22,7 @@ SUBDIRS += smpipi01
 SUBDIRS += smpload01
 SUBDIRS += smplock01
 SUBDIRS += smpmigration01
+SUBDIRS += smpmigration02
 SUBDIRS += smpscheduler01
 SUBDIRS += smpscheduler02
 SUBDIRS += smpsignal01


@@ -77,6 +77,7 @@ smpipi01/Makefile
 smpload01/Makefile
 smplock01/Makefile
 smpmigration01/Makefile
+smpmigration02/Makefile
 smppsxaffinity01/Makefile
 smppsxaffinity02/Makefile
 smppsxsignal01/Makefile


@@ -0,0 +1,19 @@
rtems_tests_PROGRAMS = smpmigration02
smpmigration02_SOURCES = init.c

dist_rtems_tests_DATA = smpmigration02.scn smpmigration02.doc

include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(top_srcdir)/../automake/compile.am
include $(top_srcdir)/../automake/leaf.am

AM_CPPFLAGS += -I$(top_srcdir)/../support/include

LINK_OBJS = $(smpmigration02_OBJECTS)
LINK_LIBS = $(smpmigration02_LDLIBS)

smpmigration02$(EXEEXT): $(smpmigration02_OBJECTS) $(smpmigration02_DEPENDENCIES)
	@rm -f smpmigration02$(EXEEXT)
	$(make-exe)

include $(top_srcdir)/../automake/local.am


@@ -0,0 +1,253 @@
/*
* Copyright (c) 2014 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <rtems/libcsupport.h>
#include "tmacros.h"
const char rtems_test_name[] = "SMPMIGRATION 2";
#define CPU_COUNT 32
#define TASK_COUNT (CPU_COUNT + 1)
#define PRIO_LOW 3
#define PRIO_HIGH 2
typedef struct {
uint32_t value;
uint32_t cache_line_separation[31];
} test_counter;
typedef struct {
test_counter counters[TASK_COUNT];
rtems_id scheduler_ids[CPU_COUNT];
rtems_id task_ids[TASK_COUNT];
} test_context;
static test_context test_instance;
static void task(rtems_task_argument arg)
{
test_context *ctx = &test_instance;
rtems_status_code sc;
uint32_t cpu_count = rtems_get_processor_count();
uint32_t cpu_index = rtems_get_current_processor();
while (true) {
cpu_index = (cpu_index + 1) % cpu_count;
sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
++ctx->counters[arg].value;
rtems_test_assert(cpu_index == rtems_get_current_processor());
}
}
static void test(void)
{
test_context *ctx = &test_instance;
rtems_status_code sc;
uint32_t cpu_count = rtems_get_processor_count();
uint32_t cpu_index;
uint32_t task_count = cpu_count + 1;
uint32_t task_index;
for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
for (task_index = 0; task_index < task_count; ++task_index) {
rtems_id task_id;
sc = rtems_task_create(
rtems_build_name('T', 'A', 'S', 'K'),
task_index > 0 ? PRIO_LOW : PRIO_HIGH,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&task_id
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_set_scheduler(task_id, ctx->scheduler_ids[task_index % cpu_count]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(task_id, task, task_index);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
ctx->task_ids[task_index] = task_id;
}
sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
for (task_index = 0; task_index < task_count; ++task_index) {
printf(
"task %" PRIu32 " counter: %" PRIu32 "\n",
task_index,
ctx->counters[task_index].value
);
sc = rtems_task_delete(ctx->task_ids[task_index]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
}
static void Init(rtems_task_argument arg)
{
rtems_resource_snapshot snapshot;
TEST_BEGIN();
rtems_resource_snapshot_take(&snapshot);
test();
rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
TEST_END();
rtems_test_exit(0);
}
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
#define CONFIGURE_SMP_APPLICATION
#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
#define CONFIGURE_SCHEDULER_SIMPLE_SMP
#include <rtems/scheduler.h>
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(17);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(18);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(19);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(20);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(21);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(22);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(23);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(24);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(25);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(26);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(27);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(28);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(29);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(30);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(31);
#define CONFIGURE_SCHEDULER_CONTROLS \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(17, 17), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(18, 18), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(19, 19), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(20, 20), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(21, 21), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(22, 22), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(23, 23), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(24, 24), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(25, 25), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(26, 26), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(27, 27), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(28, 28), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(29, 29), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(30, 30), \
RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(31, 31)
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(17, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(18, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(19, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(20, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(21, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(22, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(23, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(24, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(25, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(26, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(27, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(28, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(29, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(30, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
RTEMS_SCHEDULER_ASSIGN(31, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>


@@ -0,0 +1,12 @@
This file describes the directives and concepts tested by this test set.
test set name: smpmigration02
directives:
- _Scheduler_SMP_Allocate_processor()
- _CPU_Context_switch()
concepts:
- Ensure that forced thread migration works.


@@ -0,0 +1,7 @@
*** BEGIN OF TEST SMPMIGRATION 2 ***
task 0 counter: 1137459
task 1 counter: 1136714
task 2 counter: 1136713
task 3 counter: 1136712
task 4 counter: 1136711
*** END OF TEST SMPMIGRATION 2 ***


@@ -158,8 +158,8 @@ static void test(void)
   sc = rtems_task_start(task_id, task, 0);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  sc = rtems_task_set_scheduler(task_id, scheduler_a_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  sc = rtems_task_set_scheduler(task_id, scheduler_b_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);


@@ -1,13 +1,13 @@
-*** TEST SMPSWITCHEXTENSION 1 ***
+*** BEGIN OF TEST SMPSWITCHEXTENSION 1 ***
 toggler 0
-toggles 2146479
+toggles 1555183
 toggler 1
-toggles 2146477
+toggles 1555182
 extension 0
-context switches 2146478
+context switches 1555185
 extension 1
-context switches 2146481
+context switches 1244705
 extension 2
-context switches 2146482
+context switches 1554688
-extension switches 718121
+extension switches 311649
 *** END OF TEST SMPSWITCHEXTENSION 1 ***


@@ -81,10 +81,10 @@ static void test_task_get_set_affinity(void)
   rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

   sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
-  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
-  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -163,7 +163,7 @@ static void test_task_get_set_scheduler(void)
   rtems_test_assert(sc == RTEMS_INVALID_ID);

   sc = rtems_task_set_scheduler(self_id, scheduler_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_task_create(
     rtems_build_name('T', 'A', 'S', 'K'),
@@ -188,7 +188,7 @@ static void test_task_get_set_scheduler(void)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_task_set_scheduler(task_id, scheduler_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_task_delete(task_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);


@@ -146,8 +146,6 @@ static void thread_disable_dispatch( void )
   self_cpu = _Per_CPU_Get();
   self_cpu->thread_dispatch_disable_level = 1;

-  _Per_CPU_Acquire( self_cpu );
-
 #else
   _Thread_Disable_dispatch();
 #endif