score: Remove the Giant lock

Update #2555.
This commit is contained in:
Sebastian Huber
2016-05-17 16:03:46 +02:00
parent c2f301b580
commit ceb0f6597c
13 changed files with 0 additions and 613 deletions

View File

@@ -299,7 +299,6 @@ libscore_a_SOURCES += src/rbtreereplace.c
libscore_a_SOURCES += src/thread.c src/threadchangepriority.c \
src/threadclearstate.c src/threadcreateidle.c \
src/threaddispatch.c \
src/threadenabledispatch.c src/threaddisabledispatch.c \
src/threadget.c src/threadhandler.c src/threadinitialize.c \
src/threadloadenv.c \
src/threadrestart.c src/threadsetpriority.c \
@@ -315,10 +314,6 @@ libscore_a_SOURCES += src/threadtimeout.c
libscore_a_SOURCES += src/threadwaitgetid.c
libscore_a_SOURCES += src/threadyield.c
if HAS_SMP
libscore_a_SOURCES += src/threaddispatchdisablelevel.c
endif
## THREADQ_C_FILES
libscore_a_SOURCES += src/threadq.c \
src/threadqenqueue.c \

View File

@@ -392,12 +392,6 @@ typedef struct Per_CPU_Control {
SMP_lock_Stats_context Lock_stats_context;
#endif
/**
* @brief Context for the Giant lock acquire and release pair of this
* processor.
*/
SMP_lock_Context Giant_lock_context;
/**
* @brief Bit field for SMP messages.
*

View File

@@ -86,122 +86,6 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
_Thread_Dispatch_disable_level = 1;
}
#if defined(RTEMS_SMP)
/**
* @brief Acquires the giant lock.
*
* The giant lock is a recursive SMP lock protecting nearly all operating
* system services.
*
* This lock is implicitly acquired in
* _Thread_Dispatch_increment_disable_level().
*
* Thread dispatching must be disabled before the Giant lock can be acquired
* and must not be enabled while owning the Giant lock. The thread dispatch
* disable level is not altered by this function.
*
* @param[in] cpu_self The current processor.
*/
void _Giant_Acquire( Per_CPU_Control *cpu_self );
/**
* @brief Releases the giant lock.
*
* This lock is implicitly released in
* _Thread_Dispatch_decrement_disable_level().
*
* The thread dispatch disable level is not altered by this function.
*
* @param[in] cpu_self The current processor.
*/
void _Giant_Release( Per_CPU_Control *cpu_self );
/**
* @brief Releases the giant lock completely if held by the executing processor.
*
* The thread dispatch disable level is not altered by this function.
*
* The only use case for this operation is in _SMP_Request_shutdown().
*
* @param[in] cpu_self The current processor.
*/
void _Giant_Drop( Per_CPU_Control *cpu_self );
/**
* @brief Increments the thread dispatch disable level.
*
* This routine increments the thread dispatch disable level.
*/
uint32_t _Thread_Dispatch_increment_disable_level(void);
/**
* @brief Decrements the thread dispatch disable level.
*
* This routine decrements the thread dispatch disable level.
*/
uint32_t _Thread_Dispatch_decrement_disable_level(void);
#else /* RTEMS_SMP */
/**
* @brief Increases the thread dispatch disable level.
*
* This routine increments the thread dispatch disable level.
*/
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_increment_disable_level(void)
{
uint32_t disable_level = _Thread_Dispatch_disable_level;
#if defined( RTEMS_PROFILING )
ISR_Level level;
_ISR_Local_disable( level );
_Profiling_Thread_dispatch_disable( _Per_CPU_Get(), disable_level );
#endif
++disable_level;
_Thread_Dispatch_disable_level = disable_level;
#if defined( RTEMS_PROFILING )
_ISR_Local_enable( level );
#endif
return disable_level;
}
/**
* @brief Decreases the thread dispatch disable level.
*
* This routine decrements the thread dispatch disable level.
*/
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_decrement_disable_level(void)
{
uint32_t disable_level = _Thread_Dispatch_disable_level;
#if defined( RTEMS_PROFILING )
ISR_Level level;
_ISR_Local_disable( level );
#endif
--disable_level;
_Thread_Dispatch_disable_level = disable_level;
#if defined( RTEMS_PROFILING )
_Profiling_Thread_dispatch_enable( _Per_CPU_Get(), disable_level );
_ISR_Local_enable( level );
#endif
return disable_level;
}
RTEMS_INLINE_ROUTINE void _Giant_Acquire( Per_CPU_Control *cpu_self )
{
(void) cpu_self;
}
RTEMS_INLINE_ROUTINE void _Giant_Release( Per_CPU_Control *cpu_self )
{
(void) cpu_self;
}
#endif /* RTEMS_SMP */
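The comments above spell out the calling discipline of the removed SMP-only API: disable thread dispatching first, then acquire the Giant lock, and release it again before re-enabling dispatching. A minimal sketch of that pre-removal pattern follows; giant_protected_example() and protected_operation() are hypothetical placeholders.

#include <rtems/score/threaddispatch.h>

/* Hypothetical operation on state guarded by the Giant lock. */
static void protected_operation( void )
{
  /* ... */
}

static void giant_protected_example( void )
{
  /* Dispatching must be disabled before the Giant lock is acquired ... */
  Per_CPU_Control *cpu_self = _Thread_Dispatch_disable();

  _Giant_Acquire( cpu_self );
  protected_operation();
  _Giant_Release( cpu_self );

  /* ... and stays disabled until after the lock has been released. */
  _Thread_Dispatch_enable( cpu_self );
}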
/**
* @brief Performs a thread dispatch if necessary.
*
@@ -240,8 +124,6 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level );
* @brief Disables thread dispatching inside a critical section (interrupts
* disabled) with the current processor.
*
* This function does not acquire the Giant lock.
*
* @param[in] cpu_self The current processor.
* @param[in] lock_context The lock context of the corresponding
* _ISR_lock_ISR_disable() that started the critical section.
@@ -270,8 +152,6 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_with_CPU(
* @brief Disables thread dispatching inside a critical section (interrupts
* disabled).
*
* This function does not acquire the Giant lock.
*
* @param[in] lock_context The lock context of the corresponding
* _ISR_lock_ISR_disable() that started the critical section.
*
@@ -287,8 +167,6 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical(
/**
* @brief Disables thread dispatching.
*
* This function does not acquire the Giant lock.
*
* @return The current processor.
*/
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void )
@@ -314,8 +192,6 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void )
*
* May perform a thread dispatch if necessary as a side-effect.
*
* This function does not release the Giant lock.
*
* @param[in] cpu_self The current processor.
*/
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
@@ -343,8 +219,6 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_enable( Per_CPU_Control *cpu_self )
/**
* @brief Unnests thread dispatching.
*
* This function does not release the Giant lock.
*
* @param[in] cpu_self The current processor.
*/
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
@@ -352,55 +226,6 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_unnest( Per_CPU_Control *cpu_self )
--cpu_self->thread_dispatch_disable_level;
}
/**
* @brief Disables thread dispatching and acquires the Giant lock.
*/
#if defined ( __THREAD_DO_NOT_INLINE_DISABLE_DISPATCH__ )
void _Thread_Disable_dispatch( void );
#else
RTEMS_INLINE_ROUTINE void _Thread_Disable_dispatch( void )
{
_Thread_Dispatch_increment_disable_level();
RTEMS_COMPILER_MEMORY_BARRIER();
}
#endif
RTEMS_INLINE_ROUTINE void _Thread_Enable_dispatch_body( void )
{
Per_CPU_Control *cpu_self = _Per_CPU_Get();
_Giant_Release( cpu_self );
_Thread_Dispatch_enable( cpu_self );
}
/**
* @brief Enables thread dispatching and releases the Giant lock.
*
* May perform a thread dispatch if necessary as a side-effect.
*/
#if defined ( __THREAD_DO_NOT_INLINE_ENABLE_DISPATCH__ )
void _Thread_Enable_dispatch( void );
#else
/* inlining of enable dispatching must be true */
RTEMS_INLINE_ROUTINE void _Thread_Enable_dispatch( void )
{
RTEMS_COMPILER_MEMORY_BARRIER();
_Thread_Enable_dispatch_body();
}
#endif
/**
* @brief Unnests thread dispatching and releases the Giant lock.
*
* @warning A thread dispatch is not performed as a side-effect. Use this
* function with care.
*/
RTEMS_INLINE_ROUTINE void _Thread_Unnest_dispatch( void )
{
RTEMS_COMPILER_MEMORY_BARRIER();
_Thread_Dispatch_decrement_disable_level();
}
/** @} */
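The removed _Thread_Disable_dispatch()/_Thread_Enable_dispatch() pair bundled two things: raising the per-processor thread dispatch disable level and, on SMP, acquiring and releasing the Giant lock. The _Thread_Dispatch_disable()/_Thread_Dispatch_enable() pair that remains only does the former; the protection once provided by the Giant lock has to come from the respective subsystem's own locks. A rough sketch of the mapping, with old_style() and new_style() as hypothetical names and placeholder critical sections:

#include <rtems/score/threaddispatch.h>

static void old_style( void )
{
  _Thread_Disable_dispatch();  /* on SMP this also acquired the Giant lock */
  /* ... critical section ... */
  _Thread_Enable_dispatch();   /* released the Giant lock, may dispatch */
}

static void new_style( void )
{
  Per_CPU_Control *cpu_self = _Thread_Dispatch_disable();
  /* ... critical section, guarded by the subsystem's own locks ... */
  _Thread_Dispatch_enable( cpu_self );
}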
#ifdef __cplusplus

View File

@@ -21,7 +21,6 @@
#include <rtems/score/smpimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/config.h>
@@ -162,14 +161,6 @@ void _SMP_Request_shutdown( void )
Per_CPU_Control *self_cpu = _Per_CPU_Get();
_Per_CPU_State_change( self_cpu, PER_CPU_STATE_SHUTDOWN );
/*
* We have to drop the Giant lock here in order to give other processors the
* opportunity to receive the inter-processor interrupts issued previously.
* In case the executing thread still holds SMP locks, then other processors
* already waiting for this SMP lock will spin forever.
*/
_Giant_Drop( self_cpu );
}
void _SMP_Send_message( uint32_t cpu_index, unsigned long message )

View File

@@ -1,60 +0,0 @@
/**
* @file
*
* @brief Disable Thread Dispatching
* @ingroup ScoreThread
*/
/*
*
*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/system.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/thread.h>
#include <rtems/score/threaddispatch.h>
#if defined ( __THREAD_DO_NOT_INLINE_DISABLE_DISPATCH__ )
void _Thread_Disable_dispatch( void )
{
/*
* This check is very brutal to system performance but is very helpful
* at finding blown stack problems. If you have a stack problem and
* need help finding it, then uncomment this code. Every system
* call will check the stack and since mutexes are used frequently
* in most systems, you might get lucky.
*/
#if defined(RTEMS_HEAVY_STACK_DEBUG)
if (_System_state_Is_up(_System_state_Get()) && (_ISR_Nest_level == 0)) {
if ( rtems_stack_checker_is_blown() ) {
printk( "Stack blown!!\n" );
rtems_fatal_error_occurred( 99 );
}
}
#endif
_Thread_Dispatch_increment_disable_level();
RTEMS_COMPILER_MEMORY_BARRIER();
/*
* This check is even more brutal than the other one. This enables
* malloc heap integrity checking upon entry to every system call.
*/
#if defined(RTEMS_HEAVY_MALLOC_DEBUG)
if ( _Thread_Dispatch_get_disable_level() == 1 ) {
_Heap_Walk( RTEMS_Malloc_Heap, 99, false );
}
#endif
}
#endif

View File

@@ -1,140 +0,0 @@
/**
* @file
*
* @brief Thread Dispatch Disable Functions
*
* @ingroup ScoreThread
*/
/*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/profiling.h>
#include <rtems/score/sysstate.h>
#define NO_OWNER_CPU NULL
typedef struct {
SMP_lock_Control lock;
Per_CPU_Control *owner_cpu;
uint32_t nest_level;
} Giant_Control;
static Giant_Control _Giant = {
.lock = SMP_LOCK_INITIALIZER("Giant"),
.owner_cpu = NO_OWNER_CPU,
.nest_level = 0
};
static void _Giant_Do_acquire( Per_CPU_Control *cpu_self )
{
Giant_Control *giant = &_Giant;
if ( giant->owner_cpu != cpu_self ) {
_SMP_lock_Acquire( &giant->lock, &cpu_self->Giant_lock_context );
giant->owner_cpu = cpu_self;
giant->nest_level = 1;
} else {
++giant->nest_level;
}
}
static void _Giant_Do_release( Per_CPU_Control *cpu_self )
{
Giant_Control *giant = &_Giant;
--giant->nest_level;
if ( giant->nest_level == 0 ) {
giant->owner_cpu = NO_OWNER_CPU;
_SMP_lock_Release( &giant->lock, &cpu_self->Giant_lock_context );
}
}
void _Giant_Drop( Per_CPU_Control *cpu_self )
{
Giant_Control *giant = &_Giant;
_Assert( _ISR_Get_level() != 0 );
if ( giant->owner_cpu == cpu_self ) {
giant->nest_level = 0;
giant->owner_cpu = NO_OWNER_CPU;
_SMP_lock_Release( &giant->lock, &cpu_self->Giant_lock_context );
}
}
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
ISR_Level isr_level;
uint32_t disable_level;
Per_CPU_Control *cpu_self;
_ISR_Local_disable( isr_level );
/*
* We must obtain the processor after interrupts are disabled to prevent
* thread migration.
*/
cpu_self = _Per_CPU_Get();
_Giant_Do_acquire( cpu_self );
disable_level = cpu_self->thread_dispatch_disable_level;
_Profiling_Thread_dispatch_disable( cpu_self, disable_level );
++disable_level;
cpu_self->thread_dispatch_disable_level = disable_level;
_ISR_Local_enable( isr_level );
return disable_level;
}
uint32_t _Thread_Dispatch_decrement_disable_level( void )
{
ISR_Level isr_level;
uint32_t disable_level;
Per_CPU_Control *cpu_self;
_ISR_Local_disable( isr_level );
cpu_self = _Per_CPU_Get();
disable_level = cpu_self->thread_dispatch_disable_level;
_Assert( disable_level > 0 );
--disable_level;
cpu_self->thread_dispatch_disable_level = disable_level;
_Giant_Do_release( cpu_self );
_Profiling_Thread_dispatch_enable( cpu_self, disable_level );
_ISR_Local_enable( isr_level );
return disable_level;
}
void _Giant_Acquire( Per_CPU_Control *cpu_self )
{
ISR_Level isr_level;
_ISR_Local_disable( isr_level );
_Assert( _Thread_Dispatch_disable_level != 0 );
_Giant_Do_acquire( cpu_self );
_ISR_Local_enable( isr_level );
}
void _Giant_Release( Per_CPU_Control *cpu_self )
{
ISR_Level isr_level;
_ISR_Local_disable( isr_level );
_Assert( _Thread_Dispatch_disable_level != 0 );
_Giant_Do_release( cpu_self );
_ISR_Local_enable( isr_level );
}
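The deleted file implements an owner-tracking recursive lock layered on a plain SMP lock: the first acquire by a processor takes the underlying lock and records the owner, nested acquires only bump nest_level, and the final release clears the owner before unlocking, with _Giant_Drop() as a forced variant for the shutdown path. A self-contained sketch of the same pattern in portable C11 follows; the names are hypothetical and this is not the RTEMS implementation. Here self stands in for a nonzero per-processor identity, much like the Per_CPU_Control pointer above.

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_flag lock;          /* plain, non-recursive spin lock            */
  _Atomic uintptr_t owner;   /* identity of the current owner, 0 if none  */
  uint32_t nest_level;       /* recursion depth, protected by the lock    */
} recursive_lock;

static recursive_lock the_lock = { ATOMIC_FLAG_INIT, 0, 0 };

static void recursive_lock_acquire( recursive_lock *l, uintptr_t self )
{
  if ( atomic_load( &l->owner ) != self ) {
    /* Not the owner yet: take the underlying lock, then record ownership. */
    while ( atomic_flag_test_and_set_explicit( &l->lock, memory_order_acquire ) ) {
      /* spin */
    }
    atomic_store( &l->owner, self );
    l->nest_level = 1;
  } else {
    /* Already the owner: only increase the nesting depth. */
    ++l->nest_level;
  }
}

static void recursive_lock_release( recursive_lock *l )
{
  if ( --l->nest_level == 0 ) {
    /* Last release: give up ownership before unlocking. */
    atomic_store( &l->owner, 0 );
    atomic_flag_clear_explicit( &l->lock, memory_order_release );
  }
}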

View File

@@ -1,32 +0,0 @@
/**
* @file
*
* @brief Enable Dispatching of Threads
*
* @ingroup ScoreThread
*/
/*
* _Thread_Enable_dispatch
*
*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/score/threaddispatch.h>
#if defined (__THREAD_DO_NOT_INLINE_ENABLE_DISPATCH__ )
void _Thread_Enable_dispatch( void )
{
_Thread_Enable_dispatch_body();
}
#endif

View File

@@ -17,7 +17,6 @@ SUBDIRS += smpcapture02
SUBDIRS += smpclock01
SUBDIRS += smpfatal01
SUBDIRS += smpfatal02
SUBDIRS += smpfatal03
SUBDIRS += smpfatal04
SUBDIRS += smpfatal05
SUBDIRS += smpfatal08

View File

@@ -73,7 +73,6 @@ smpcapture02/Makefile
smpclock01/Makefile
smpfatal01/Makefile
smpfatal02/Makefile
smpfatal03/Makefile
smpfatal04/Makefile
smpfatal05/Makefile
smpfatal08/Makefile

View File

@@ -1,19 +0,0 @@
rtems_tests_PROGRAMS = smpfatal03
smpfatal03_SOURCES = init.c
dist_rtems_tests_DATA = smpfatal03.scn smpfatal03.doc
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(top_srcdir)/../automake/compile.am
include $(top_srcdir)/../automake/leaf.am
AM_CPPFLAGS += -I$(top_srcdir)/../support/include
LINK_OBJS = $(smpfatal03_OBJECTS)
LINK_LIBS = $(smpfatal03_LDLIBS)
smpfatal03$(EXEEXT): $(smpfatal03_OBJECTS) $(smpfatal03_DEPENDENCIES)
@rm -f smpfatal03$(EXEEXT)
$(make-exe)
include $(top_srcdir)/../automake/local.am

View File

@@ -1,151 +0,0 @@
/*
* Copyright (c) 2014 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems.h>
#include <rtems/counter.h>
#include <rtems/test.h>
#include <rtems/score/smpbarrier.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/threaddispatch.h>
#include <assert.h>
#include <stdlib.h>
const char rtems_test_name[] = "SMPFATAL 3";
#define CPU_COUNT 2
static uint32_t main_cpu;
static SMP_barrier_Control giant_barrier = SMP_BARRIER_CONTROL_INITIALIZER;
static SMP_barrier_Control fatal_barrier = SMP_BARRIER_CONTROL_INITIALIZER;
static void acquire_giant_and_fatal_task(rtems_task_argument arg)
{
SMP_barrier_State state = SMP_BARRIER_STATE_INITIALIZER;
int i;
for (i = 0; i < 13; ++i) {
_Giant_Acquire();
}
_SMP_barrier_Wait(&giant_barrier, &state, CPU_COUNT);
/*
* Now we have to wait some time so that the other thread can actually start
* with the _Giant_Acquire() procedure.
*/
rtems_counter_delay_nanoseconds(1000000);
rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0xdeadbeef);
}
static void wait_for_giant(void)
{
SMP_barrier_State state = SMP_BARRIER_STATE_INITIALIZER;
_SMP_barrier_Wait(&giant_barrier, &state, CPU_COUNT);
_Giant_Release();
}
static void Init(rtems_task_argument arg)
{
uint32_t self = rtems_get_current_processor();
uint32_t cpu_count = rtems_get_processor_count();
rtems_test_begink();
main_cpu = self;
if (cpu_count >= CPU_COUNT) {
rtems_status_code sc;
rtems_id id;
sc = rtems_task_create(
rtems_build_name( 'W', 'A', 'I', 'T' ),
1,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&id
);
assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(id, acquire_giant_and_fatal_task, 0);
assert(sc == RTEMS_SUCCESSFUL);
wait_for_giant();
} else {
rtems_test_endk();
exit(0);
}
}
static void fatal_extension(
rtems_fatal_source source,
bool is_internal,
rtems_fatal_code code
)
{
if (
source == RTEMS_FATAL_SOURCE_APPLICATION
|| source == RTEMS_FATAL_SOURCE_SMP
) {
uint32_t self = rtems_get_current_processor();
SMP_barrier_State state = SMP_BARRIER_STATE_INITIALIZER;
assert(!is_internal);
if (self == main_cpu) {
assert(source == RTEMS_FATAL_SOURCE_SMP);
assert(code == SMP_FATAL_SHUTDOWN_RESPONSE);
} else {
assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
assert(code == 0xdeadbeef);
}
_SMP_barrier_Wait(&fatal_barrier, &state, CPU_COUNT);
if (self == 0) {
rtems_test_endk();
}
_SMP_barrier_Wait(&fatal_barrier, &state, CPU_COUNT);
}
}
#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
#define CONFIGURE_INITIAL_EXTENSIONS \
{ .fatal = fatal_extension }, \
RTEMS_TEST_INITIAL_EXTENSION
#define CONFIGURE_SMP_APPLICATION
#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
#define CONFIGURE_MAXIMUM_TASKS 2
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>

View File

@@ -1,12 +0,0 @@
This file describes the directives and concepts tested by this test set.
test set name: smpfatal03
directives:
- _Terminate()
concepts:
- Ensure that _Terminate() drops the Giant lock so that other processors
waiting on the Giant lock can receive shutdown requests.

View File

@@ -1,2 +0,0 @@
*** TEST SMPFATAL 3 ***
*** END OF TEST SMPFATAL 3 ***