score: Optimize self-contained mutexes

This commit is contained in:
Sebastian Huber
2016-11-03 14:44:15 +01:00
parent e7ab43d46f
commit ee42943cd0
3 changed files with 2185 additions and 2132 deletions

View File

@@ -150,6 +150,24 @@ typedef struct {
{ }
#endif
/**
 * @brief Sets the ISR level in the ISR lock context.
 *
 * Stores the given ISR level in the lock context so it is available to
 * later lock operations on the same context.
 *
 * @param[in] context The ISR lock context.
 * @param[in] level The ISR level.
 */
RTEMS_INLINE_ROUTINE void _ISR_lock_Context_set_level(
ISR_lock_Context *context,
ISR_Level level
)
{
#if defined( RTEMS_SMP )
/* SMP: the ISR level lives inside the embedded SMP lock context */
context->Lock_context.isr_level = level;
#else
/* Uniprocessor: the lock context stores the ISR level directly */
context->isr_level = level;
#endif
}
/**
* @brief Initializes an ISR lock.
*

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 embedded brains GmbH. All rights reserved.
* Copyright (c) 2015, 2016 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -71,14 +71,21 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
return (Mutex_Control *) _mutex;
}
static Thread_Control *_Mutex_Queue_acquire(
/*
 * Disables local interrupts, saving the previous interrupt state in
 * the (lvalue) argument `level`, and invokes the ISR lock profiling
 * hook on the queue context's lock context.
 *
 * NOTE(review): _ISR_lock_ISR_disable_profile() is not followed by a
 * ';' inside the macro — presumably it expands to a complete statement
 * (or to nothing in non-profiling builds); confirm against the ISR
 * lock implementation.
 */
#define _Mutex_ISR_disable( level, queue_context ) \
do { \
_ISR_Local_disable( level ); \
_ISR_lock_ISR_disable_profile( \
&( queue_context )->Lock_context.Lock_context \
) \
} while ( 0 )
static Thread_Control *_Mutex_Queue_acquire_critical(
Mutex_Control *mutex,
Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
_ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&mutex->Queue.Queue,
@@ -91,19 +98,22 @@ static Thread_Control *_Mutex_Queue_acquire(
static void _Mutex_Queue_release(
Mutex_Control *mutex,
ISR_Level level,
Thread_queue_Context *queue_context
)
{
_Thread_queue_Queue_release(
_Thread_queue_Queue_release_critical(
&mutex->Queue.Queue,
&queue_context->Lock_context.Lock_context
);
_ISR_Local_enable( level );
}
static void _Mutex_Acquire_slow(
Mutex_Control *mutex,
Thread_Control *owner,
Thread_Control *executing,
ISR_Level level,
Thread_queue_Context *queue_context
)
{
@@ -112,6 +122,10 @@ static void _Mutex_Acquire_slow(
queue_context,
_Thread_queue_Deadlock_fatal
);
_ISR_lock_Context_set_level(
&queue_context->Lock_context.Lock_context,
level
);
_Thread_queue_Enqueue_critical(
&mutex->Queue.Queue,
MUTEX_TQ_OPERATIONS,
@@ -124,6 +138,7 @@ static void _Mutex_Acquire_slow(
static void _Mutex_Release_critical(
Mutex_Control *mutex,
Thread_Control *executing,
ISR_Level level,
Thread_queue_Context *queue_context
)
{
@@ -134,8 +149,12 @@ static void _Mutex_Release_critical(
_Thread_Resource_count_decrement( executing );
if ( __predict_true( heads == NULL ) ) {
_Mutex_Queue_release( mutex, queue_context );
_Mutex_Queue_release( mutex, level, queue_context );
} else {
_ISR_lock_Context_set_level(
&queue_context->Lock_context.Lock_context,
level
);
_Thread_queue_Surrender(
&mutex->Queue.Queue,
heads,
@@ -150,22 +169,24 @@ void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
_Mutex_Queue_release( mutex, &queue_context );
_Mutex_Queue_release( mutex, level, &queue_context );
} else {
_Thread_queue_Context_set_no_timeout( &queue_context );
_Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
_Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
}
}
@@ -176,19 +197,21 @@ int _Mutex_Acquire_timed(
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
_Mutex_Queue_release( mutex, &queue_context );
_Mutex_Queue_release( mutex, level, &queue_context );
return 0;
} else {
@@ -196,18 +219,18 @@ int _Mutex_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
_Mutex_Queue_release( mutex, &queue_context );
_Mutex_Queue_release( mutex, level, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
_Mutex_Queue_release( mutex, &queue_context );
_Mutex_Queue_release( mutex, level, &queue_context );
return ETIMEDOUT;
default:
break;
}
_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
_Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
_Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -217,13 +240,15 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
@@ -235,7 +260,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
eno = EBUSY;
}
_Mutex_Queue_release( mutex, &queue_context );
_Mutex_Queue_release( mutex, level, &queue_context );
return eno;
}
@@ -244,15 +269,17 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
_Assert( mutex->Queue.Queue.owner == executing );
_Mutex_Release_critical( mutex, executing, &queue_context );
_Mutex_Release_critical( mutex, executing, level, &queue_context );
}
static Mutex_recursive_Control *_Mutex_recursive_Get(
@@ -266,25 +293,27 @@ void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
} else if ( owner == executing ) {
++mutex->nest_level;
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
} else {
_Thread_queue_Context_set_no_timeout( &queue_context );
_Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
_Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
}
}
@@ -295,24 +324,26 @@ int _Mutex_recursive_Acquire_timed(
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return 0;
} else if ( owner == executing ) {
++mutex->nest_level;
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return 0;
} else {
@@ -320,18 +351,18 @@ int _Mutex_recursive_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return ETIMEDOUT;
default:
break;
}
_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
_Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
_Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -341,13 +372,15 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
@@ -362,7 +395,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
eno = EBUSY;
}
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return eno;
}
@@ -371,23 +404,25 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
ISR_Level level;
Thread_Control *executing;
unsigned int nest_level;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Mutex_ISR_disable( level, &queue_context );
executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
_Assert( mutex->Mutex.Queue.Queue.owner == executing );
nest_level = mutex->nest_level;
if ( __predict_true( nest_level == 0 ) ) {
_Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
_Mutex_Release_critical( &mutex->Mutex, executing, level, &queue_context );
} else {
mutex->nest_level = nest_level - 1;
_Mutex_Queue_release( &mutex->Mutex, &queue_context );
_Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
}
}

File diff suppressed because it is too large. (Load Diff)