forked from Imagelibrary/rtems
score: Add thread queue enqueue callout
Replace the expected thread dispatch disable level with a thread queue enqueue callout. This enables the use of _Thread_Dispatch_direct() in the thread queue enqueue procedure and avoids impossible execution paths, e.g. Per_CPU_Control::dispatch_necessary is always true at this point.
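In caller terms the change looks roughly like the sketch below. It is illustrative only: My_object_Control, my_object_unlock(), my_object_block() and the choice of the FIFO operations are placeholders invented for this example, not code from this commit. Instead of telling _Thread_queue_Enqueue_critical() which thread dispatch disable level to expect, the caller now registers an enqueue callout; the callout runs after the thread queue lock has been released, with thread dispatching disabled, immediately before the thread blocks, which is where object-specific unlock work such as _Region_Unlock() or pthread_mutex_unlock() now lives.

#include <rtems/score/threadqimpl.h>
#include <rtems/score/statesimpl.h>

/* Hypothetical object, defined only for this sketch. */
typedef struct {
  Thread_queue_Control Wait_queue;
} My_object_Control;

#define MY_OBJECT_OF_THREAD_QUEUE_QUEUE( queue ) \
  RTEMS_CONTAINER_OF( queue, My_object_Control, Wait_queue.Queue )

void my_object_unlock( My_object_Control *my_object );

/*
 * Invoked by _Thread_queue_Enqueue_critical() after the thread queue lock
 * was released, with thread dispatching disabled, right before the thread
 * blocks.
 */
static void my_enqueue_callout(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  my_object_unlock( MY_OBJECT_OF_THREAD_QUEUE_QUEUE( queue ) );
}

static void my_object_block(
  My_object_Control *my_object,
  Thread_Control    *executing,
  States_Control     state
)
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Acquire( &my_object->Wait_queue, &queue_context );

  /* Old API: _Thread_queue_Context_set_expected_level( &queue_context, 1 ); */
  _Thread_queue_Context_set_enqueue_callout( &queue_context, my_enqueue_callout );
  _Thread_queue_Context_set_no_timeout( &queue_context );
  _Thread_queue_Enqueue_critical(
    &my_object->Wait_queue.Queue,
    &_Thread_queue_Operations_FIFO,
    executing,
    state,
    &queue_context
  );
}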
@@ -35,6 +35,10 @@ extern "C" {

#define POSIX_CONDITION_VARIABLES_TQ_OPERATIONS &_Thread_queue_Operations_FIFO

#define POSIX_CONDITION_VARIABLE_OF_THREAD_QUEUE_QUEUE( queue ) \
RTEMS_CONTAINER_OF( \
queue, POSIX_Condition_variables_Control, Wait_queue.Queue )

/**
* The following defines the information control block used to manage
* this class of objects.

@@ -27,6 +27,30 @@

THREAD_QUEUE_OBJECT_ASSERT( POSIX_Condition_variables_Control, Wait_queue );

static void _POSIX_Condition_variables_Enqueue_callout(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
POSIX_Condition_variables_Control *the_cond;
int mutex_error;

the_cond = POSIX_CONDITION_VARIABLE_OF_THREAD_QUEUE_QUEUE( queue );

mutex_error = pthread_mutex_unlock( &the_cond->mutex );
if ( mutex_error != 0 ) {
/*
* Historically, we ignored the unlock status since the behavior
* is undefined by POSIX. But GNU/Linux returns EPERM in this
* case, so we follow their lead.
*/
_Assert( mutex_error == EINVAL || mutex_error == EPERM );
_Thread_queue_Extract( the_thread );
the_thread->Wait.return_code = STATUS_NOT_OWNER;
}
}

int _POSIX_Condition_variables_Wait_support(
pthread_cond_t *cond,
pthread_mutex_t *mutex,
@@ -37,7 +61,6 @@ int _POSIX_Condition_variables_Wait_support(
Thread_queue_Context queue_context;
int error;
int mutex_error;
Per_CPU_Control *cpu_self;
Thread_Control *executing;
Watchdog_Interval timeout;
bool already_timedout;
@@ -91,14 +114,13 @@ int _POSIX_Condition_variables_Wait_support(
}

the_cond->mutex = *mutex;

cpu_self = _Thread_Dispatch_disable_critical(
&queue_context.Lock_context.Lock_context
);
executing = _Per_CPU_Get_executing( cpu_self );
executing = _Thread_Executing;

if ( !already_timedout ) {
_Thread_queue_Context_set_expected_level( &queue_context, 2 );
_Thread_queue_Context_set_enqueue_callout(
&queue_context,
_POSIX_Condition_variables_Enqueue_callout
);
_Thread_queue_Enqueue_critical(
&the_cond->Wait_queue.Queue,
POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
@@ -106,33 +128,18 @@ int _POSIX_Condition_variables_Wait_support(
STATES_WAITING_FOR_CONDITION_VARIABLE,
&queue_context
);
error = _POSIX_Get_error_after_wait( executing );
} else {
_POSIX_Condition_variables_Release( the_cond, &queue_context );
executing->Wait.return_code = STATUS_TIMEOUT;

mutex_error = pthread_mutex_unlock( &the_cond->mutex );
if ( mutex_error != 0 ) {
error = EPERM;
} else {
error = ETIMEDOUT;
}
}

mutex_error = pthread_mutex_unlock( mutex );
if ( mutex_error != 0 ) {
/*
* Historically, we ignored the unlock status since the behavior
* is undefined by POSIX. But GNU/Linux returns EPERM in this
* case, so we follow their lead.
*/
_Assert( mutex_error == EINVAL || mutex_error == EPERM );
_Thread_queue_Extract( executing );
_Thread_Dispatch_enable( cpu_self );
return EPERM;
}

/*
* Switch ourself out because we blocked as a result of the
* _Thread_queue_Enqueue_critical().
*/

_Thread_Dispatch_enable( cpu_self );

error = _POSIX_Get_error_after_wait( executing );

/*
* If the thread is interrupted, while in the thread queue, by
* a POSIX signal, then pthread_cond_wait returns spuriously,
@@ -149,10 +156,12 @@ int _POSIX_Condition_variables_Wait_support(
* When we get here the dispatch disable level is 0.
*/

mutex_error = pthread_mutex_lock( mutex );
if ( mutex_error != 0 ) {
_Assert( mutex_error == EINVAL );
return EINVAL;
if ( error != EPERM ) {
mutex_error = pthread_mutex_lock( mutex );
if ( mutex_error != 0 ) {
_Assert( mutex_error == EINVAL );
error = EINVAL;
}
}

return error;

@@ -40,23 +40,34 @@ static inline int nanosleep_helper(
Watchdog_Discipline discipline
)
{
Thread_Control *executing;
struct timespec stop;
int err = 0;
Thread_queue_Context queue_context;
struct timespec stop;
int err;

executing = _Thread_Get_executing();
err = 0;

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_enqueue_callout(
&queue_context,
_Thread_queue_Enqueue_do_nothing
);

if ( discipline == WATCHDOG_ABSOLUTE ) {
_Thread_queue_Context_set_absolute_timeout( &queue_context, ticks );
} else {
_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
}

/*
* Block for the desired amount of time
*/
_Thread_queue_Enqueue(
&_Nanosleep_Pseudo_queue,
_Thread_queue_Acquire( &_Nanosleep_Pseudo_queue, &queue_context );
_Thread_queue_Enqueue_critical(
&_Nanosleep_Pseudo_queue.Queue,
&_Thread_queue_Operations_FIFO,
executing,
_Thread_Executing,
STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL,
ticks,
discipline,
1
&queue_context
);

clock_gettime( clock_id, &stop );

@@ -39,7 +39,7 @@ static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
void *value;

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
_Thread_queue_Context_set_no_timeout( &queue_context );
the_thread = _Thread_Get( thread, &queue_context.Lock_context.Lock_context );

@@ -156,7 +156,7 @@ int sigtimedwait(

executing->Wait.option = *set;
executing->Wait.return_argument = the_info;
_Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
_Thread_queue_Enqueue_critical(
&_POSIX_signals_Wait_queue.Queue,
POSIX_SIGNALS_TQ_OPERATIONS,

@@ -35,6 +35,9 @@ extern "C" {
* @{
*/

#define REGION_OF_THREAD_QUEUE_QUEUE( queue ) \
RTEMS_CONTAINER_OF( queue, Region_Control, Wait_queue.Queue )

/**
* The following defines the information control block used to
* manage this class of objects.

@@ -24,6 +24,18 @@
#include <rtems/score/threadqimpl.h>
#include <rtems/score/statesimpl.h>

static void _Region_Enqueue_callout(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
Region_Control *the_region;

the_region = REGION_OF_THREAD_QUEUE_QUEUE( queue );
_Region_Unlock( the_region );
}

rtems_status_code rtems_region_get_segment(
rtems_id id,
uintptr_t size,
@@ -64,35 +76,29 @@ rtems_status_code rtems_region_get_segment(
} else if ( _Options_Is_no_wait( option_set ) ) {
status = RTEMS_UNSATISFIED;
} else {
Per_CPU_Control *cpu_self;
Thread_Control *executing;
Thread_queue_Context queue_context;
Thread_Control *executing;

/*
* Switch from using the memory allocation mutex to using a
* dispatching disabled critical section. We have to do this
* because this thread is going to block.
*/
/* FIXME: This is a home grown condition variable */
cpu_self = _Thread_Dispatch_disable();
_Region_Unlock( the_region );

executing = _Per_CPU_Get_executing( cpu_self );
_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Acquire( &the_region->Wait_queue, &queue_context );

executing = _Thread_Executing;
executing->Wait.count = size;
executing->Wait.return_argument = segment;

_Thread_queue_Enqueue(
&the_region->Wait_queue,
/* FIXME: This is a home grown condition variable */
_Thread_queue_Context_set_enqueue_callout(
&queue_context,
_Region_Enqueue_callout
);
_Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
_Thread_queue_Enqueue_critical(
&the_region->Wait_queue.Queue,
the_region->wait_operations,
executing,
STATES_WAITING_FOR_SEGMENT,
timeout,
WATCHDOG_RELATIVE,
2
&queue_context
);

_Thread_Dispatch_enable( cpu_self );

return _Status_Get_after_wait( executing );
}
}

@@ -25,12 +25,12 @@ rtems_status_code rtems_task_delete(
rtems_id id
)
{
Thread_Control *the_thread;
ISR_lock_Context lock_context;
Thread_Control *executing;
Per_CPU_Control *cpu_self;
Thread_Control *the_thread;
Thread_Close_context context;
Thread_Control *executing;

the_thread = _Thread_Get( id, &lock_context );
_Thread_queue_Context_initialize( &context.Base );
the_thread = _Thread_Get( id, &context.Base.Lock_context.Lock_context );

if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
@@ -42,12 +42,16 @@ rtems_status_code rtems_task_delete(
return RTEMS_INVALID_ID;
}

cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
_ISR_lock_ISR_enable( &lock_context );

executing = _Per_CPU_Get_executing( cpu_self );
executing = _Thread_Executing;

if ( the_thread == executing ) {
Per_CPU_Control *cpu_self;

cpu_self = _Thread_Dispatch_disable_critical(
&context.Base.Lock_context.Lock_context
);
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );

/*
* The Classic tasks are neither detached nor joinable. In case of
* self deletion, they are detached, otherwise joinable by default.
@@ -57,10 +61,10 @@ rtems_status_code rtems_task_delete(
THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED,
NULL
);
_Thread_Dispatch_enable( cpu_self );
} else {
_Thread_Close( the_thread, executing );
_Thread_Close( the_thread, executing, &context );
}

_Thread_Dispatch_enable( cpu_self );
return RTEMS_SUCCESSFUL;
}

@@ -45,7 +45,7 @@ static const char *const internal_error_text[] = {
"INTERNAL_ERROR_BAD_ATTRIBUTES",
"INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY",
"OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL",
"INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
"OBSOLETE_INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
"INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0",
"OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP",
"INTERNAL_ERROR_GXX_KEY_ADD_FAILED",

@@ -184,7 +184,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
return STATUS_UNSATISFIED;
}

_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_semaphore->Wait_queue.Queue,
operations,

@@ -154,7 +154,7 @@ typedef enum {
INTERNAL_ERROR_BAD_ATTRIBUTES,
INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY,
OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL,
INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
OBSOLETE_INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0,
OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP,
INTERNAL_ERROR_GXX_KEY_ADD_FAILED,

@@ -245,6 +245,11 @@ void _Thread_Cancel(
void *exit_value
);

typedef struct {
Thread_queue_Context Base;
Thread_Control *cancel;
} Thread_Close_context;

/**
* @brief Closes the thread.
*
@@ -252,7 +257,11 @@ void _Thread_Cancel(
* case the executing thread is not terminated, then this function waits until
* the terminating thread reached the zombie state.
*/
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
void _Thread_Close(
Thread_Control *the_thread,
Thread_Control *executing,
Thread_Close_context *context
);

RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
{

@@ -45,10 +45,27 @@ struct Scheduler_Node;

typedef struct _Thread_Control Thread_Control;

typedef struct Thread_queue_Context Thread_queue_Context;

typedef struct Thread_queue_Queue Thread_queue_Queue;

typedef struct Thread_queue_Operations Thread_queue_Operations;

/**
* @brief Thread queue enqueue callout.
*
* @param[in] queue The actual thread queue.
* @param[in] the_thread The thread to enqueue.
* @param[in] queue_context The thread queue context of the lock acquire.
*
* @see _Thread_queue_Context_set_enqueue_callout().
*/
typedef void ( *Thread_queue_Enqueue_callout )(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
);

/**
* @brief Thread queue deadlock callout.
*
@@ -168,7 +185,7 @@ typedef struct {
*
* @see _Thread_queue_Context_initialize().
*/
typedef struct {
struct Thread_queue_Context {
/**
* @brief The lock context for the thread queue acquire and release
* operations.
@@ -176,13 +193,14 @@ typedef struct {
Thread_queue_Lock_context Lock_context;

/**
* @brief The expected thread dispatch disable level for
* _Thread_queue_Enqueue_critical().
* @brief The enqueue callout for _Thread_queue_Enqueue_critical().
*
* In case the actual thread dispatch disable level is not equal to the
* expected level, then a fatal error occurs.
* The callout is invoked after the release of the thread queue lock with
* thread dispatching disabled. Afterwards the thread is blocked.
*
* @see _Thread_queue_Enqueue_do_nothing().
*/
uint32_t expected_thread_dispatch_disable_level;
Thread_queue_Enqueue_callout enqueue_callout;

/**
* @brief The clock discipline for the interval timeout.
@@ -274,7 +292,7 @@ typedef struct {
*/
Thread_queue_MP_callout mp_callout;
#endif
} Thread_queue_Context;
};

/**
* @brief Thread priority queue.

@@ -61,6 +61,12 @@ typedef struct {
Thread_queue_Queue Queue;
} Thread_queue_Syslock_queue;

void _Thread_queue_Enqueue_do_nothing(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
);

/**
* @brief Sets the thread wait return code to STATUS_DEADLOCK.
*/
@@ -82,7 +88,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
{
#if defined(RTEMS_DEBUG)
memset( queue_context, 0, sizeof( *queue_context ) );
queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
queue_context->enqueue_callout = _Thread_queue_Enqueue_do_nothing;
queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal;
#else
(void) queue_context;
@@ -90,21 +96,35 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
}

/**
* @brief Sets the expected thread dispatch disable level in the thread queue
* context.
* @brief Sets the enqueue callout in the thread queue context.
*
* @param queue_context The thread queue context.
* @param expected_level The expected thread dispatch disable level.
* @param enqueue_callout The enqueue callout.
*
* @see _Thread_queue_Enqueue_critical().
*/
RTEMS_INLINE_ROUTINE void
_Thread_queue_Context_set_expected_level(
Thread_queue_Context *queue_context,
uint32_t expected_level
_Thread_queue_Context_set_enqueue_callout(
Thread_queue_Context *queue_context,
Thread_queue_Enqueue_callout enqueue_callout
)
{
queue_context->expected_thread_dispatch_disable_level = expected_level;
queue_context->enqueue_callout = enqueue_callout;
}

/**
* @brief Sets the do nothing enqueue callout in the thread queue context.
*
* @param queue_context The thread queue context.
*
* @see _Thread_queue_Enqueue_critical().
*/
RTEMS_INLINE_ROUTINE void
_Thread_queue_Context_set_do_nothing_enqueue_callout(
Thread_queue_Context *queue_context
)
{
queue_context->enqueue_callout = _Thread_queue_Enqueue_do_nothing;
}

/**
@@ -562,7 +582,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
* mutex->owner = executing;
* _Thread_queue_Release( &mutex->Queue, queue_context );
* } else {
* _Thread_queue_Context_set_expected_level( &queue_context, 1 );
* _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
* _Thread_queue_Enqueue_critical(
* &mutex->Queue.Queue,
* MUTEX_TQ_OPERATIONS,
@@ -638,12 +658,17 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Acquire( the_thread_queue, &queue_context );
_Thread_queue_Context_set_expected_level( &queue_context, expected_level );
_Thread_queue_Context_set_enqueue_callout(
&queue_context,
_Thread_queue_Enqueue_do_nothing
);

if ( discipline == WATCHDOG_ABSOLUTE ) {
_Thread_queue_Context_set_absolute_timeout( &queue_context, timeout );
} else {
_Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
}

_Thread_queue_Enqueue_critical(
&the_thread_queue->Queue,
operations,

@@ -80,31 +80,48 @@ static void _Condition_Queue_release(
);
}

static Per_CPU_Control *_Condition_Do_wait(
typedef struct {
Thread_queue_Context Base;
struct _Mutex_Control *mutex;
} Condition_Enqueue_context;

static void _Condition_Enqueue_callout(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
Condition_Enqueue_context *context;

context = (Condition_Enqueue_context *) queue_context;
_Mutex_Release( context->mutex );
}

static Thread_Control *_Condition_Do_wait(
struct _Condition_Control *_condition,
Thread_queue_Context *queue_context
struct _Mutex_Control *_mutex,
Condition_Enqueue_context *context
)
{
Condition_Control *condition;
Thread_Control *executing;
Per_CPU_Control *cpu_self;

context->mutex = _mutex;
condition = _Condition_Get( _condition );
executing = _Condition_Queue_acquire_critical( condition, queue_context );
cpu_self = _Thread_Dispatch_disable_critical(
&queue_context->Lock_context.Lock_context
executing = _Condition_Queue_acquire_critical( condition, &context->Base );
_Thread_queue_Context_set_enqueue_callout(
&context->Base,
_Condition_Enqueue_callout
);

_Thread_queue_Context_set_expected_level( queue_context, 2 );
_Thread_queue_Enqueue_critical(
&condition->Queue.Queue,
CONDITION_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_CONDITION,
queue_context
&context->Base
);

return cpu_self;
return executing;
}

void _Condition_Wait(
@@ -112,19 +129,12 @@ void _Condition_Wait(
struct _Mutex_Control *_mutex
)
{
Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
Condition_Enqueue_context context;

_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
_Thread_queue_Context_set_no_timeout( &queue_context );
cpu_self = _Condition_Do_wait(
_condition,
&queue_context
);

_Mutex_Release( _mutex );
_Thread_Dispatch_enable( cpu_self );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
_Thread_queue_Context_set_no_timeout( &context.Base );
_Condition_Do_wait( _condition, _mutex, &context );
_Mutex_Acquire( _mutex );
}

@@ -134,57 +144,59 @@ int _Condition_Wait_timed(
const struct timespec *abstime
)
{
Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
Thread_Control *executing;
int eno;
Watchdog_Interval ticks;
Condition_Enqueue_context context;
Thread_Control *executing;
int eno;
Watchdog_Interval ticks;

_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );

switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
_ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
_ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
return ETIMEDOUT;
default:
break;
}

_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
cpu_self = _Condition_Do_wait( _condition, &queue_context );

_Mutex_Release( _mutex );
executing = cpu_self->executing;
_Thread_Dispatch_enable( cpu_self );
_Thread_queue_Context_set_relative_timeout( &context.Base, ticks );
executing = _Condition_Do_wait( _condition, _mutex, &context );
eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
_Mutex_Acquire( _mutex );

return eno;
}

static unsigned int _Condition_Unnest_mutex(
struct _Mutex_recursive_Control *_mutex
)
{
unsigned int nest_level;

nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;

return nest_level;
}

void _Condition_Wait_recursive(
struct _Condition_Control *_condition,
struct _Mutex_recursive_Control *_mutex
)
{
Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
unsigned int nest_level;
Condition_Enqueue_context context;
unsigned int nest_level;

_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
_Thread_queue_Context_set_no_timeout( &queue_context );
cpu_self = _Condition_Do_wait( _condition, &queue_context );

nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
_Mutex_recursive_Release( _mutex );
_Thread_Dispatch_enable( cpu_self );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
_Thread_queue_Context_set_no_timeout( &context.Base );
nest_level = _Condition_Unnest_mutex( _mutex );
_Condition_Do_wait( _condition, &_mutex->_Mutex, &context );
_Mutex_recursive_Acquire( _mutex );
_mutex->_nest_level = nest_level;
}
@@ -195,36 +207,29 @@ int _Condition_Wait_recursive_timed(
const struct timespec *abstime
)
{
Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
Thread_Control *executing;
int eno;
unsigned int nest_level;
Watchdog_Interval ticks;
Condition_Enqueue_context context;
Thread_Control *executing;
int eno;
unsigned int nest_level;
Watchdog_Interval ticks;

_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );

switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
_ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
_ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
return ETIMEDOUT;
default:
break;
}

_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
cpu_self = _Condition_Do_wait( _condition, &queue_context );

nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
_Mutex_recursive_Release( _mutex );
executing = cpu_self->executing;
_Thread_Dispatch_enable( cpu_self );
_Thread_queue_Context_set_relative_timeout( &context.Base, ticks );
nest_level = _Condition_Unnest_mutex( _mutex );
executing = _Condition_Do_wait( _condition, &_mutex->_Mutex, &context );
eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
_Mutex_recursive_Acquire( _mutex );
_mutex->_nest_level = nest_level;
@@ -235,7 +240,7 @@ int _Condition_Wait_recursive_timed(
typedef struct {
Thread_queue_Context Base;
int count;
} Condition_Context;
} Condition_Flush_context;

static Thread_Control *_Condition_Flush_filter(
Thread_Control *the_thread,
@@ -243,9 +248,9 @@ static Thread_Control *_Condition_Flush_filter(
Thread_queue_Context *queue_context
)
{
Condition_Context *context;
Condition_Flush_context *context;

context = (Condition_Context *) queue_context;
context = (Condition_Flush_context *) queue_context;

if ( context->count <= 0 ) {
return NULL;
@@ -258,8 +263,8 @@ static Thread_Control *_Condition_Flush_filter(

static void _Condition_Wake( struct _Condition_Control *_condition, int count )
{
Condition_Control *condition;
Condition_Context context;
Condition_Control *condition;
Condition_Flush_context context;

condition = _Condition_Get( _condition );
_Thread_queue_Context_initialize( &context.Base );

@@ -44,7 +44,7 @@ Status_Control _CORE_barrier_Seize(
return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
} else {
the_barrier->number_of_waiting_threads = number_of_waiting_threads;
_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_barrier->Wait_queue.Queue,
CORE_BARRIER_TQ_OPERATIONS,

@@ -113,7 +113,7 @@ Status_Control _CORE_message_queue_Seize(
executing->Wait.return_argument = size_p;
/* Wait.count will be filled in with the message priority */

_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,

@@ -131,7 +131,7 @@ Status_Control _CORE_message_queue_Submit(
executing->Wait.option = (uint32_t) size;
executing->Wait.count = submit_type;

_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,

@@ -32,7 +32,7 @@ Status_Control _CORE_mutex_Seize_slow(
)
{
if ( wait ) {
_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Context_set_deadlock_callout(
queue_context,
_Thread_queue_Deadlock_status

@@ -78,7 +78,7 @@ Status_Control _CORE_RWLock_Seize_for_reading(

executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;

_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,

@@ -66,7 +66,7 @@ Status_Control _CORE_RWLock_Seize_for_writing(

executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;

_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,

@@ -92,7 +92,7 @@ int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
executing = _Futex_Queue_acquire_critical( futex, &queue_context );

if ( *uaddr == val ) {
_Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
_Thread_queue_Context_set_no_timeout( &queue_context );
_Thread_queue_Context_set_ISR_level( &queue_context, level );
_Thread_queue_Enqueue_critical(

@@ -226,47 +226,59 @@ void _MPCI_Send_process_packet (
(*_MPCI_table->send_packet)( destination, the_packet );
}

static void _MPCI_Enqueue_callout(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
_Thread_Dispatch_unnest( _Per_CPU_Get() );
}

Status_Control _MPCI_Send_request_packet(
uint32_t destination,
MP_packet_Prefix *the_packet,
States_Control extra_state
)
{
Per_CPU_Control *cpu_self;
Thread_Control *executing;
Per_CPU_Control *cpu_self;
Thread_queue_Context queue_context;
Thread_Control *executing;

/*
* See if we need a default timeout
*/

if (the_packet->timeout == MPCI_DEFAULT_TIMEOUT)
the_packet->timeout = _MPCI_table->default_timeout;

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_enqueue_callout(
&queue_context,
_MPCI_Enqueue_callout
);
_Thread_queue_Context_set_relative_timeout( &queue_context, the_packet->timeout );

cpu_self = _Thread_Dispatch_disable();

executing = _Per_CPU_Get_executing( cpu_self );
executing = _Per_CPU_Get_executing( cpu_self );
executing->Wait.remote_id = the_packet->id;

the_packet->source_tid = executing->Object.id;
the_packet->source_priority = _Thread_Get_priority( executing );
the_packet->to_convert =
( the_packet->to_convert - sizeof(MP_packet_Prefix) ) / sizeof(uint32_t);
the_packet->source_tid = executing->Object.id;
the_packet->source_priority = _Thread_Get_priority( executing );
the_packet->to_convert =
( the_packet->to_convert - sizeof(MP_packet_Prefix) ) / sizeof(uint32_t);

executing->Wait.remote_id = the_packet->id;

(*_MPCI_table->send_packet)( destination, the_packet );

/*
* See if we need a default timeout
*/

if (the_packet->timeout == MPCI_DEFAULT_TIMEOUT)
the_packet->timeout = _MPCI_table->default_timeout;

_Thread_queue_Enqueue(
&_MPCI_Remote_blocked_threads,
&_Thread_queue_Operations_FIFO,
executing,
STATES_WAITING_FOR_RPC_REPLY | extra_state,
the_packet->timeout,
WATCHDOG_RELATIVE,
2
);

_Thread_Dispatch_enable( cpu_self );
(*_MPCI_table->send_packet)( destination, the_packet );

_Thread_queue_Acquire( &_MPCI_Remote_blocked_threads, &queue_context );
_Thread_queue_Enqueue_critical(
&_MPCI_Remote_blocked_threads.Queue,
&_Thread_queue_Operations_FIFO,
executing,
STATES_WAITING_FOR_RPC_REPLY | extra_state,
&queue_context
);
return _Thread_Wait_get_status( executing );
}

@@ -109,7 +109,7 @@ static void _Mutex_Acquire_slow(
Thread_queue_Context *queue_context
)
{
_Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
_Thread_queue_Context_set_deadlock_callout(
queue_context,
_Thread_queue_Deadlock_fatal

@@ -103,7 +103,7 @@ void _Semaphore_Wait( struct _Semaphore_Control *_sem )
sem->count = count - 1;
_Semaphore_Queue_release( sem, level, &queue_context );
} else {
_Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
_Thread_queue_Context_set_no_timeout( &queue_context );
_Thread_queue_Context_set_ISR_level( &queue_context, level );
_Thread_queue_Enqueue_critical(

@@ -356,6 +356,15 @@ bool _Thread_queue_Path_acquire_critical(
return true;
}

void _Thread_queue_Enqueue_do_nothing(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
/* Do nothing */
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
the_thread->Wait.return_code = STATUS_DEADLOCK;
@@ -442,16 +451,7 @@ void _Thread_queue_Enqueue_critical(
);
_Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

if (
cpu_self->thread_dispatch_disable_level
!= queue_context->expected_thread_dispatch_disable_level
) {
_Terminate(
INTERNAL_ERROR_CORE,
false,
INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
);
}
( *queue_context->enqueue_callout )( queue, the_thread, queue_context );

/*
* Set the blocking state for this thread queue in the thread.
@@ -482,7 +482,7 @@ void _Thread_queue_Enqueue_critical(
}

_Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
_Thread_Dispatch_direct( cpu_self );
}

#if defined(RTEMS_SMP)

@@ -514,21 +514,40 @@ void _Thread_Cancel(
_Thread_Dispatch_enable( cpu_self );
}

void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
static void _Thread_Close_enqueue_callout(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
)
{
Thread_queue_Context queue_context;
Thread_Close_context *context;

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_expected_level( &queue_context, 2 );
_Thread_queue_Context_set_no_timeout( &queue_context );
_Thread_State_acquire( the_thread, &queue_context.Lock_context.Lock_context );
context = (Thread_Close_context *) queue_context;
_Thread_Cancel( context->cancel, the_thread, NULL );
}

void _Thread_Close(
Thread_Control *the_thread,
Thread_Control *executing,
Thread_Close_context *context
)
{
context->cancel = the_thread;
_Thread_queue_Context_set_enqueue_callout(
&context->Base,
_Thread_Close_enqueue_callout
);
_Thread_queue_Context_set_no_timeout( &context->Base );
_Thread_State_acquire_critical(
the_thread,
&context->Base.Lock_context.Lock_context
);
_Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
&queue_context
&context->Base
);
_Thread_Cancel( the_thread, executing, NULL );
}

void _Thread_Exit(

@@ -16,7 +16,7 @@
#define FATAL_ERROR_EXPECTED_SOURCE INTERNAL_ERROR_CORE
#define FATAL_ERROR_EXPECTED_IS_INTERNAL FALSE
#define FATAL_ERROR_EXPECTED_ERROR \
INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL

void force_error(void)
{
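The diff also repeats one supporting pattern in several files (Condition_Enqueue_context, Thread_Close_context): a larger context embeds the Thread_queue_Context as its first member, so the enqueue callout can recover the extra data it needs from the generic queue_context pointer. A minimal sketch follows, with the type and field names invented for illustration; only struct _Mutex_Control and _Mutex_Release() are taken from the diff.

typedef struct {
  Thread_queue_Context   Base;   /* must stay the first member */
  struct _Mutex_Control *mutex;  /* extra data the callout needs */
} My_Enqueue_context;

static void _My_Enqueue_callout(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  My_Enqueue_context *context;

  /* Valid because queue_context always points at the Base member. */
  context = (My_Enqueue_context *) queue_context;
  _Mutex_Release( context->mutex );
}

Callers initialize context.Base with _Thread_queue_Context_initialize(), fill in the extra members, and pass &context.Base wherever a Thread_queue_Context is expected, exactly as _Condition_Do_wait() does above.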