validation: Support a partial thread queue flush

Update #3716.
Sebastian Huber
2022-08-31 11:26:00 +02:00
parent 44b3bc65af
commit 1dca588f63
7 changed files with 53 additions and 31 deletions

View File

@@ -185,7 +185,7 @@ static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
   return STATUS_BUILD( 0, eno );
 }
 
-static void Flush( TQContext *tq_ctx )
+static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
 {
   Context *ctx;
   int      count;
@@ -199,6 +199,8 @@ static void Flush( TQContext *tq_ctx )
   count = _Futex_Wake( &ctx->futex, INT_MAX );
   T_eq_int( count, how_many > 1 ? how_many - 1 : 0 );
+
+  return thread_count;
 }
 
 static void NewlibReqFutexWake_Pre_Count_Prepare(
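Assembled from the two hunks above, the updated futex flush handler plausibly reads as follows; the ToContext() container-of conversion and the derivation of how_many are assumptions, since those lines lie outside the hunk context:

  static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
  {
    Context *ctx;
    int      count;
    int      how_many;

    (void) all;                /* assumption: this test only exercises the full flush */
    ctx = ToContext( tq_ctx ); /* assumed helper mapping the TQContext to the test context */
    how_many = (int) thread_count;

    /* _Futex_Wake() returns the number of threads it woke up */
    count = _Futex_Wake( &ctx->futex, INT_MAX );
    T_eq_int( count, how_many > 1 ? how_many - 1 : 0 );

    return thread_count;
  }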

View File

@@ -228,12 +228,16 @@ static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
   return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
 }
 
-static void Flush( TQContext *tq_ctx )
+static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
 {
   rtems_status_code sc;
 
+  (void) all;
+
   sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
   T_rsc_success( sc );
+
+  return thread_count;
 }
 
 static void RtemsSemReqFlush_Pre_Class_Prepare(
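Pieced together, the new semaphore flush handler reads as follows; since rtems_semaphore_flush() unblocks every waiting task, the all flag is deliberately ignored:

  static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
  {
    rtems_status_code sc;

    (void) all; /* rtems_semaphore_flush() always readies all waiters */

    sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
    T_rsc_success( sc );

    /* report that all enqueued threads were flushed */
    return thread_count;
  }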

View File

@@ -147,7 +147,7 @@ static void Flush( void *arg )
   ctx = arg;
 
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -288,7 +288,7 @@ static void ScoreTqReqFlushFifo_Action( ScoreTqReqFlushFifo_Context *ctx )
     TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );

View File

@@ -179,7 +179,7 @@ static void Flush( void *arg )
   ctx = arg;
 
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -434,7 +434,7 @@ static void ScoreTqReqFlushPriorityInherit_Action(
     );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );

View File

@@ -154,7 +154,7 @@ static void Flush( void *arg )
   ctx = arg;
 
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -315,7 +315,7 @@ static void ScoreTqReqFlushPriority_Action(
     );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );

View File

@@ -333,8 +333,12 @@ static void Worker( rtems_task_argument arg, TQWorkerKind worker )
     _Thread_Dispatch_direct( cpu_self );
   }
 
-  if ( ( events & TQ_EVENT_FLUSH ) != 0 ) {
-    TQFlush( ctx );
+  if ( ( events & TQ_EVENT_FLUSH_ALL ) != 0 ) {
+    TQFlush( ctx, true );
   }
 
+  if ( ( events & TQ_EVENT_FLUSH_PARTIAL ) != 0 ) {
+    TQFlush( ctx, false );
+  }
+
   if ( ( events & TQ_EVENT_ENQUEUE_DONE ) != 0 ) {
@@ -647,9 +651,9 @@ Status_Control TQSurrender( TQContext *ctx )
   return ( *ctx->surrender )( ctx );
 }
 
-void TQFlush( TQContext *ctx )
+void TQFlush( TQContext *ctx, bool flush_all )
 {
-  ( *ctx->flush )( ctx );
+  ctx->flush_count = ( *ctx->flush )( ctx, ctx->how_many, flush_all );
 }
 
 rtems_tcb *TQGetOwner( TQContext *ctx )
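Because TQFlush() now stores the handler's return value in flush_count, a test action can assert how many threads a flush actually extracted. A hedged usage sketch, assuming a handler that extracts exactly one thread per partial flush (the T_eq_u32() check is illustrative, not part of this commit):

  /* let blocker A run the flush handler in partial mode */
  TQSchedulerRecordStart( ctx->tq_ctx );
  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_PARTIAL );
  TQSchedulerRecordStop( ctx->tq_ctx );

  /* the handler reported the count of threads it extracted */
  T_eq_u32( ctx->tq_ctx->flush_count, 1 );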

View File

@@ -120,23 +120,24 @@ typedef enum {
   TQ_EVENT_MUTEX_B_OBTAIN = RTEMS_EVENT_10,
   TQ_EVENT_MUTEX_B_RELEASE = RTEMS_EVENT_11,
   TQ_EVENT_BUSY_WAIT = RTEMS_EVENT_12,
-  TQ_EVENT_FLUSH = RTEMS_EVENT_13,
-  TQ_EVENT_SCHEDULER_RECORD_START = RTEMS_EVENT_14,
-  TQ_EVENT_SCHEDULER_RECORD_STOP = RTEMS_EVENT_15,
-  TQ_EVENT_TIMEOUT = RTEMS_EVENT_16,
-  TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN = RTEMS_EVENT_17,
-  TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE = RTEMS_EVENT_18,
-  TQ_EVENT_ENQUEUE_FATAL = RTEMS_EVENT_19,
-  TQ_EVENT_MUTEX_C_OBTAIN = RTEMS_EVENT_20,
-  TQ_EVENT_MUTEX_C_RELEASE = RTEMS_EVENT_21,
-  TQ_EVENT_MUTEX_FIFO_OBTAIN = RTEMS_EVENT_22,
-  TQ_EVENT_MUTEX_FIFO_RELEASE = RTEMS_EVENT_23,
-  TQ_EVENT_ENQUEUE_TIMED = RTEMS_EVENT_24,
-  TQ_EVENT_MUTEX_D_OBTAIN = RTEMS_EVENT_25,
-  TQ_EVENT_MUTEX_D_RELEASE = RTEMS_EVENT_26,
-  TQ_EVENT_PIN = RTEMS_EVENT_27,
-  TQ_EVENT_UNPIN = RTEMS_EVENT_28,
-  TQ_EVENT_COUNT = RTEMS_EVENT_29
+  TQ_EVENT_FLUSH_ALL = RTEMS_EVENT_13,
+  TQ_EVENT_FLUSH_PARTIAL = RTEMS_EVENT_14,
+  TQ_EVENT_SCHEDULER_RECORD_START = RTEMS_EVENT_15,
+  TQ_EVENT_SCHEDULER_RECORD_STOP = RTEMS_EVENT_16,
+  TQ_EVENT_TIMEOUT = RTEMS_EVENT_17,
+  TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN = RTEMS_EVENT_18,
+  TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE = RTEMS_EVENT_19,
+  TQ_EVENT_ENQUEUE_FATAL = RTEMS_EVENT_20,
+  TQ_EVENT_MUTEX_C_OBTAIN = RTEMS_EVENT_21,
+  TQ_EVENT_MUTEX_C_RELEASE = RTEMS_EVENT_22,
+  TQ_EVENT_MUTEX_FIFO_OBTAIN = RTEMS_EVENT_23,
+  TQ_EVENT_MUTEX_FIFO_RELEASE = RTEMS_EVENT_24,
+  TQ_EVENT_ENQUEUE_TIMED = RTEMS_EVENT_25,
+  TQ_EVENT_MUTEX_D_OBTAIN = RTEMS_EVENT_26,
+  TQ_EVENT_MUTEX_D_RELEASE = RTEMS_EVENT_27,
+  TQ_EVENT_PIN = RTEMS_EVENT_28,
+  TQ_EVENT_UNPIN = RTEMS_EVENT_29,
+  TQ_EVENT_COUNT = RTEMS_EVENT_30
 } TQEvent;
 
 typedef enum {
@@ -262,6 +263,12 @@ typedef struct TQContext {
    */
   uint32_t how_many;
 
+  /**
+   * @brief This member contains the count of threads flushed by the most
+   *   recent flush operation.
+   */
+  uint32_t flush_count;
+
   /**
    * @brief This member provides a context to jump back to before the
    *   enqueue.
@@ -290,8 +297,13 @@ typedef struct TQContext {
   /**
    * @brief This member provides the thread queue flush handler.
    *
+   * The second parameter specifies the count of enqueued threads.  If the
+   * third parameter is true, all enqueued threads shall be extracted,
+   * otherwise the thread queue shall be partially flushed.  The handler
+   * shall return the count of flushed threads.
    */
-  void ( *flush )( struct TQContext * );
+  uint32_t ( *flush )( struct TQContext *, uint32_t, bool );
 
   /**
    * @brief This member provides the get owner handler.
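To illustrate the contract, a hypothetical handler for a queue with a signal/broadcast style interface could implement both modes as follows; WakeOne() and WakeAll() are made-up stand-ins for the queue's real wake operations:

  static uint32_t ExampleFlush( struct TQContext *tq_ctx, uint32_t thread_count, bool all )
  {
    if ( all ) {
      WakeAll( tq_ctx );  /* hypothetical: extract every enqueued thread */
      return thread_count;
    }

    WakeOne( tq_ctx );    /* hypothetical: extract exactly one thread */
    return 1;
  }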
@@ -390,7 +402,7 @@ void TQEnqueueDone( TQContext *ctx );
 Status_Control TQSurrender( TQContext *ctx );
 
-void TQFlush( TQContext *ctx );
+void TQFlush( TQContext *ctx, bool flush_all );
 
 rtems_tcb *TQGetOwner( TQContext *ctx );