Release 6.1.9
https://github.com/eclipse-threadx/threadx.git

tx_api.h
@@ -26,7 +26,7 @@
 /* APPLICATION INTERFACE DEFINITION RELEASE */
 /* */
 /* tx_api.h PORTABLE C */
-/* 6.1.8 */
+/* 6.1.9 */
 /* AUTHOR */
 /* */
 /* William E. Lamie, Microsoft Corporation */
@@ -71,10 +71,13 @@
 /* resulting in version 6.1.6 */
 /* 06-02-2021 Yuxin Zhou Modified comment(s), added */
 /* Execution Profile support, */
 /* resulting in version 6.1.7 */
 /* 08-02-2021 Scott Larson Modified comment(s), and */
 /* update patch number, */
 /* resulting in version 6.1.8 */
+/* 10-15-2021 Yuxin Zhou Modified comment(s), */
+/* update patch number, */
+/* resulting in version 6.1.9 */
 /* */
 /**************************************************************************/

@@ -101,13 +104,13 @@ extern "C" {
 /* Define basic constants for the ThreadX kernel. */

 /* Define the major/minor version information that can be used by the application
    and the ThreadX source as well. */

 #define AZURE_RTOS_THREADX
 #define THREADX_MAJOR_VERSION 6
 #define THREADX_MINOR_VERSION 1
-#define THREADX_PATCH_VERSION 8
+#define THREADX_PATCH_VERSION 9

 /* Define the following symbol for backward compatibility */
 #define EL_PRODUCT_THREADX
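For orientation only (not part of the diff): a minimal sketch of how application code might key off the version macros this hunk bumps. The APP_HAVE_THREADX_6_1_9 name is illustrative, not from the source.

#include "tx_api.h"

/* True when the ThreadX headers in use are 6.1.9 or newer. */
#if (THREADX_MAJOR_VERSION > 6) ||                                            \
    ((THREADX_MAJOR_VERSION == 6) && (THREADX_MINOR_VERSION > 1)) ||          \
    ((THREADX_MAJOR_VERSION == 6) && (THREADX_MINOR_VERSION == 1) &&          \
     (THREADX_PATCH_VERSION >= 9))
#define APP_HAVE_THREADX_6_1_9  1
#else
#define APP_HAVE_THREADX_6_1_9  0
#endif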
@@ -214,16 +217,16 @@ extern "C" {
 #endif

 /* Event numbers 0 through 4095 are reserved by Azure RTOS. Specific event assignments are:

        ThreadX events:  1-199
        FileX events:    200-299
        NetX events:     300-599
        USBX events:     600-999
        GUIX events:     1000-1500

    User-defined event numbers start at 4096 and continue through 65535, as defined by the constants
    TX_TRACE_USER_EVENT_START and TX_TRACE_USER_EVENT_END, respectively. User events should be based
    on these constants in case the user event number assignment is changed in future releases. */

 #define TX_TRACE_USER_EVENT_START 4096 /* I1, I2, I3, I4 are user defined */
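A brief sketch (not part of the diff) of the pattern the comment above recommends; it assumes a library built with event tracing enabled (TX_ENABLE_EVENT_TRACE), and the APP_EVENT_SENSOR_READ name and its info fields are invented for illustration.

/* Base application trace events on TX_TRACE_USER_EVENT_START, as recommended above. */
#define APP_EVENT_SENSOR_READ   (TX_TRACE_USER_EVENT_START + 1)

/* Later, from application code; the four information fields are user defined. */
tx_trace_user_event_insert(APP_EVENT_SENSOR_READ, sensor_id, raw_value, 0, 0);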
@@ -249,7 +252,7 @@ extern "C" {

 /* Define basic alignment type used in block and byte pool operations. This data type must
    be at least 32-bits in size and also be large enough to hold a pointer type. */

 #ifndef ALIGN_TYPE_DEFINED
 #define ALIGN_TYPE ULONG
 #endif
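As a usage note (assumption, not shown in this diff): a port whose pointers are wider than ULONG would typically override this pair from tx_port.h before tx_api.h is processed, along these lines.

/* Hypothetical tx_port.h fragment for a 64-bit port. */
#define ALIGN_TYPE_DEFINED
#define ALIGN_TYPE      unsigned long long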
@@ -258,10 +261,10 @@ extern "C" {
 /* Define the control block definitions for all system objects. */

 /* Define the basic timer management structures. These are the structures
    used to manage thread sleep, timeout, and user timer requests. */

 /* Determine if the internal timer control block has an extension defined. If not,
    define the extension to whitespace. */

 #ifndef TX_TIMER_INTERNAL_EXTENSION
@@ -299,7 +302,7 @@ typedef struct TX_TIMER_INTERNAL_STRUCT
 } TX_TIMER_INTERNAL;

 /* Determine if the timer control block has an extension defined. If not,
    define the extension to whitespace. */

 #ifndef TX_TIMER_EXTENSION
@@ -358,7 +361,7 @@ typedef struct TX_TIMER_STRUCT
 typedef struct TX_THREAD_STRUCT
 {
     /* The first section of the control block contains critical
        information that is referenced by the port-specific
        assembly language code. Any changes in this section could
        necessitate changes in the assembly language. */
@@ -378,38 +381,38 @@ typedef struct TX_THREAD_STRUCT

 /***************************************************************/

 /* Define the first port extension in the thread control block. This
    is typically defined to whitespace or a pointer type in tx_port.h. */
 TX_THREAD_EXTENSION_0

 CHAR *tx_thread_name;                  /* Pointer to thread's name */
 UINT tx_thread_priority;               /* Priority of thread (0-1023) */
 UINT tx_thread_state;                  /* Thread's execution state */
 UINT tx_thread_delayed_suspend;        /* Delayed suspend flag */
 UINT tx_thread_suspending;             /* Thread suspending flag */
 UINT tx_thread_preempt_threshold;      /* Preemption threshold */

 /* Define the thread schedule hook. The usage of this is port/application specific,
    but when used, the function pointer designated is called whenever the thread is
    scheduled and unscheduled. */
 VOID (*tx_thread_schedule_hook)(struct TX_THREAD_STRUCT *thread_ptr, ULONG id);

 /* Nothing after this point is referenced by the target-specific
    assembly language. Hence, information after this point can
    be added to the control block providing the complete system
    is recompiled. */

 /* Define the thread's entry point and input parameter. */
 VOID (*tx_thread_entry)(ULONG id);
 ULONG tx_thread_entry_parameter;

 /* Define the thread's timer block. This is used for thread
    sleep and timeout requests. */
 TX_TIMER_INTERNAL tx_thread_timer;

 /* Define the thread's cleanup function and associated data. This
    is used to cleanup various data structures when a thread
    suspension is lifted or terminated either by the user or
    a timeout. */
 VOID (*tx_thread_suspend_cleanup)(struct TX_THREAD_STRUCT *thread_ptr, ULONG suspension_sequence);
 VOID *tx_thread_suspend_control_block;
@@ -421,17 +424,17 @@ typedef struct TX_THREAD_STRUCT
 UINT tx_thread_suspend_option;
 UINT tx_thread_suspend_status;

 /* Define the second port extension in the thread control block. This
    is typically defined to whitespace or a pointer type in tx_port.h. */
 TX_THREAD_EXTENSION_1

 /* Define pointers to the next and previous threads in the
    created list. */
 struct TX_THREAD_STRUCT
     *tx_thread_created_next,
     *tx_thread_created_previous;

 /* Define the third port extension in the thread control block. This
    is typically defined to whitespace in tx_port.h. */
 TX_THREAD_EXTENSION_2
@@ -439,14 +442,14 @@ typedef struct TX_THREAD_STRUCT
 #ifndef TX_NO_FILEX_POINTER
 VOID *tx_thread_filex_ptr;
 #endif

 /* Define the priority inheritance variables. These will be used
    to manage priority inheritance changes applied to this thread
    as a result of mutex get operations. */
 UINT tx_thread_user_priority;
 UINT tx_thread_user_preempt_threshold;
 UINT tx_thread_inherit_priority;

 /* Define the owned mutex count and list head pointer. */
 UINT tx_thread_owned_mutex_count;
 struct TX_MUTEX_STRUCT
@@ -460,7 +463,7 @@ typedef struct TX_THREAD_STRUCT
 /* Define the number of times this thread suspends. */
 ULONG tx_thread_performance_suspend_count;

 /* Define the number of times this thread is preempted by calling
    a ThreadX API service. */
 ULONG tx_thread_performance_solicited_preemption_count;
@@ -495,33 +498,33 @@ typedef struct TX_THREAD_STRUCT
 #ifndef TX_DISABLE_NOTIFY_CALLBACKS

 /* Define the application callback routine used to notify the application when
    the thread is entered or exits. */
 VOID (*tx_thread_entry_exit_notify)(struct TX_THREAD_STRUCT *thread_ptr, UINT type);
 #endif

 /* Define the fourth port extension in the thread control block. This
    is typically defined to whitespace in tx_port.h. */
 TX_THREAD_EXTENSION_3

 /* Define variables for supporting execution profile. */
 /* Note that in ThreadX 5.x, user would define TX_ENABLE_EXECUTION_CHANGE_NOTIFY and use TX_THREAD_EXTENSION_3
    to define the following two variables.
    For Azure RTOS 6, user shall use TX_EXECUTION_PROFILE_ENABLE instead of TX_ENABLE_EXECUTION_CHANGE_NOTIFY,
    and SHALL NOT add variables to TX_THREAD_EXTENSION_3. */
 #if (defined(TX_EXECUTION_PROFILE_ENABLE) && !defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY))
 unsigned long long tx_thread_execution_time_total;
 unsigned long long tx_thread_execution_time_last_start;
 #endif

 /* Define suspension sequence number. This is used to ensure suspension is still valid when
    cleanup routine executes. */
 ULONG tx_thread_suspension_sequence;

 /* Define the user extension field. This typically is defined
    to white space, but some ports of ThreadX may need to have
    additional fields in the thread control block. This is
    defined in the file tx_port.h. */
 TX_THREAD_USER_EXTENSION
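A sketch (illustrative, not part of the diff) of how an application ends up behind the tx_thread_entry_exit_notify pointer declared above; it assumes the library was built without TX_DISABLE_NOTIFY_CALLBACKS, and the app_* names are invented for the example.

static VOID app_thread_entry_exit(TX_THREAD *thread_ptr, UINT type)
{
    if (type == TX_THREAD_ENTRY)
    {
        /* thread_ptr is about to run its entry function. */
    }
    else
    {
        /* TX_THREAD_EXIT: thread_ptr has returned from its entry function. */
    }
}

/* After the thread has been created: */
tx_thread_entry_exit_notify(&app_thread, app_thread_entry_exit);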
@@ -583,14 +586,14 @@ typedef struct TX_BLOCK_POOL_STRUCT
|
||||
ULONG tx_block_pool_performance_timeout_count;
|
||||
#endif
|
||||
|
||||
/* Define the port extension in the block pool control block. This
|
||||
/* Define the port extension in the block pool control block. This
|
||||
is typically defined to whitespace in tx_port.h. */
|
||||
TX_BLOCK_POOL_EXTENSION
|
||||
|
||||
} TX_BLOCK_POOL;
|
||||
|
||||
|
||||
/* Determine if the byte allocate extension is defined. If not, define the
|
||||
/* Determine if the byte allocate extension is defined. If not, define the
|
||||
extension to whitespace. */
|
||||
|
||||
#ifndef TX_BYTE_ALLOCATE_EXTENSION
|
||||
@@ -598,7 +601,7 @@ typedef struct TX_BLOCK_POOL_STRUCT
|
||||
#endif
|
||||
|
||||
|
||||
/* Determine if the byte release extension is defined. If not, define the
|
||||
/* Determine if the byte release extension is defined. If not, define the
|
||||
extension to whitespace. */
|
||||
|
||||
#ifndef TX_BYTE_RELEASE_EXTENSION
|
||||
@@ -678,7 +681,7 @@ typedef struct TX_BYTE_POOL_STRUCT
|
||||
ULONG tx_byte_pool_performance_timeout_count;
|
||||
#endif
|
||||
|
||||
/* Define the port extension in the byte pool control block. This
|
||||
/* Define the port extension in the byte pool control block. This
|
||||
is typically defined to whitespace in tx_port.h. */
|
||||
TX_BYTE_POOL_EXTENSION
|
||||
|
||||
@@ -696,7 +699,7 @@ typedef struct TX_EVENT_FLAGS_GROUP_STRUCT
|
||||
/* Define the event flags group's name. */
|
||||
CHAR *tx_event_flags_group_name;
|
||||
|
||||
/* Define the actual current event flags in this group. A zero in a
|
||||
/* Define the actual current event flags in this group. A zero in a
|
||||
particular bit indicates the event flag is not set. */
|
||||
ULONG tx_event_flags_group_current;
|
||||
|
||||
@@ -735,19 +738,19 @@ typedef struct TX_EVENT_FLAGS_GROUP_STRUCT
|
||||
|
||||
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
|
||||
|
||||
/* Define the application callback routine used to notify the application when
|
||||
/* Define the application callback routine used to notify the application when
|
||||
an event flag is set. */
|
||||
VOID (*tx_event_flags_group_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *group_ptr);
|
||||
#endif
|
||||
|
||||
/* Define the port extension in the event flags group control block. This
|
||||
/* Define the port extension in the event flags group control block. This
|
||||
is typically defined to whitespace in tx_port.h. */
|
||||
TX_EVENT_FLAGS_GROUP_EXTENSION
|
||||
|
||||
} TX_EVENT_FLAGS_GROUP;
|
||||
|
||||
|
||||
/* Determine if the mutex put extension 1 is defined. If not, define the
|
||||
/* Determine if the mutex put extension 1 is defined. If not, define the
|
||||
extension to whitespace. */
|
||||
|
||||
#ifndef TX_MUTEX_PUT_EXTENSION_1
|
||||
@@ -755,7 +758,7 @@ typedef struct TX_EVENT_FLAGS_GROUP_STRUCT
|
||||
#endif
|
||||
|
||||
|
||||
/* Determine if the mutex put extension 2 is defined. If not, define the
|
||||
/* Determine if the mutex put extension 2 is defined. If not, define the
|
||||
extension to whitespace. */
|
||||
|
||||
#ifndef TX_MUTEX_PUT_EXTENSION_2
|
||||
@@ -763,7 +766,7 @@ typedef struct TX_EVENT_FLAGS_GROUP_STRUCT
|
||||
#endif
|
||||
|
||||
|
||||
/* Determine if the mutex priority change extension is defined. If not, define the
|
||||
/* Determine if the mutex priority change extension is defined. If not, define the
|
||||
extension to whitespace. */
|
||||
|
||||
#ifndef TX_MUTEX_PRIORITY_CHANGE_EXTENSION
|
||||
@@ -837,7 +840,7 @@ typedef struct TX_MUTEX_STRUCT
|
||||
ULONG tx_mutex_performance__priority_inheritance_count;
|
||||
#endif
|
||||
|
||||
/* Define the port extension in the mutex control block. This
|
||||
/* Define the port extension in the mutex control block. This
|
||||
is typically defined to whitespace in tx_port.h. */
|
||||
TX_MUTEX_EXTENSION
|
||||
|
||||
@@ -866,7 +869,7 @@ typedef struct TX_QUEUE_STRUCT
|
||||
UINT tx_queue_enqueued;
|
||||
UINT tx_queue_available_storage;
|
||||
|
||||
/* Define pointers that represent the start and end for the queue's
|
||||
/* Define pointers that represent the start and end for the queue's
|
||||
message area. */
|
||||
ULONG *tx_queue_start;
|
||||
ULONG *tx_queue_end;
|
||||
@@ -911,12 +914,12 @@ typedef struct TX_QUEUE_STRUCT
|
||||
|
||||
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
|
||||
|
||||
/* Define the application callback routine used to notify the application when
|
||||
/* Define the application callback routine used to notify the application when
|
||||
the a message is sent to the queue. */
|
||||
VOID (*tx_queue_send_notify)(struct TX_QUEUE_STRUCT *queue_ptr);
|
||||
#endif
|
||||
|
||||
/* Define the port extension in the queue control block. This
|
||||
/* Define the port extension in the queue control block. This
|
||||
is typically defined to whitespace in tx_port.h. */
|
||||
TX_QUEUE_EXTENSION
|
||||
|
||||
@@ -966,29 +969,29 @@ typedef struct TX_SEMAPHORE_STRUCT
 #ifndef TX_DISABLE_NOTIFY_CALLBACKS

 /* Define the application callback routine used to notify the application when
    the a semaphore is put. */
 VOID (*tx_semaphore_put_notify)(struct TX_SEMAPHORE_STRUCT *semaphore_ptr);
 #endif

 /* Define the port extension in the semaphore control block. This
    is typically defined to whitespace in tx_port.h. */
 TX_SEMAPHORE_EXTENSION

 } TX_SEMAPHORE;
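For context (not part of the diff), the callback above is normally installed through the public tx_semaphore_put_notify service, whose prototype appears later in this header; a minimal sketch with invented app_* names:

static VOID app_sem_put_notify(TX_SEMAPHORE *semaphore_ptr)
{
    /* Called on every put of this semaphore; runs in the putter's context, so keep it short. */
}

/* During initialization, after tx_semaphore_create(&app_semaphore, "app sem", 0): */
tx_semaphore_put_notify(&app_semaphore, app_sem_put_notify);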

 /* Define the system API mappings based on the error checking
    selected by the user. Note: this section is only applicable to
    application source code, hence the conditional that turns off this
    stuff when the include file is processed by the ThreadX source. */

 #ifndef TX_SOURCE_CODE

 /* Determine if error checking is desired. If so, map API functions
    to the appropriate error checking front-ends. Otherwise, map API
    functions to the core functions that actually perform the work.
    Note: error checking is enabled by default. */

 #ifdef TX_DISABLE_ERROR_CHECKING
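A usage note (illustration, not from the diff): with the default build each tx_* call is mapped to its _txe_* error-checking shell, and compiling the application with TX_DISABLE_ERROR_CHECKING maps it directly to the core _tx_* service instead.

/* Default build: tx_queue_send resolves to _txe_queue_send (parameter/context checks first). */
/* With -DTX_DISABLE_ERROR_CHECKING: tx_queue_send resolves directly to _tx_queue_send. */
status = tx_queue_send(&app_queue, &message, TX_NO_WAIT);   /* status, app_queue, message: placeholders */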
@@ -1362,8 +1365,8 @@ UINT _tx_block_allocate(TX_BLOCK_POOL *pool_ptr, VOID **block_ptr, ULONG
|
||||
UINT _tx_block_pool_create(TX_BLOCK_POOL *pool_ptr, CHAR *name_ptr, ULONG block_size,
|
||||
VOID *pool_start, ULONG pool_size);
|
||||
UINT _tx_block_pool_delete(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BLOCK_POOL **next_pool);
|
||||
UINT _tx_block_pool_performance_info_get(TX_BLOCK_POOL *pool_ptr, ULONG *allocates, ULONG *releases,
|
||||
ULONG *suspensions, ULONG *timeouts);
|
||||
@@ -1373,15 +1376,15 @@ UINT _tx_block_pool_prioritize(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _tx_block_release(VOID *block_ptr);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_block_allocate(TX_BLOCK_POOL *pool_ptr, VOID **block_ptr, ULONG wait_option);
|
||||
UINT _txe_block_pool_create(TX_BLOCK_POOL *pool_ptr, CHAR *name_ptr, ULONG block_size,
|
||||
VOID *pool_start, ULONG pool_size, UINT pool_control_block_size);
|
||||
UINT _txe_block_pool_delete(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _txe_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
UINT _txe_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BLOCK_POOL **next_pool);
|
||||
UINT _txe_block_pool_prioritize(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _txe_block_release(VOID *block_ptr);
|
||||
@@ -1390,8 +1393,8 @@ UINT _txr_block_allocate(TX_BLOCK_POOL *pool_ptr, VOID **block_ptr, ULONG
|
||||
UINT _txr_block_pool_create(TX_BLOCK_POOL *pool_ptr, CHAR *name_ptr, ULONG block_size,
|
||||
VOID *pool_start, ULONG pool_size, UINT pool_control_block_size);
|
||||
UINT _txr_block_pool_delete(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _txr_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
UINT _txr_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BLOCK_POOL **next_pool);
|
||||
UINT _txr_block_pool_prioritize(TX_BLOCK_POOL *pool_ptr);
|
||||
UINT _txr_block_release(VOID *block_ptr);
|
||||
@@ -1405,8 +1408,8 @@ UINT _tx_byte_allocate(TX_BYTE_POOL *pool_ptr, VOID **memory_ptr, ULONG m
|
||||
UINT _tx_byte_pool_create(TX_BYTE_POOL *pool_ptr, CHAR *name_ptr, VOID *pool_start,
|
||||
ULONG pool_size);
|
||||
UINT _tx_byte_pool_delete(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BYTE_POOL **next_pool);
|
||||
UINT _tx_byte_pool_performance_info_get(TX_BYTE_POOL *pool_ptr, ULONG *allocates, ULONG *releases,
|
||||
ULONG *fragments_searched, ULONG *merges, ULONG *splits, ULONG *suspensions, ULONG *timeouts);
|
||||
@@ -1416,7 +1419,7 @@ UINT _tx_byte_pool_prioritize(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _tx_byte_release(VOID *memory_ptr);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_byte_allocate(TX_BYTE_POOL *pool_ptr, VOID **memory_ptr, ULONG memory_size,
|
||||
@@ -1424,8 +1427,8 @@ UINT _txe_byte_allocate(TX_BYTE_POOL *pool_ptr, VOID **memory_ptr, ULONG
|
||||
UINT _txe_byte_pool_create(TX_BYTE_POOL *pool_ptr, CHAR *name_ptr, VOID *pool_start,
|
||||
ULONG pool_size, UINT pool_control_block_size);
|
||||
UINT _txe_byte_pool_delete(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _txe_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
UINT _txe_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BYTE_POOL **next_pool);
|
||||
UINT _txe_byte_pool_prioritize(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _txe_byte_release(VOID *memory_ptr);
|
||||
@@ -1435,8 +1438,8 @@ UINT _txr_byte_allocate(TX_BYTE_POOL *pool_ptr, VOID **memory_ptr, ULONG
|
||||
UINT _txr_byte_pool_create(TX_BYTE_POOL *pool_ptr, CHAR *name_ptr, VOID *pool_start,
|
||||
ULONG pool_size, UINT pool_control_block_size);
|
||||
UINT _txr_byte_pool_delete(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _txr_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
UINT _txr_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BYTE_POOL **next_pool);
|
||||
UINT _txr_byte_pool_prioritize(TX_BYTE_POOL *pool_ptr);
|
||||
UINT _txr_byte_release(VOID *memory_ptr);
|
||||
@@ -1449,29 +1452,29 @@ UINT _tx_event_flags_create(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR *name_p
|
||||
UINT _tx_event_flags_delete(TX_EVENT_FLAGS_GROUP *group_ptr);
|
||||
UINT _tx_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags,
|
||||
UINT get_option, ULONG *actual_flags_ptr, ULONG wait_option);
|
||||
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_EVENT_FLAGS_GROUP **next_group);
|
||||
UINT _tx_event_flags_performance_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG *sets, ULONG *gets,
|
||||
ULONG *suspensions, ULONG *timeouts);
|
||||
UINT _tx_event_flags_performance_system_info_get(ULONG *sets, ULONG *gets,
|
||||
ULONG *suspensions, ULONG *timeouts);
|
||||
UINT _tx_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT _tx_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT set_option);
|
||||
UINT _tx_event_flags_set_notify(TX_EVENT_FLAGS_GROUP *group_ptr, VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr));
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_event_flags_create(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR *name_ptr, UINT event_control_block_size);
|
||||
UINT _txe_event_flags_delete(TX_EVENT_FLAGS_GROUP *group_ptr);
|
||||
UINT _txe_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags,
|
||||
UINT get_option, ULONG *actual_flags_ptr, ULONG wait_option);
|
||||
UINT _txe_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txe_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_EVENT_FLAGS_GROUP **next_group);
|
||||
UINT _txe_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT _txe_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT set_option);
|
||||
UINT _txe_event_flags_set_notify(TX_EVENT_FLAGS_GROUP *group_ptr, VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr));
|
||||
#ifdef TX_ENABLE_MULTI_ERROR_CHECKING
|
||||
@@ -1479,10 +1482,10 @@ UINT _txr_event_flags_create(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR *name_
|
||||
UINT _txr_event_flags_delete(TX_EVENT_FLAGS_GROUP *group_ptr);
|
||||
UINT _txr_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags,
|
||||
UINT get_option, ULONG *actual_flags_ptr, ULONG wait_option);
|
||||
UINT _txr_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txr_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_EVENT_FLAGS_GROUP **next_group);
|
||||
UINT _txr_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT _txr_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set,
|
||||
UINT set_option);
|
||||
UINT _txr_event_flags_set_notify(TX_EVENT_FLAGS_GROUP *group_ptr, VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *notify_group_ptr));
|
||||
#endif
|
||||
@@ -1498,8 +1501,8 @@ VOID _tx_initialize_kernel_enter(VOID);
|
||||
UINT _tx_mutex_create(TX_MUTEX *mutex_ptr, CHAR *name_ptr, UINT inherit);
|
||||
UINT _tx_mutex_delete(TX_MUTEX *mutex_ptr);
|
||||
UINT _tx_mutex_get(TX_MUTEX *mutex_ptr, ULONG wait_option);
|
||||
UINT _tx_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _tx_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_MUTEX **next_mutex);
|
||||
UINT _tx_mutex_performance_info_get(TX_MUTEX *mutex_ptr, ULONG *puts, ULONG *gets,
|
||||
ULONG *suspensions, ULONG *timeouts, ULONG *inversions, ULONG *inheritances);
|
||||
@@ -1509,14 +1512,14 @@ UINT _tx_mutex_prioritize(TX_MUTEX *mutex_ptr);
|
||||
UINT _tx_mutex_put(TX_MUTEX *mutex_ptr);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_mutex_create(TX_MUTEX *mutex_ptr, CHAR *name_ptr, UINT inherit, UINT mutex_control_block_size);
|
||||
UINT _txe_mutex_delete(TX_MUTEX *mutex_ptr);
|
||||
UINT _txe_mutex_get(TX_MUTEX *mutex_ptr, ULONG wait_option);
|
||||
UINT _txe_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txe_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_MUTEX **next_mutex);
|
||||
UINT _txe_mutex_prioritize(TX_MUTEX *mutex_ptr);
|
||||
UINT _txe_mutex_put(TX_MUTEX *mutex_ptr);
|
||||
@@ -1524,8 +1527,8 @@ UINT _txe_mutex_put(TX_MUTEX *mutex_ptr);
|
||||
UINT _txr_mutex_create(TX_MUTEX *mutex_ptr, CHAR *name_ptr, UINT inherit, UINT mutex_control_block_size);
|
||||
UINT _txr_mutex_delete(TX_MUTEX *mutex_ptr);
|
||||
UINT _txr_mutex_get(TX_MUTEX *mutex_ptr, ULONG wait_option);
|
||||
UINT _txr_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txr_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_MUTEX **next_mutex);
|
||||
UINT _txr_mutex_prioritize(TX_MUTEX *mutex_ptr);
|
||||
UINT _txr_mutex_put(TX_MUTEX *mutex_ptr);
|
||||
@@ -1534,7 +1537,7 @@ UINT _txr_mutex_put(TX_MUTEX *mutex_ptr);
|
||||
|
||||
/* Define queue management function prototypes. */
|
||||
|
||||
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
VOID *queue_start, ULONG queue_size);
|
||||
UINT _tx_queue_delete(TX_QUEUE *queue_ptr);
|
||||
UINT _tx_queue_flush(TX_QUEUE *queue_ptr);
|
||||
@@ -1551,10 +1554,10 @@ UINT _tx_queue_send_notify(TX_QUEUE *queue_ptr, VOID (*queue_send_notify)
|
||||
UINT _tx_queue_front_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
UINT _txe_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
VOID *queue_start, ULONG queue_size, UINT queue_control_block_size);
|
||||
UINT _txe_queue_delete(TX_QUEUE *queue_ptr);
|
||||
UINT _txe_queue_flush(TX_QUEUE *queue_ptr);
|
||||
@@ -1566,7 +1569,7 @@ UINT _txe_queue_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_op
|
||||
UINT _txe_queue_send_notify(TX_QUEUE *queue_ptr, VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr));
|
||||
UINT _txe_queue_front_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option);
|
||||
#ifdef TX_ENABLE_MULTI_ERROR_CHECKING
|
||||
UINT _txr_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
UINT _txr_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
|
||||
VOID *queue_start, ULONG queue_size, UINT queue_control_block_size);
|
||||
UINT _txr_queue_delete(TX_QUEUE *queue_ptr);
|
||||
UINT _txr_queue_flush(TX_QUEUE *queue_ptr);
|
||||
@@ -1586,8 +1589,8 @@ UINT _tx_semaphore_ceiling_put(TX_SEMAPHORE *semaphore_ptr, ULONG ceiling
|
||||
UINT _tx_semaphore_create(TX_SEMAPHORE *semaphore_ptr, CHAR *name_ptr, ULONG initial_count);
|
||||
UINT _tx_semaphore_delete(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _tx_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option);
|
||||
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_SEMAPHORE **next_semaphore);
|
||||
UINT _tx_semaphore_performance_info_get(TX_SEMAPHORE *semaphore_ptr, ULONG *puts, ULONG *gets,
|
||||
ULONG *suspensions, ULONG *timeouts);
|
||||
@@ -1597,15 +1600,15 @@ UINT _tx_semaphore_put(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _tx_semaphore_put_notify(TX_SEMAPHORE *semaphore_ptr, VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr));
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_semaphore_ceiling_put(TX_SEMAPHORE *semaphore_ptr, ULONG ceiling);
|
||||
UINT _txe_semaphore_create(TX_SEMAPHORE *semaphore_ptr, CHAR *name_ptr, ULONG initial_count, UINT semaphore_control_block_size);
|
||||
UINT _txe_semaphore_delete(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _txe_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option);
|
||||
UINT _txe_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txe_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_SEMAPHORE **next_semaphore);
|
||||
UINT _txe_semaphore_prioritize(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _txe_semaphore_put(TX_SEMAPHORE *semaphore_ptr);
|
||||
@@ -1615,8 +1618,8 @@ UINT _txr_semaphore_ceiling_put(TX_SEMAPHORE *semaphore_ptr, ULONG ceilin
|
||||
UINT _txr_semaphore_create(TX_SEMAPHORE *semaphore_ptr, CHAR *name_ptr, ULONG initial_count, UINT semaphore_control_block_size);
|
||||
UINT _txr_semaphore_delete(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _txr_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option);
|
||||
UINT _txr_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _txr_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_SEMAPHORE **next_semaphore);
|
||||
UINT _txr_semaphore_prioritize(TX_SEMAPHORE *semaphore_ptr);
|
||||
UINT _txr_semaphore_put(TX_SEMAPHORE *semaphore_ptr);
|
||||
@@ -1628,19 +1631,19 @@ UINT _txr_semaphore_put_notify(TX_SEMAPHORE *semaphore_ptr, VOID (*semaph
|
||||
|
||||
VOID _tx_thread_context_save(VOID);
|
||||
VOID _tx_thread_context_restore(VOID);
|
||||
UINT _tx_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
UINT _tx_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
VOID (*entry_function)(ULONG entry_input), ULONG entry_input,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
ULONG time_slice, UINT auto_start);
|
||||
UINT _tx_thread_delete(TX_THREAD *thread_ptr);
|
||||
UINT _tx_thread_entry_exit_notify(TX_THREAD *thread_ptr, VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type));
|
||||
TX_THREAD *_tx_thread_identify(VOID);
|
||||
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
TX_THREAD **next_thread, TX_THREAD **next_suspended_thread);
|
||||
UINT _tx_thread_interrupt_control(UINT new_posture);
|
||||
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
|
||||
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
|
||||
ULONG *solicited_preemptions, ULONG *interrupt_preemptions, ULONG *priority_inversions,
|
||||
ULONG *time_slices, ULONG *relinquishes, ULONG *timeouts, ULONG *wait_aborts, TX_THREAD **last_preempted_by);
|
||||
UINT _tx_thread_performance_system_info_get(ULONG *resumptions, ULONG *suspensions,
|
||||
@@ -1662,18 +1665,18 @@ UINT _tx_thread_time_slice_change(TX_THREAD *thread_ptr, ULONG new_time_s
|
||||
UINT _tx_thread_wait_abort(TX_THREAD *thread_ptr);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
UINT _txe_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
VOID (*entry_function)(ULONG entry_input), ULONG entry_input,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
ULONG time_slice, UINT auto_start, UINT thread_control_block_size);
|
||||
UINT _txe_thread_delete(TX_THREAD *thread_ptr);
|
||||
UINT _txe_thread_entry_exit_notify(TX_THREAD *thread_ptr, VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type));
|
||||
UINT _txe_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
UINT _txe_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
TX_THREAD **next_thread, TX_THREAD **next_suspended_thread);
|
||||
UINT _txe_thread_preemption_change(TX_THREAD *thread_ptr, UINT new_threshold,
|
||||
UINT *old_threshold);
|
||||
@@ -1687,15 +1690,15 @@ UINT _txe_thread_terminate(TX_THREAD *thread_ptr);
|
||||
UINT _txe_thread_time_slice_change(TX_THREAD *thread_ptr, ULONG new_time_slice, ULONG *old_time_slice);
|
||||
UINT _txe_thread_wait_abort(TX_THREAD *thread_ptr);
|
||||
#ifdef TX_ENABLE_MULTI_ERROR_CHECKING
|
||||
UINT _txr_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
UINT _txr_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr,
|
||||
VOID (*entry_function)(ULONG entry_input), ULONG entry_input,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
VOID *stack_start, ULONG stack_size,
|
||||
UINT priority, UINT preempt_threshold,
|
||||
ULONG time_slice, UINT auto_start, UINT thread_control_block_size);
|
||||
UINT _txr_thread_delete(TX_THREAD *thread_ptr);
|
||||
UINT _txr_thread_entry_exit_notify(TX_THREAD *thread_ptr, VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type));
|
||||
UINT _txr_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
UINT _txr_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
TX_THREAD **next_thread, TX_THREAD **next_suspended_thread);
|
||||
UINT _txr_thread_preemption_change(TX_THREAD *thread_ptr, UINT new_threshold,
|
||||
UINT *old_threshold);
|
||||
@@ -1714,12 +1717,12 @@ UINT _txr_thread_wait_abort(TX_THREAD *thread_ptr);
|
||||
|
||||
UINT _tx_timer_activate(TX_TIMER *timer_ptr);
|
||||
UINT _tx_timer_change(TX_TIMER *timer_ptr, ULONG initial_ticks, ULONG reschedule_ticks);
|
||||
UINT _tx_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
UINT _tx_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
VOID (*expiration_function)(ULONG input), ULONG expiration_input,
|
||||
ULONG initial_ticks, ULONG reschedule_ticks, UINT auto_activate);
|
||||
UINT _tx_timer_deactivate(TX_TIMER *timer_ptr);
|
||||
UINT _tx_timer_delete(TX_TIMER *timer_ptr);
|
||||
UINT _tx_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
UINT _tx_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
ULONG *reschedule_ticks, TX_TIMER **next_timer);
|
||||
UINT _tx_timer_performance_info_get(TX_TIMER *timer_ptr, ULONG *activates, ULONG *reactivates,
|
||||
ULONG *deactivates, ULONG *expirations, ULONG *expiration_adjusts);
|
||||
@@ -1730,27 +1733,27 @@ ULONG _tx_time_get(VOID);
|
||||
VOID _tx_time_set(ULONG new_time);
|
||||
|
||||
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
/* Define error checking shells for API services. These are only referenced by the
|
||||
application. */
|
||||
|
||||
UINT _txe_timer_activate(TX_TIMER *timer_ptr);
|
||||
UINT _txe_timer_change(TX_TIMER *timer_ptr, ULONG initial_ticks, ULONG reschedule_ticks);
|
||||
UINT _txe_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
UINT _txe_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
VOID (*expiration_function)(ULONG input), ULONG expiration_input,
|
||||
ULONG initial_ticks, ULONG reschedule_ticks, UINT auto_activate, UINT timer_control_block_size);
|
||||
UINT _txe_timer_deactivate(TX_TIMER *timer_ptr);
|
||||
UINT _txe_timer_delete(TX_TIMER *timer_ptr);
|
||||
UINT _txe_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
UINT _txe_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
ULONG *reschedule_ticks, TX_TIMER **next_timer);
|
||||
#ifdef TX_ENABLE_MULTI_ERROR_CHECKING
|
||||
UINT _txr_timer_activate(TX_TIMER *timer_ptr);
|
||||
UINT _txr_timer_change(TX_TIMER *timer_ptr, ULONG initial_ticks, ULONG reschedule_ticks);
|
||||
UINT _txr_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
UINT _txr_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
|
||||
VOID (*expiration_function)(ULONG input), ULONG expiration_input,
|
||||
ULONG initial_ticks, ULONG reschedule_ticks, UINT auto_activate, UINT timer_control_block_size);
|
||||
UINT _txr_timer_deactivate(TX_TIMER *timer_ptr);
|
||||
UINT _txr_timer_delete(TX_TIMER *timer_ptr);
|
||||
UINT _txr_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
UINT _txr_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
|
||||
ULONG *reschedule_ticks, TX_TIMER **next_timer);
|
||||
#endif
|
||||
|
||||
@@ -1771,13 +1774,13 @@ UINT _tx_trace_interrupt_control(UINT new_posture);
|
||||
/* Add a default macro that can be re-defined in tx_port.h to add default processing when a thread starts. Common usage
|
||||
would be for enabling floating point for a thread by default, however, the additional processing could be anything
|
||||
defined in tx_port.h. */
|
||||
|
||||
|
||||
#ifndef TX_THREAD_STARTED_EXTENSION
|
||||
#define TX_THREAD_STARTED_EXTENSION(thread_ptr)
|
||||
#endif
|
||||
|
||||
|
||||
/* Add a default macro that can be re-defined in tx_port.h to add processing to the thread stack analyze function.
|
||||
/* Add a default macro that can be re-defined in tx_port.h to add processing to the thread stack analyze function.
|
||||
By default, this is simply defined as whitespace. */
|
||||
|
||||
#ifndef TX_THREAD_STACK_ANALYZE_EXTENSION
|
||||
@@ -1785,7 +1788,7 @@ UINT _tx_trace_interrupt_control(UINT new_posture);
|
||||
#endif
|
||||
|
||||
|
||||
/* Add a default macro that can be re-defined in tx_port.h to add processing to the initialize kernel enter function.
|
||||
/* Add a default macro that can be re-defined in tx_port.h to add processing to the initialize kernel enter function.
|
||||
By default, this is simply defined as whitespace. */
|
||||
|
||||
#ifndef TX_INITIALIZE_KERNEL_ENTER_EXTENSION
|
||||
@@ -2240,7 +2243,7 @@ void __ghs_rnerr(char *errMsg, int stackLevels, int stackTraceDisplay, void *hex
|
||||
#endif
|
||||
|
||||
/* Define the get system state macro. By default, it simply maps to the variable _tx_thread_system_state. */
|
||||
/* Note that prior to Azure RTOS 6.1, this symbol was defined in tx_thread.h. */
|
||||
/* Note that prior to Azure RTOS 6.1, this symbol was defined in tx_thread.h. */
|
||||
#ifndef TX_THREAD_GET_SYSTEM_STATE
|
||||
#define TX_THREAD_GET_SYSTEM_STATE() _tx_thread_system_state
|
||||
#endif
|
||||
|
||||
@@ -56,12 +56,12 @@
|
||||
#define TX_BLOCK_POOL_ID ((ULONG) 0x424C4F43)
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
|
||||
/* Yes, in-line initialization is supported, remap the block memory pool
|
||||
/* Yes, in-line initialization is supported, remap the block memory pool
|
||||
initialization function. */
|
||||
|
||||
#ifndef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -68,12 +68,12 @@
|
||||
#endif
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
|
||||
/* Yes, in-line initialization is supported, remap the byte memory pool
|
||||
/* Yes, in-line initialization is supported, remap the byte memory pool
|
||||
initialization function. */
|
||||
|
||||
#ifndef TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -58,11 +58,11 @@
|
||||
#define TX_EVENT_FLAGS_CLEAR_MASK ((UINT) 0x1)
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
|
||||
/* Yes, in-line initialization is supported, remap the event flag initialization
|
||||
/* Yes, in-line initialization is supported, remap the event flag initialization
|
||||
function. */
|
||||
|
||||
#ifndef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -67,15 +67,15 @@ VOID _tx_initialize_low_level(VOID);
|
||||
|
||||
/* Define the macro for adding additional port-specific global data. This macro is defined
|
||||
as white space, unless defined by tx_port.h. */
|
||||
|
||||
|
||||
#ifndef TX_PORT_SPECIFIC_DATA
|
||||
#define TX_PORT_SPECIFIC_DATA
|
||||
#endif
|
||||
|
||||
|
||||
/* Define the macro for adding additional port-specific pre and post initialization processing.
|
||||
/* Define the macro for adding additional port-specific pre and post initialization processing.
|
||||
These macros is defined as white space, unless defined by tx_port.h. */
|
||||
|
||||
|
||||
#ifndef TX_PORT_SPECIFIC_PRE_INITIALIZATION
|
||||
#define TX_PORT_SPECIFIC_PRE_INITIALIZATION
|
||||
#endif
|
||||
@@ -102,9 +102,9 @@ VOID _tx_initialize_low_level(VOID);
|
||||
#endif
|
||||
|
||||
|
||||
/* Define the unused memory pointer. The value of the first available
|
||||
/* Define the unused memory pointer. The value of the first available
|
||||
memory address is placed in this variable in the low-level
|
||||
initialization function. The content of this variable is passed
|
||||
initialization function. The content of this variable is passed
|
||||
to the application's system definition function. */
|
||||
|
||||
INITIALIZE_DECLARE VOID *_tx_initialize_unused_memory;
|
||||
|
||||
@@ -56,12 +56,12 @@
|
||||
#define TX_MUTEX_ID ((ULONG) 0x4D555445)
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
|
||||
/* Yes, in-line initialization is supported, remap the mutex initialization
|
||||
/* Yes, in-line initialization is supported, remap the mutex initialization
|
||||
function. */
|
||||
|
||||
#ifndef TX_MUTEX_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -56,11 +56,11 @@
|
||||
#define TX_QUEUE_ID ((ULONG) 0x51554555)
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
|
||||
/* Yes, in-line initialization is supported, remap the queue initialization
|
||||
/* Yes, in-line initialization is supported, remap the queue initialization
|
||||
function. */
|
||||
|
||||
#ifndef TX_QUEUE_ENABLE_PERFORMANCE_INFO
|
||||
@@ -85,7 +85,7 @@ VOID _tx_queue_initialize(VOID);
|
||||
#endif
|
||||
|
||||
|
||||
/* Define the message copy macro. Note that the source and destination
|
||||
/* Define the message copy macro. Note that the source and destination
|
||||
pointers must be modified since they are used subsequently. */
|
||||
|
||||
#ifndef TX_QUEUE_MESSAGE_COPY
|
||||
|
||||
@@ -56,10 +56,10 @@
|
||||
#define TX_SEMAPHORE_ID ((ULONG) 0x53454D41)
|
||||
|
||||
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
/* Determine if in-line component initialization is supported by the
|
||||
caller. */
|
||||
#ifdef TX_INVOKE_INLINE_INITIALIZATION
|
||||
/* Yes, in-line initialization is supported, remap the
|
||||
/* Yes, in-line initialization is supported, remap the
|
||||
semaphore initialization function. */
|
||||
#ifndef TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO
|
||||
#define _tx_semaphore_initialize() \
|
||||
@@ -76,7 +76,7 @@
|
||||
#endif
|
||||
#define TX_SEMAPHORE_INIT
|
||||
#else
|
||||
/* No in-line initialization is supported, use standard
|
||||
/* No in-line initialization is supported, use standard
|
||||
function call. */
|
||||
VOID _tx_semaphore_initialize(VOID);
|
||||
#endif
|
||||
|
||||
tx_thread.h
@@ -26,7 +26,7 @@
 /* COMPONENT DEFINITION RELEASE */
 /* */
 /* tx_thread.h PORTABLE C */
-/* 6.1.2 */
+/* 6.1.9 */
 /* AUTHOR */
 /* */
 /* William E. Lamie, Microsoft Corporation */
@@ -48,6 +48,9 @@
 /* moved TX_THREAD_GET_SYSTEM_ */
 /* STATE to tx_api.h, */
 /* resulting in version 6.1.2 */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* stack check error handling, */
+/* resulting in version 6.1.9 */
 /* */
 /**************************************************************************/
@@ -90,32 +93,33 @@

 /* Define state change macro that can be used by run-mode debug agents to keep track of thread
    state changes. By default, it is mapped to white space. */

 #ifndef TX_THREAD_STATE_CHANGE
 #define TX_THREAD_STATE_CHANGE(a, b)
 #endif

 /* Define the macro to get the current thread pointer. This is particularly useful in SMP
    versions of ThreadX to add additional processing. The default implementation is to simply
    access the global current thread pointer directly. */

 #ifndef TX_THREAD_GET_CURRENT
 #define TX_THREAD_GET_CURRENT(a) (a) = _tx_thread_current_ptr;
 #endif

 /* Define the macro to set the current thread pointer. This is particularly useful in SMP
    versions of ThreadX to add additional processing. The default implementation is to simply
    access the global current thread pointer directly. */

 #ifndef TX_THREAD_SET_CURRENT
 #define TX_THREAD_SET_CURRENT(a) _tx_thread_current_ptr = (a);
 #endif

 /* Define the get system state macro. By default, it simply maps to the variable _tx_thread_system_state. */
 /* This symbol is moved to tx_api.h. Therefore removed from this file.
 #ifndef TX_THREAD_GET_SYSTEM_STATE
 #define TX_THREAD_GET_SYSTEM_STATE() _tx_thread_system_state
 #endif
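For illustration (assumption, not in the diff): internal code reads and writes the current thread through these macros rather than touching the global directly, which is what lets an SMP or debug port redirect them from tx_port.h. The override below is hypothetical; the array and helper names are invented.

/* Hypothetical tx_port.h override for a multi-core port. */
#define TX_THREAD_GET_CURRENT(a)    (a) = _tx_port_current_thread[port_core_index()];
#define TX_THREAD_SET_CURRENT(a)    _tx_port_current_thread[port_core_index()] = (a);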
@@ -144,10 +148,10 @@
 #endif

 /* Define the lowest bit set macro. Note, that this may be overridden
    by a port specific definition if there is supporting assembly language
    instructions in the architecture. */

 #ifndef TX_LOWEST_SET_BIT_CALCULATE
 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) \
     (b) = ((ULONG) 0); \
@@ -212,9 +216,9 @@
 #endif

 /* Define the default thread stack checking. This can be overridden by
    a particular port, which is necessary if the stack growth is from
    low address to high address (the default logic is for stacks that
    grow from high address to low address. */

 #ifndef TX_THREAD_STACK_CHECK
@@ -311,7 +315,7 @@ THREAD_DECLARE TX_THREAD * _tx_thread_current_ptr;
|
||||
|
||||
|
||||
/* Define the variable that holds the next thread to execute. It is important
|
||||
to remember that this is not necessarily equal to the current thread
|
||||
to remember that this is not necessarily equal to the current thread
|
||||
pointer. */
|
||||
|
||||
THREAD_DECLARE TX_THREAD * _tx_thread_execute_ptr;
|
||||
@@ -328,7 +332,7 @@ THREAD_DECLARE ULONG _tx_thread_created_count;
|
||||
|
||||
|
||||
/* Define the current state variable. When this value is 0, a thread
|
||||
is executing or the system is idle. Other values indicate that
|
||||
is executing or the system is idle. Other values indicate that
|
||||
interrupt or initialization processing is active. This variable is
|
||||
initialized to TX_INITIALIZE_IN_PROGRESS to indicate initialization is
|
||||
active. */
|
||||
@@ -337,15 +341,15 @@ THREAD_DECLARE volatile ULONG _tx_thread_system_state;
|
||||
|
||||
|
||||
/* Define the 32-bit priority bit-maps. There is one priority bit map for each
|
||||
32 priority levels supported. If only 32 priorities are supported there is
|
||||
only one bit map. Each bit within a priority bit map represents that one
|
||||
32 priority levels supported. If only 32 priorities are supported there is
|
||||
only one bit map. Each bit within a priority bit map represents that one
|
||||
or more threads at the associated thread priority are ready. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_priority_maps[TX_MAX_PRIORITIES/32];
|
||||
|
||||
|
||||
/* Define the priority map active bit map that specifies which of the previously
|
||||
defined priority maps have something set. This is only necessary if more than
|
||||
/* Define the priority map active bit map that specifies which of the previously
|
||||
defined priority maps have something set. This is only necessary if more than
|
||||
32 priorities are supported. */
|
||||
|
||||
#if TX_MAX_PRIORITIES > 32
|
||||
@@ -355,17 +359,17 @@ THREAD_DECLARE ULONG _tx_thread_priority_map_active;
|
||||
|
||||
#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
|
||||
|
||||
/* Define the 32-bit preempt priority bit maps. There is one preempt bit map
|
||||
for each 32 priority levels supported. If only 32 priorities are supported
|
||||
there is only one bit map. Each set set bit corresponds to a preempted priority
|
||||
level that had preemption-threshold active to protect against preemption of a
|
||||
/* Define the 32-bit preempt priority bit maps. There is one preempt bit map
|
||||
for each 32 priority levels supported. If only 32 priorities are supported
|
||||
there is only one bit map. Each set set bit corresponds to a preempted priority
|
||||
level that had preemption-threshold active to protect against preemption of a
|
||||
range of relatively higher priority threads. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_preempted_maps[TX_MAX_PRIORITIES/32];
|
||||
|
||||
|
||||
/* Define the preempt map active bit map that specifies which of the previously
|
||||
defined preempt maps have something set. This is only necessary if more than
|
||||
/* Define the preempt map active bit map that specifies which of the previously
|
||||
defined preempt maps have something set. This is only necessary if more than
|
||||
32 priorities are supported. */
|
||||
|
||||
#if TX_MAX_PRIORITIES > 32
|
||||
@@ -373,7 +377,7 @@ THREAD_DECLARE ULONG _tx_thread_preempted_map_active;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Define the variable that holds the highest priority group ready for
|
||||
/* Define the variable that holds the highest priority group ready for
|
||||
execution. It is important to note that this is not necessarily the same
|
||||
as the priority of the thread pointed to by _tx_execute_thread. */
|
||||
|
||||
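/* Illustrative sketch (not part of the ThreadX sources): roughly how a ready priority maps
   into the bit maps declared above. Only TX_MAX_PRIORITIES and the two map variables are
   assumed; the helper name is hypothetical and only mirrors what the scheduler does internally. */

#include "tx_api.h"
#include "tx_thread.h"

static void example_mark_priority_ready(UINT priority)
{
    /* Each 32-bit map word covers 32 priority levels. */
    UINT  map_index = priority/((UINT) 32);
    ULONG bit_set   = (((ULONG) 1) << (priority%((UINT) 32)));

    _tx_thread_priority_maps[map_index] = _tx_thread_priority_maps[map_index] | bit_set;

#if TX_MAX_PRIORITIES > 32
    /* With more than 32 priorities, record which map words are non-zero. */
    _tx_thread_priority_map_active = _tx_thread_priority_map_active | (((ULONG) 1) << map_index);
#endif
}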
@@ -389,13 +393,13 @@ THREAD_DECLARE TX_THREAD * _tx_thread_priority_list[TX_MAX_PRIORITIES];


/* Define the global preempt disable variable. If this is non-zero, preemption is
   disabled. It is used internally by ThreadX to prevent preemption of a thread in
   the middle of a service that is resuming or suspending another thread. */

THREAD_DECLARE volatile UINT _tx_thread_preempt_disable;


/* Define the global function pointer for mutex cleanup on thread completion or
   termination. This pointer is setup during mutex initialization. */

THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);

@@ -407,7 +411,7 @@ THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_pt

       Bit(s)                   Meaning

       31                       TX_NOT_INTERRUPTABLE defined
       30                       TX_INLINE_THREAD_RESUME_SUSPEND defined
       29-24                    Priority groups 1  -> 32 priorities
                                                2  -> 64 priorities
                                                3  -> 96 priorities

@@ -437,10 +441,10 @@ THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_pt
THREAD_DECLARE ULONG _tx_build_options;


#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)

/* Define the global function pointer for stack error handling. If a stack error is
   detected and the application has registered a stack error handler, it will be
   called via this function pointer. */

THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX_THREAD *thread_ptr);
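/* Illustrative sketch (not part of the diff): registering the application stack error handler
   referenced above through the public tx_thread_stack_error_notify() service. It assumes a
   library built with TX_ENABLE_STACK_CHECKING (or a port defining
   TX_PORT_THREAD_STACK_ERROR_HANDLING, per the change above); the handler name and body are
   examples only. */

#include "tx_api.h"

static VOID example_stack_error_handler(TX_THREAD *thread_ptr)
{
    /* A real handler might log thread_ptr -> tx_thread_name and suspend or restart the thread. */
    (VOID) thread_ptr;
}

UINT example_register_stack_error_handler(VOID)
{
    return tx_thread_stack_error_notify(example_stack_error_handler);
}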
@@ -455,20 +459,20 @@ THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_resume_count;
|
||||
|
||||
|
||||
/* Define the total number of thread suspensions. Each time a thread enters a
|
||||
/* Define the total number of thread suspensions. Each time a thread enters a
|
||||
suspended state this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_suspend_count;
|
||||
|
||||
|
||||
/* Define the total number of solicited thread preemptions. Each time a thread is
|
||||
/* Define the total number of solicited thread preemptions. Each time a thread is
|
||||
preempted by directly calling a ThreadX service, this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_solicited_preemption_count;
|
||||
|
||||
|
||||
/* Define the total number of interrupt thread preemptions. Each time a thread is
|
||||
preempted as a result of an ISR calling a ThreadX service, this variable is
|
||||
/* Define the total number of interrupt thread preemptions. Each time a thread is
|
||||
preempted as a result of an ISR calling a ThreadX service, this variable is
|
||||
incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_count;
|
||||
@@ -480,45 +484,45 @@ THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_coun
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_priority_inversion_count;
|
||||
|
||||
|
||||
/* Define the total number of time-slices. Each time a time-slice operation is
|
||||
actually performed (another thread is setup for running) this variable is
|
||||
/* Define the total number of time-slices. Each time a time-slice operation is
|
||||
actually performed (another thread is setup for running) this variable is
|
||||
incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_time_slice_count;
|
||||
|
||||
|
||||
/* Define the total number of thread relinquish operations. Each time a thread
|
||||
/* Define the total number of thread relinquish operations. Each time a thread
|
||||
relinquish operation is actually performed (another thread is setup for running)
|
||||
this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_relinquish_count;
|
||||
|
||||
|
||||
/* Define the total number of thread timeouts. Each time a thread has a
|
||||
/* Define the total number of thread timeouts. Each time a thread has a
|
||||
timeout this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_timeout_count;
|
||||
|
||||
|
||||
/* Define the total number of thread wait aborts. Each time a thread's suspension
|
||||
/* Define the total number of thread wait aborts. Each time a thread's suspension
|
||||
is lifted by the tx_thread_wait_abort call this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_wait_abort_count;
|
||||
|
||||
|
||||
/* Define the total number of idle system thread returns. Each time a thread returns to
|
||||
/* Define the total number of idle system thread returns. Each time a thread returns to
|
||||
an idle system (no other thread is ready to run) this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_idle_return_count;
|
||||
|
||||
|
||||
/* Define the total number of non-idle system thread returns. Each time a thread returns to
|
||||
/* Define the total number of non-idle system thread returns. Each time a thread returns to
|
||||
a non-idle system (another thread is ready to run) this variable is incremented. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance_non_idle_return_count;
|
||||
|
||||
|
||||
/* Define the last TX_THREAD_EXECUTE_LOG_SIZE threads scheduled in ThreadX. This
|
||||
/* Define the last TX_THREAD_EXECUTE_LOG_SIZE threads scheduled in ThreadX. This
|
||||
is a circular list, where the index points to the oldest entry. */
|
||||
|
||||
THREAD_DECLARE ULONG _tx_thread_performance__execute_log_index;
|
||||
|
||||
@@ -79,7 +79,7 @@ VOID _tx_timer_thread_entry(ULONG timer_thread_input);
#endif


/* Define the system clock value that is continually incremented by the
   periodic timer interrupt processing. */

TIMER_DECLARE volatile ULONG _tx_timer_system_clock;

@@ -116,7 +116,7 @@ TIMER_DECLARE TX_TIMER_INTERNAL **_tx_timer_list_end;
TIMER_DECLARE TX_TIMER_INTERNAL **_tx_timer_current_ptr;


/* Define the timer expiration flag. This is used to indicate that a timer
   has expired. */

TIMER_DECLARE UINT _tx_timer_expired;
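/* Illustrative sketch (not part of the diff): the tick counter above is read by applications
   through the public tx_time_get() service, e.g. to measure an elapsed interval in ticks.
   The helper name is hypothetical. */

#include "tx_api.h"

ULONG example_elapsed_ticks(VOID (*work)(VOID))
{
    ULONG start_time = tx_time_get();

    work();

    /* Unsigned subtraction still gives the elapsed ticks if the clock wraps once. */
    return (tx_time_get() - start_time);
}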
@@ -195,8 +195,8 @@ TIMER_DECLARE ULONG _tx_timer_performance_expiration_count;


/* Define the total number of timer expiration adjustments. These are required
   if the expiration time is greater than the size of the timer list. In such
   cases, the timer is placed at the end of the list and then reactivated
   as many times as necessary to finally achieve the resulting timeout. */

TIMER_DECLARE ULONG _tx_timer_performance__expiration_adjust_count;
@@ -78,8 +78,8 @@
#endif


/* Define the default clock source for trace event entry time stamp. The following two items are port specific.
   For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
   source constants would be:

   #define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
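/* Illustrative pairing (not from the diff) for a port with a free-running 16-bit counter at a
   hypothetical address: the companion TX_TRACE_TIME_MASK constant tells trace analysis which
   time-stamp bits are valid.

   #define TX_TRACE_TIME_SOURCE     *((ULONG *) 0x0a800024)
   #define TX_TRACE_TIME_MASK       0x0000FFFFUL
*/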
@@ -101,8 +101,8 @@
|
||||
|
||||
|
||||
/* ThreadX Trace Description. The ThreadX Trace feature is designed to capture
|
||||
events in real-time in a circular event buffer. This buffer may be analyzed by other
|
||||
tools. The high-level format of the Trace structure is:
|
||||
events in real-time in a circular event buffer. This buffer may be analyzed by other
|
||||
tools. The high-level format of the Trace structure is:
|
||||
|
||||
[Trace Control Header ]
|
||||
[Trace Object Registry - Entry 0 ]
|
||||
@@ -115,23 +115,23 @@
|
||||
*/
|
||||
|
||||
|
||||
/* Trace Control Header. The Trace Control Header contains information that
|
||||
defines the format of the Trace Object Registry as well as the location and
|
||||
current entry of the Trace Buffer itself. The high-level format of the
|
||||
/* Trace Control Header. The Trace Control Header contains information that
|
||||
defines the format of the Trace Object Registry as well as the location and
|
||||
current entry of the Trace Buffer itself. The high-level format of the
|
||||
Trace Control Header is:
|
||||
|
||||
Entry Size Description
|
||||
|
||||
[Trace ID] 4 This 4-byte field contains the ThreadX Trace
|
||||
Identification. If the trace buffer is valid, the
|
||||
contents are 0x54585442 (TXTB). Since it is written as
|
||||
contents are 0x54585442 (TXTB). Since it is written as
|
||||
a 32-bit unsigned word, this value is also used to
|
||||
determine if the event trace information is in
|
||||
determine if the event trace information is in
|
||||
little or big endian format.
|
||||
[Timer Valid Mask] 4 Mask of valid bits in the 32-bit time stamp. This
|
||||
[Timer Valid Mask] 4 Mask of valid bits in the 32-bit time stamp. This
|
||||
enables use of 32, 24, 16, or event 8-bit timers.
|
||||
If the time source is 32-bits, the mask is
|
||||
0xFFFFFFFF. If the time source is 16-bits, the
|
||||
If the time source is 32-bits, the mask is
|
||||
0xFFFFFFFF. If the time source is 16-bits, the
|
||||
mask is 0x0000FFFF.
|
||||
[Trace Base Address] 4 The base address for all trace pointer. Subtracting
|
||||
the pointer and this address will yield the proper
|
||||
@@ -143,7 +143,7 @@
|
||||
[Trace Buffer Start Pointer] 4 Pointer to the start of the Trace Buffer Area
|
||||
[Trace Buffer End Pointer] 4 Pointer to the end of the Trace Buffer Area
|
||||
[Trace Buffer Current Pointer] 4 Pointer to the oldest entry in the Trace Buffer.
|
||||
This entry will be overwritten on the next event and
|
||||
This entry will be overwritten on the next event and
|
||||
incremented to the next event (wrapping to the top
|
||||
if the buffer end pointer is exceeded).
|
||||
[Reserved] 4 Reserved 4 bytes, should be 0xAAAAAAAA
|
||||
@@ -173,7 +173,7 @@ typedef struct TX_TRACE_HEADER_STRUCT
|
||||
} TX_TRACE_HEADER;
|
||||
|
||||
|
||||
/* Trace Object Registry. The Trace Object Registry is used to map the object pointer in the trace buffer to
|
||||
/* Trace Object Registry. The Trace Object Registry is used to map the object pointer in the trace buffer to
|
||||
the application's name for the object (defined during object creation in ThreadX). */
|
||||
|
||||
#ifndef TX_TRACE_OBJECT_REGISTRY_NAME
|
||||
@@ -181,7 +181,7 @@ typedef struct TX_TRACE_HEADER_STRUCT
|
||||
#endif
|
||||
|
||||
|
||||
/* Define the object name types as well as the contents of any additional parameters that might be useful in
|
||||
/* Define the object name types as well as the contents of any additional parameters that might be useful in
|
||||
trace analysis. */
|
||||
|
||||
#define TX_TRACE_OBJECT_TYPE_NOT_VALID ((UCHAR) 0) /* Object is not valid */
|
||||
@@ -214,27 +214,27 @@ typedef struct TX_TRACE_OBJECT_ENTRY_STRUCT
|
||||
|
||||
Entry Size Description
|
||||
|
||||
[Thread Pointer] 4 This 4-byte field contains the pointer to the
|
||||
ThreadX thread running that caused the event.
|
||||
[Thread Pointer] 4 This 4-byte field contains the pointer to the
|
||||
ThreadX thread running that caused the event.
|
||||
If this field is NULL, the entry hasn't been used
|
||||
yet. If this field is 0xFFFFFFFF, the event occurred
|
||||
from within an ISR. If this entry is 0xF0F0F0F0, the
|
||||
from within an ISR. If this entry is 0xF0F0F0F0, the
|
||||
event occurred during initialization.
|
||||
[Thread Priority or 4 This 4-byte field contains the current thread pointer for interrupt
|
||||
Current Thread events or the thread preemption-threshold/priority for thread events.
|
||||
Preemption-Threshold/
|
||||
Priority]
|
||||
[Event ID] 4 This 4-byte field contains the Event ID of the event. A value of
|
||||
[Event ID] 4 This 4-byte field contains the Event ID of the event. A value of
|
||||
0xFFFFFFFF indicates the event is invalid. All events are marked
|
||||
as invalid during initialization.
|
||||
[Time Stamp] 4 This 4-byte field contains the time stamp of the event.
|
||||
[Information Field 1] 4 This 4-byte field contains the first 4-bytes of information
|
||||
[Information Field 1] 4 This 4-byte field contains the first 4-bytes of information
|
||||
specific to the event.
|
||||
[Information Field 2] 4 This 4-byte field contains the second 4-bytes of information
|
||||
[Information Field 2] 4 This 4-byte field contains the second 4-bytes of information
|
||||
specific to the event.
|
||||
[Information Field 3] 4 This 4-byte field contains the third 4-bytes of information
|
||||
[Information Field 3] 4 This 4-byte field contains the third 4-bytes of information
|
||||
specific to the event.
|
||||
[Information Field 4] 4 This 4-byte field contains the fourth 4-bytes of information
|
||||
[Information Field 4] 4 This 4-byte field contains the fourth 4-bytes of information
|
||||
specific to the event.
|
||||
*/
|
||||
|
||||
@@ -244,15 +244,15 @@ typedef struct TX_TRACE_OBJECT_ENTRY_STRUCT

/* Define ThreadX Trace Events, along with a brief description of the additional information fields,
   where I1 -> Information Field 1, I2 -> Information Field 2, etc. */

/* Event numbers 0 through 4095 are reserved by Azure RTOS. Specific event assignments are:

                        ThreadX events:     1-199
                        FileX events:       200-299
                        NetX events:        300-599
                        USBX events:        600-999

   User-defined event numbers start at 4096 and continue through 65535, as defined by the constants
   TX_TRACE_USER_EVENT_START and TX_TRACE_USER_EVENT_END, respectively. User events should be based
   on these constants in case the user event number assignment is changed in future releases. */

/* Define the basic ThreadX thread scheduling events first. */
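/* Illustrative sketch (not part of the diff): defining application events relative to
   TX_TRACE_USER_EVENT_START and inserting one with the public tx_trace_user_event_insert()
   service (available when the library is built with TX_ENABLE_EVENT_TRACE). The event name
   and field meanings are examples only. */

#include "tx_api.h"

#define EXAMPLE_TRACE_EVENT_SENSOR_READ    (TX_TRACE_USER_EVENT_START + 1)

UINT example_trace_sensor_read(ULONG sensor_id, ULONG raw_value)
{
    /* I1 = sensor id, I2 = raw reading; the last two information fields are unused here. */
    return tx_trace_user_event_insert(EXAMPLE_TRACE_EVENT_SENSOR_READ, sensor_id, raw_value, 0, 0);
}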
@@ -417,7 +417,7 @@ TRACE_DECLARE TX_TRACE_BUFFER_ENTRY *_tx_trace_buffer_end_ptr;
TRACE_DECLARE TX_TRACE_BUFFER_ENTRY *_tx_trace_buffer_current_ptr;


/* Define the trace event enable bits, where each bit represents a type of event that can be enabled
   or disabled dynamically by the application. */

TRACE_DECLARE ULONG _tx_trace_event_enable_bits;
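/* Illustrative sketch (not part of the diff): the buffer pointers and enable bits above become
   meaningful once tracing is turned on with the public tx_trace_enable() service. Buffer size
   and registry entry count are example values; TX_ENABLE_EVENT_TRACE must be defined. */

#include "tx_api.h"

static UCHAR example_trace_buffer[16384];

UINT example_enable_trace(VOID)
{
    /* 16 KB circular buffer with room for 32 object-registry entries. */
    return tx_trace_enable(example_trace_buffer, sizeof(example_trace_buffer), 32);
}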
@@ -429,9 +429,9 @@ TRACE_DECLARE ULONG _tx_trace_event_enable_bits;

TRACE_DECLARE ULONG _tx_trace_simulated_time;


/* Define the function pointer used to call the application when the trace buffer wraps. If NULL,
   the application has not registered a callback function. */


TRACE_DECLARE VOID (*_tx_trace_full_notify_function)(VOID *buffer);
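/* Illustrative sketch (not part of the diff): registering the buffer-full callback referenced
   above via the public tx_trace_buffer_full_notify() service; the callback name and body are
   examples only. */

#include "tx_api.h"

static VOID example_trace_buffer_full(VOID *buffer)
{
    /* Typically the application dumps or uploads the buffer here before it wraps. */
    (VOID) buffer;
}

UINT example_register_trace_full_callback(VOID)
{
    return tx_trace_buffer_full_notify(example_trace_buffer_full);
}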
@@ -26,7 +26,7 @@
/*  PORT SPECIFIC C INFORMATION                            RELEASE        */
/*                                                                        */
/*    tx_user.h                                           PORTABLE C      */
/*                                                           6.1.5        */
/*                                                           6.1.9        */
/*                                                                        */
/*  AUTHOR                                                                */
/*                                                                        */
@@ -51,6 +51,13 @@
/*                                            added option to remove     */
/*                                            FileX pointer,             */
/*                                            resulting in version 6.1.5 */
/*  06-02-2021     Scott Larson             Added options for multiple   */
/*                                            block pool search & delay, */
/*                                            resulting in version 6.1.7 */
/*  10-15-2021     Yuxin Zhou               Modified comment(s), added   */
/*                                            user-configurable symbol   */
/*                                            TX_TIMER_TICKS_PER_SECOND  */
/*                                            resulting in version 6.1.9 */
/*                                                                        */
/**************************************************************************/
@@ -59,9 +66,9 @@


/* Define various build options for the ThreadX port. The application should either make changes
   here by commenting or un-commenting the conditional compilation defines OR supply the defines
   through the compiler's equivalent of the -D option.

   For maximum speed, the following should be defined:

        TX_MAX_PRIORITIES                       32
@@ -73,9 +80,9 @@
        TX_REACTIVATE_INLINE
        TX_DISABLE_STACK_FILLING
        TX_INLINE_THREAD_RESUME_SUSPEND

   For minimum size, the following should be defined:

        TX_MAX_PRIORITIES                       32
        TX_DISABLE_PREEMPTION_THRESHOLD
        TX_DISABLE_REDUNDANT_CLEARING
@@ -83,12 +90,12 @@
        TX_NO_FILEX_POINTER
        TX_NOT_INTERRUPTABLE
        TX_TIMER_PROCESS_IN_ISR

   Of course, many of these defines reduce functionality and/or change the behavior of the
   system in ways that may not be worth the trade-off. For example, TX_TIMER_PROCESS_IN_ISR
   results in faster and smaller code, however, it increases the amount of processing in the ISR.
   In addition, some services that are available in timers are not available from ISRs and will
   therefore return an error if this option is used. This may or may not be desirable for a
   given application. */
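/* Illustrative note (not from the diff): the same options can be supplied on the compiler
   command line instead of editing this file; with a GCC-style toolchain that would look
   roughly like

       arm-none-eabi-gcc -DTX_MAX_PRIORITIES=32 -DTX_DISABLE_PREEMPTION_THRESHOLD -DTX_TIMER_PROCESS_IN_ISR -c tx_thread_schedule.c

   The exact invocation is port- and toolchain-specific. */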
@@ -103,19 +110,28 @@
#define TX_TIMER_THREAD_PRIORITY    ????
*/

/* Define the common timer tick reference for use by other middleware components. The default
   value is 10ms (i.e. 100 ticks, defined in tx_api.h), but may be replaced by a port-specific
   version in tx_port.h or here.
   Note: the actual hardware timer value may need to be changed (usually in tx_initialize_low_level). */

/*
#define TX_TIMER_TICKS_PER_SECOND       ((ULONG) 100)
*/

/* Determine if there is a FileX pointer in the thread control block.
   By default, the pointer is there for legacy/backwards compatibility.
   The pointer must also be there for applications using FileX.
   Define this to save space in the thread control block.
*/

/*
#define TX_NO_FILEX_POINTER
*/
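/* Illustrative sketch (not part of the diff): middleware or application code can use
   TX_TIMER_TICKS_PER_SECOND to convert milliseconds into ticks before calling
   tx_thread_sleep(). With the default of 100 ticks per second, 250 ms rounds up to 25 ticks.
   The helper names are hypothetical. */

#include "tx_api.h"

static ULONG example_ms_to_ticks(ULONG milliseconds)
{
    /* Round up so short, non-zero delays never become zero ticks. */
    return (((milliseconds * TX_TIMER_TICKS_PER_SECOND) + ((ULONG) 999))/((ULONG) 1000));
}

VOID example_delay_250ms(VOID)
{
    tx_thread_sleep(example_ms_to_ticks(250));
}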
/* Determine if timer expirations (application timers, timeouts, and tx_thread_sleep calls
|
||||
should be processed within the a system timer thread or directly in the timer ISR.
|
||||
By default, the timer thread is used. When the following is defined, the timer expiration
|
||||
/* Determine if timer expirations (application timers, timeouts, and tx_thread_sleep calls
|
||||
should be processed within the a system timer thread or directly in the timer ISR.
|
||||
By default, the timer thread is used. When the following is defined, the timer expiration
|
||||
processing is done directly from the timer ISR, thereby eliminating the timer thread control
|
||||
block, stack, and context switching to activate it. */
|
||||
|
||||
@@ -140,7 +156,7 @@
|
||||
#define TX_DISABLE_STACK_FILLING
|
||||
*/
|
||||
|
||||
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
|
||||
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
|
||||
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
|
||||
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
|
||||
define is negated, thereby forcing the stack fill which is necessary for the stack checking
|
||||
@@ -150,7 +166,7 @@
|
||||
#define TX_ENABLE_STACK_CHECKING
|
||||
*/
|
||||
|
||||
/* Determine if preemption-threshold should be disabled. By default, preemption-threshold is
|
||||
/* Determine if preemption-threshold should be disabled. By default, preemption-threshold is
|
||||
enabled. If the application does not use preemption-threshold, it may be disabled to reduce
|
||||
code size and improve performance. */
|
||||
|
||||
@@ -158,7 +174,7 @@
|
||||
#define TX_DISABLE_PREEMPTION_THRESHOLD
|
||||
*/
|
||||
|
||||
/* Determine if global ThreadX variables should be cleared. If the compiler startup code clears
|
||||
/* Determine if global ThreadX variables should be cleared. If the compiler startup code clears
|
||||
the .bss section prior to ThreadX running, the define can be used to eliminate unnecessary
|
||||
clearing of ThreadX global variables. */
|
||||
|
||||
@@ -166,9 +182,9 @@
|
||||
#define TX_DISABLE_REDUNDANT_CLEARING
|
||||
*/
|
||||
|
||||
/* Determine if no timer processing is required. This option will help eliminate the timer
|
||||
processing when not needed. The user will also have to comment out the call to
|
||||
tx_timer_interrupt, which is typically made from assembly language in
|
||||
/* Determine if no timer processing is required. This option will help eliminate the timer
|
||||
processing when not needed. The user will also have to comment out the call to
|
||||
tx_timer_interrupt, which is typically made from assembly language in
|
||||
tx_initialize_low_level. Note: if TX_NO_TIMER is used, the define TX_TIMER_PROCESS_IN_ISR
|
||||
must also be used and tx_timer_initialize must be removed from ThreadX library. */
|
||||
|
||||
@@ -188,8 +204,8 @@
|
||||
*/
|
||||
|
||||
|
||||
/* Determine if the tx_thread_resume and tx_thread_suspend services should have their internal
|
||||
code in-line. This results in a larger image, but improves the performance of the thread
|
||||
/* Determine if the tx_thread_resume and tx_thread_suspend services should have their internal
|
||||
code in-line. This results in a larger image, but improves the performance of the thread
|
||||
resume and suspend services. */
|
||||
|
||||
/*
|
||||
@@ -197,7 +213,7 @@
|
||||
*/
|
||||
|
||||
|
||||
/* Determine if the internal ThreadX code is non-interruptable. This results in smaller code
|
||||
/* Determine if the internal ThreadX code is non-interruptable. This results in smaller code
|
||||
size and less processing overhead, but increases the interrupt lockout time. */
|
||||
|
||||
/*
|
||||
@@ -205,8 +221,8 @@
|
||||
*/
|
||||
|
||||
|
||||
/* Determine if the trace event logging code should be enabled. This causes slight increases in
|
||||
code size and overhead, but provides the ability to generate system trace information which
|
||||
/* Determine if the trace event logging code should be enabled. This causes slight increases in
|
||||
code size and overhead, but provides the ability to generate system trace information which
|
||||
is available for viewing in TraceX. */
|
||||
|
||||
/*
|
||||
@@ -270,5 +286,17 @@
#define TX_TIMER_ENABLE_PERFORMANCE_INFO
*/

/* Override options for byte pool searches of multiple blocks. */

/*
#define TX_BYTE_POOL_MULTIPLE_BLOCK_SEARCH    20
*/

/* Override options for byte pool search delay to avoid thrashing. */

/*
#define TX_BYTE_POOL_DELAY_VALUE    3
*/

#endif
@@ -170,7 +170,7 @@ ULONG lower_tbu;
|
||||
/* Save the pool's address in the block for when it is released! */
|
||||
temp_ptr = TX_BLOCK_POOL_TO_UCHAR_POINTER_CONVERT(pool_ptr);
|
||||
*next_block_ptr = temp_ptr;
|
||||
|
||||
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Check that the event time stamp is unchanged. A different
|
||||
@@ -182,7 +182,7 @@ ULONG lower_tbu;
|
||||
/* Is the time stamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, update the entry with the address. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
|
||||
@@ -200,7 +200,7 @@ ULONG lower_tbu;
|
||||
|
||||
/* Set status to success. */
|
||||
status = TX_SUCCESS;
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
}
|
||||
@@ -229,7 +229,7 @@ ULONG lower_tbu;
|
||||
{
|
||||
|
||||
/* Prepare for suspension of this thread. */
|
||||
|
||||
|
||||
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
/* Increment the total suspensions counter. */
|
||||
@@ -261,7 +261,7 @@ ULONG lower_tbu;
|
||||
|
||||
/* Pickup the number of suspended threads. */
|
||||
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
|
||||
|
||||
|
||||
/* Increment the number of suspended threads. */
|
||||
(pool_ptr -> tx_block_pool_suspended_count)++;
|
||||
|
||||
@@ -322,11 +322,11 @@ ULONG lower_tbu;
|
||||
allocate event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the time-stamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, update the entry with the address. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
|
||||
|
||||
@@ -87,7 +87,7 @@ UINT suspended_count;
|
||||
TX_THREAD *next_thread;
|
||||
TX_THREAD *previous_thread;
|
||||
|
||||
|
||||
|
||||
#ifndef TX_NOT_INTERRUPTABLE
|
||||
|
||||
/* Disable interrupts to remove the suspended thread from the block pool. */
|
||||
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if the cleanup is still required. */
|
||||
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_block_pool_cleanup))
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid suspension sequence. */
|
||||
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
|
||||
{
|
||||
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
|
||||
/* Check for a NULL byte pool pointer. */
|
||||
if (pool_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid pool ID. */
|
||||
if (pool_ptr -> tx_block_pool_id == TX_BLOCK_POOL_ID)
|
||||
{
|
||||
@@ -133,13 +133,13 @@ TX_THREAD *previous_thread;
|
||||
suspended_count = pool_ptr -> tx_block_pool_suspended_count;
|
||||
|
||||
/* Remove the suspended thread from the list. */
|
||||
|
||||
|
||||
/* See if this is the only suspended thread on the list. */
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
/* Yes, the only suspended thread. */
|
||||
|
||||
|
||||
/* Update the head pointer. */
|
||||
pool_ptr -> tx_block_pool_suspension_list = TX_NULL;
|
||||
}
|
||||
@@ -157,7 +157,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if we need to update the head pointer. */
|
||||
if (pool_ptr -> tx_block_pool_suspension_list == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Update the list head pointer. */
|
||||
pool_ptr -> tx_block_pool_suspension_list = next_thread;
|
||||
}
|
||||
@@ -168,7 +168,7 @@ TX_THREAD *previous_thread;
|
||||
if (thread_ptr -> tx_thread_state == TX_BLOCK_MEMORY)
|
||||
{
|
||||
|
||||
/* Timeout condition and the thread still suspended on the block pool.
|
||||
/* Timeout condition and the thread still suspended on the block pool.
|
||||
Setup return error status and resume the thread. */
|
||||
|
||||
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -97,7 +97,7 @@ TX_BLOCK_POOL *previous_pool;
       an ALIGN_TYPE (typically this is a 32-bit ULONG). This helps guarantee proper alignment. */
    block_size = (((block_size + (sizeof(ALIGN_TYPE))) - ((ALIGN_TYPE) 1))/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));

    /* Round the pool size down to something that is evenly divisible by
       an ALIGN_TYPE (typically this is a 32-bit ULONG). */
    pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));

@@ -106,7 +106,7 @@ TX_BLOCK_POOL *previous_pool;
    pool_ptr -> tx_block_pool_start = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
    pool_ptr -> tx_block_pool_size = pool_size;
    pool_ptr -> tx_block_pool_block_size = (UINT) block_size;


    /* Calculate the total number of blocks. */
    total_blocks = pool_size/(block_size + (sizeof(UCHAR *)));
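/* Worked example (not part of the diff), assuming a 32-bit port where sizeof(ALIGN_TYPE) ==
   sizeof(UCHAR *) == 4: a requested block size of 20 already ends on a 4-byte boundary, so it
   stays 20; each block also carries a hidden 4-byte pool pointer, so a 1000-byte pool yields
   1000 / (20 + 4) = 41 blocks. The pool and area names are examples only. */

#include "tx_api.h"

static TX_BLOCK_POOL example_block_pool;
static UCHAR         example_block_pool_area[1000];

UINT example_create_block_pool(VOID)
{
    /* 41 usable 20-byte blocks under the stated assumptions. */
    return tx_block_pool_create(&example_block_pool, "example block pool", 20,
                                example_block_pool_area, sizeof(example_block_pool_area));
}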
@@ -145,7 +145,7 @@ TX_BLOCK_POOL *previous_pool;
|
||||
/* Set the last block's forward pointer to NULL. */
|
||||
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(block_ptr);
|
||||
*block_link_ptr = TX_NULL;
|
||||
|
||||
|
||||
/* Setup the starting pool address. */
|
||||
pool_ptr -> tx_block_pool_available_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
|
||||
|
||||
@@ -180,7 +180,7 @@ TX_BLOCK_POOL *previous_pool;
|
||||
pool_ptr -> tx_block_pool_created_previous = previous_pool;
|
||||
pool_ptr -> tx_block_pool_created_next = next_pool;
|
||||
}
|
||||
|
||||
|
||||
/* Increment the created count. */
|
||||
_tx_block_pool_created_count++;
|
||||
|
||||
@@ -208,7 +208,7 @@ TX_BLOCK_POOL *previous_pool;
|
||||
/* Not enough memory for one block, return appropriate error. */
|
||||
status = TX_SIZE_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
}
|
||||
|
||||
@@ -126,7 +126,7 @@ TX_BLOCK_POOL *previous_pool;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_block_pool_created_ptr == pool_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_block_pool_created_ptr = next_pool;
|
||||
}
|
||||
@@ -148,14 +148,14 @@ TX_BLOCK_POOL *previous_pool;
|
||||
on this block pool. */
|
||||
while (suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
|
||||
@@ -77,8 +77,8 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
|
||||
ULONG *total_blocks, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BLOCK_POOL **next_pool)
|
||||
{
|
||||
|
||||
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the block pool. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = pool_ptr -> tx_block_pool_name;
|
||||
}
|
||||
|
||||
/* Retrieve the number of available blocks in the block pool. */
|
||||
if (available_blocks != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*available_blocks = (ULONG) pool_ptr -> tx_block_pool_available;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of blocks in the block pool. */
|
||||
if (total_blocks != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*total_blocks = (ULONG) pool_ptr -> tx_block_pool_total;
|
||||
}
|
||||
|
||||
/* Retrieve the first thread suspended on this block pool. */
|
||||
if (first_suspended != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*first_suspended = pool_ptr -> tx_block_pool_suspension_list;
|
||||
}
|
||||
|
||||
/* Retrieve the number of threads suspended on this block pool. */
|
||||
if (suspended_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspended_count = (ULONG) pool_ptr -> tx_block_pool_suspended_count;
|
||||
}
|
||||
|
||||
/* Retrieve the pointer to the next block pool created. */
|
||||
if (next_pool != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_pool = pool_ptr -> tx_block_pool_created_next;
|
||||
}
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (pool_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Block pool pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -101,13 +101,13 @@ UINT status;
|
||||
/* Determine if the pool ID is invalid. */
|
||||
else if (pool_ptr -> tx_block_pool_id != TX_BLOCK_POOL_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Block pool pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
@@ -123,28 +123,28 @@ UINT status;
|
||||
/* Retrieve the number of allocations from this block pool. */
|
||||
if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*allocates = pool_ptr -> tx_block_pool_performance_allocate_count;
|
||||
}
|
||||
|
||||
/* Retrieve the number of blocks released to this block pool. */
|
||||
if (releases != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*releases = pool_ptr -> tx_block_pool_performance_release_count;
|
||||
}
|
||||
|
||||
/* Retrieve the number of thread suspensions on this block pool. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = pool_ptr -> tx_block_pool_performance_suspension_count;
|
||||
}
|
||||
|
||||
/* Retrieve the number of thread timeouts on this block pool. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = pool_ptr -> tx_block_pool_performance_timeout_count;
|
||||
}
|
||||
|
||||
@@ -157,7 +157,7 @@ UINT status;
|
||||
#else
|
||||
UINT status;
|
||||
|
||||
|
||||
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (pool_ptr != TX_NULL)
|
||||
{
|
||||
@@ -191,7 +191,7 @@ UINT status;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -100,28 +100,28 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of block allocations. */
|
||||
if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*allocates = _tx_block_pool_performance_allocate_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of blocks released. */
|
||||
if (releases != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*releases = _tx_block_pool_performance_release_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of block pool thread suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_block_pool_performance_suspension_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of block pool thread timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_block_pool_performance_timeout_count;
|
||||
}
|
||||
|
||||
@@ -139,35 +139,35 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (releases != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
#endif
|
||||
|
||||
@@ -130,7 +130,7 @@ UINT list_changed;
|
||||
|
||||
/* Remember the suspension count and head pointer. */
|
||||
head_ptr = pool_ptr -> tx_block_pool_suspension_list;
|
||||
|
||||
|
||||
/* Default the highest priority thread to the thread at the front of the list. */
|
||||
priority_thread_ptr = head_ptr;
|
||||
|
||||
@@ -142,7 +142,7 @@ UINT list_changed;
|
||||
|
||||
/* Set the list changed flag to false. */
|
||||
list_changed = TX_FALSE;
|
||||
|
||||
|
||||
/* Search through the list to find the highest priority thread. */
|
||||
do
|
||||
{
|
||||
@@ -160,33 +160,33 @@ UINT list_changed;
|
||||
|
||||
/* Disable interrupts again. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
interrupts were enabled. */
|
||||
|
||||
|
||||
/* Is the list head the same? */
|
||||
if (head_ptr != pool_ptr -> tx_block_pool_suspension_list)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Is the suspended count the same? */
|
||||
if (suspended_count != pool_ptr -> tx_block_pool_suspended_count)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the list has changed. */
|
||||
if (list_changed == TX_FALSE)
|
||||
{
|
||||
|
||||
|
||||
/* Move the thread pointer to the next thread. */
|
||||
thread_ptr = thread_ptr -> tx_thread_suspended_next;
|
||||
}
|
||||
@@ -202,7 +202,7 @@ UINT list_changed;
|
||||
|
||||
/* Setup search pointer. */
|
||||
thread_ptr = priority_thread_ptr -> tx_thread_suspended_next;
|
||||
|
||||
|
||||
/* Reset the list changed flag. */
|
||||
list_changed = TX_FALSE;
|
||||
}
|
||||
@@ -212,12 +212,12 @@ UINT list_changed;
|
||||
/* Release preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
of the list. */
|
||||
if (priority_thread_ptr != head_ptr)
|
||||
{
|
||||
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
front of the list. */
|
||||
|
||||
/* First, remove the highest priority thread by updating the
|
||||
|
||||
@@ -90,7 +90,7 @@ TX_THREAD *previous_thread;
|
||||
/* Disable interrupts to put this block back in the pool. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Pickup the pool pointer which is just previous to the starting
|
||||
/* Pickup the pool pointer which is just previous to the starting
|
||||
address of the block that the caller sees. */
|
||||
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(block_ptr);
|
||||
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, (sizeof(UCHAR *)));
|
||||
@@ -121,7 +121,7 @@ TX_THREAD *previous_thread;
|
||||
|
||||
/* Decrement the number of threads suspended. */
|
||||
(pool_ptr -> tx_block_pool_suspended_count)--;
|
||||
|
||||
|
||||
/* Pickup the suspended count. */
|
||||
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
|
||||
|
||||
@@ -148,7 +148,7 @@ TX_THREAD *previous_thread;
|
||||
next_thread -> tx_thread_suspended_previous = previous_thread;
|
||||
previous_thread -> tx_thread_suspended_next = next_thread;
|
||||
}
|
||||
|
||||
|
||||
/* Prepare for resumption of the first thread. */
|
||||
|
||||
/* Clear cleanup routine to avoid timeout. */
|
||||
|
||||
@@ -181,17 +181,17 @@ ULONG lower_tbu;
|
||||
/* Determine if we are finished. */
|
||||
if (work_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, we have found a block the search is finished. */
|
||||
finished = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* No block was found, does this thread still own the pool? */
|
||||
if (pool_ptr -> tx_byte_pool_owner == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, then we have looked through the entire pool and haven't found the memory. */
|
||||
finished = TX_TRUE;
|
||||
}
|
||||
@@ -217,7 +217,7 @@ ULONG lower_tbu;
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, update the entry with the address. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
|
||||
@@ -242,7 +242,7 @@ ULONG lower_tbu;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Set the status to success. */
|
||||
status = TX_SUCCESS;
|
||||
}
|
||||
@@ -304,7 +304,7 @@ ULONG lower_tbu;
|
||||
|
||||
/* Increment the suspension count. */
|
||||
(pool_ptr -> tx_byte_pool_suspended_count)++;
|
||||
|
||||
|
||||
/* Setup suspension list. */
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
{
|
||||
@@ -366,7 +366,7 @@ ULONG lower_tbu;
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, update the entry with the address. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
|
||||
@@ -396,7 +396,7 @@ ULONG lower_tbu;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ UINT suspended_count;
|
||||
TX_THREAD *next_thread;
|
||||
TX_THREAD *previous_thread;
|
||||
|
||||
|
||||
|
||||
#ifndef TX_NOT_INTERRUPTABLE
|
||||
|
||||
/* Disable interrupts to remove the suspended thread from the byte pool. */
|
||||
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if the cleanup is still required. */
|
||||
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_byte_pool_cleanup))
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid suspension sequence. */
|
||||
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
|
||||
{
|
||||
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
|
||||
/* Check for a NULL byte pool pointer. */
|
||||
if (pool_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid pool ID. */
|
||||
if (pool_ptr -> tx_byte_pool_id == TX_BYTE_POOL_ID)
|
||||
{
|
||||
@@ -126,18 +126,18 @@ TX_THREAD *previous_thread;
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
pool_ptr -> tx_byte_pool_suspended_count--;
|
||||
|
||||
|
||||
/* Pickup the suspended count. */
|
||||
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
|
||||
|
||||
/* Remove the suspended thread from the list. */
|
||||
|
||||
|
||||
/* See if this is the only suspended thread on the list. */
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
/* Yes, the only suspended thread. */
|
||||
|
||||
|
||||
/* Update the head pointer. */
|
||||
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
|
||||
}
|
||||
@@ -155,7 +155,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if we need to update the head pointer. */
|
||||
if (pool_ptr -> tx_byte_pool_suspension_list == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Update the list head pointer. */
|
||||
pool_ptr -> tx_byte_pool_suspension_list = next_thread;
|
||||
}
|
||||
@@ -166,7 +166,7 @@ TX_THREAD *previous_thread;
|
||||
if (thread_ptr -> tx_thread_state == TX_BYTE_MEMORY)
|
||||
{
|
||||
|
||||
/* Timeout condition and the thread still suspended on the byte pool.
|
||||
/* Timeout condition and the thread still suspended on the byte pool.
|
||||
Setup return error status and resume the thread. */
|
||||
|
||||
#ifdef TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
@@ -89,7 +89,7 @@ ALIGN_TYPE *free_ptr;
    /* Initialize the byte pool control block to all zeros. */
    TX_MEMSET(pool_ptr, 0, (sizeof(TX_BYTE_POOL)));

    /* Round the pool size down to something that is evenly divisible by
       an ULONG. */
    pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));

@@ -104,17 +104,17 @@ ALIGN_TYPE *free_ptr;
    pool_ptr -> tx_byte_pool_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
    pool_ptr -> tx_byte_pool_search = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);

    /* Initially, the pool will have two blocks. One large block at the
       beginning that is available and a small allocated block at the end
       of the pool that is there just for the algorithm. Be sure to count
       the available block's header in the available bytes count. */
    pool_ptr -> tx_byte_pool_available = pool_size - ((sizeof(VOID *)) + (sizeof(ALIGN_TYPE)));
    pool_ptr -> tx_byte_pool_fragments = ((UINT) 2);


    /* Each block contains a "next" pointer that points to the next block in the pool followed by a ALIGN_TYPE
       field that contains either the constant TX_BYTE_BLOCK_FREE (if the block is free) or a pointer to the
       owning pool (if the block is allocated). */


    /* Calculate the end of the pool's memory area. */
    block_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
    block_ptr = TX_UCHAR_POINTER_ADD(block_ptr, pool_size);
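/* Worked example (not part of the diff), assuming a 32-bit port where sizeof(VOID *) ==
   sizeof(ALIGN_TYPE) == 4: creating a 2048-byte pool initially reports
   tx_byte_pool_available = 2048 - (4 + 4) = 2040 bytes and exactly two fragments, one large
   free block plus the small terminating block at the end of the area. Names are examples only. */

#include "tx_api.h"

static TX_BYTE_POOL example_byte_pool;
static UCHAR        example_byte_pool_area[2048];

UINT example_create_byte_pool(VOID)
{
    /* 2040 bytes initially available under the stated assumptions. */
    return tx_byte_pool_create(&example_byte_pool, "example byte pool",
                               example_byte_pool_area, sizeof(example_byte_pool_area));
}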
@@ -177,7 +177,7 @@ ALIGN_TYPE *free_ptr;
|
||||
|
||||
/* Increment the number of created byte pools. */
|
||||
_tx_byte_pool_created_count++;
|
||||
|
||||
|
||||
/* Optional byte pool create extended processing. */
|
||||
TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
|
||||
|
||||
|
||||
@@ -110,7 +110,7 @@ TX_BYTE_POOL *previous_pool;
|
||||
|
||||
/* Decrement the number of byte pools created. */
|
||||
_tx_byte_pool_created_count--;
|
||||
|
||||
|
||||
/* See if the byte pool is the only one on the list. */
|
||||
if (_tx_byte_pool_created_count == TX_EMPTY)
|
||||
{
|
||||
@@ -130,7 +130,7 @@ TX_BYTE_POOL *previous_pool;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_byte_pool_created_ptr == pool_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_byte_pool_created_ptr = next_pool;
|
||||
}
|
||||
@@ -144,7 +144,7 @@ TX_BYTE_POOL *previous_pool;
|
||||
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
|
||||
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
|
||||
pool_ptr -> tx_byte_pool_suspended_count = TX_NO_SUSPENSIONS;
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
@@ -152,14 +152,14 @@ TX_BYTE_POOL *previous_pool;
|
||||
on this byte pool. */
|
||||
while (suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
|
||||
@@ -77,8 +77,8 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
|
||||
ULONG *fragments, TX_THREAD **first_suspended,
|
||||
ULONG *suspended_count, TX_BYTE_POOL **next_pool)
|
||||
{
|
||||
|
||||
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the byte pool. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = pool_ptr -> tx_byte_pool_name;
|
||||
}
|
||||
|
||||
/* Retrieve the number of available bytes in the byte pool. */
|
||||
if (available_bytes != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*available_bytes = pool_ptr -> tx_byte_pool_available;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of bytes in the byte pool. */
|
||||
if (fragments != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*fragments = (ULONG) pool_ptr -> tx_byte_pool_fragments;
|
||||
}
|
||||
|
||||
/* Retrieve the first thread suspended on this byte pool. */
|
||||
if (first_suspended != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*first_suspended = pool_ptr -> tx_byte_pool_suspension_list;
|
||||
}
|
||||
|
||||
/* Retrieve the number of threads suspended on this byte pool. */
|
||||
if (suspended_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspended_count = (ULONG) pool_ptr -> tx_byte_pool_suspended_count;
|
||||
}
|
||||
|
||||
/* Retrieve the pointer to the next byte pool created. */
|
||||
if (next_pool != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_pool = pool_ptr -> tx_byte_pool_created_next;
|
||||
}
|
||||
|
||||
|
||||
@@ -102,15 +102,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (pool_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Byte pool pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the pool ID is invalid. */
|
||||
else if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Byte pool pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -132,7 +132,7 @@ UINT status;
|
||||
/* Retrieve the number of allocates on this byte pool. */
|
||||
if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*allocates = pool_ptr -> tx_byte_pool_performance_allocate_count;
|
||||
}
|
||||
|
||||
@@ -146,35 +146,35 @@ UINT status;
|
||||
/* Retrieve the number of fragments searched in this byte pool. */
|
||||
if (fragments_searched != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*fragments_searched = pool_ptr -> tx_byte_pool_performance_search_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of fragments merged on this byte pool. */
|
||||
if (merges != TX_NULL)
|
||||
{
|
||||
|
||||
*merges = pool_ptr -> tx_byte_pool_performance_merge_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of fragment splits on this byte pool. */
|
||||
if (splits != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*splits = pool_ptr -> tx_byte_pool_performance_split_count;
|
||||
}
|
||||
|
||||
/* Retrieve the number of suspensions on this byte pool. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = pool_ptr -> tx_byte_pool_performance_suspension_count;
|
||||
}
|
||||
|
||||
/* Retrieve the number of timeouts on this byte pool. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = pool_ptr -> tx_byte_pool_performance_timeout_count;
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ UINT status;
|
||||
/* Return completion status. */
|
||||
status = TX_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
#else
|
||||
@@ -195,55 +195,55 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (pool_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (releases != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (fragments_searched != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (merges != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (splits != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -109,58 +109,58 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of byte pool allocates. */
|
||||
if (allocates != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*allocates = _tx_byte_pool_performance_allocate_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of byte pool releases. */
|
||||
if (releases != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*releases = _tx_byte_pool_performance_release_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of byte pool fragments searched. */
|
||||
if (fragments_searched != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*fragments_searched = _tx_byte_pool_performance_search_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of byte pool fragments merged. */
|
||||
if (merges != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*merges = _tx_byte_pool_performance_merge_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of byte pool fragment splits. */
|
||||
if (splits != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*splits = _tx_byte_pool_performance_split_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of byte pool suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_byte_pool_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of byte pool timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_byte_pool_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
@@ -215,7 +215,7 @@ UINT status;
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
#endif
|
||||
|
||||
@@ -161,19 +161,19 @@ UINT list_changed;
|
||||
/* Disable interrupts again. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
/* Determine if any changes to the list have occurred while
|
||||
interrupts were enabled. */
|
||||
|
||||
|
||||
/* Is the list head the same? */
|
||||
if (head_ptr != pool_ptr -> tx_byte_pool_suspension_list)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Is the suspended count the same? */
|
||||
if (suspended_count != pool_ptr -> tx_byte_pool_suspended_count)
|
||||
{
|
||||
@@ -212,12 +212,12 @@ UINT list_changed;
|
||||
/* Release preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
of the list. */
|
||||
if (priority_thread_ptr != head_ptr)
|
||||
{
|
||||
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
front of the list. */
|
||||
|
||||
/* First, remove the highest priority thread by updating the
|
||||
|
||||
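The hunk above reorders the suspension list so the highest-priority waiter sits at the front. Applications reach this path through the public prioritize service; a minimal sketch, where pool_0 is a hypothetical byte pool created elsewhere:

/* Move the highest-priority suspended thread to the front of the wait list,
   so the next tx_byte_release() that frees enough memory satisfies it first. */
UINT status = tx_byte_pool_prioritize(&pool_0);

if (status != TX_SUCCESS)
{
    /* pool_0 was not a valid byte pool.  */
}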
@@ -98,7 +98,7 @@ UCHAR **suspend_info_ptr;
|
||||
|
||||
/* Default to successful status. */
|
||||
status = TX_SUCCESS;
|
||||
|
||||
|
||||
/* Set the pool pointer to NULL. */
|
||||
pool_ptr = TX_NULL;
|
||||
|
||||
@@ -109,7 +109,7 @@ UCHAR **suspend_info_ptr;
|
||||
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(memory_ptr);
|
||||
if (work_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Back off the memory pointer to pickup its header. */
|
||||
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, ((sizeof(UCHAR *)) + (sizeof(ALIGN_TYPE))));
|
||||
|
||||
@@ -127,7 +127,7 @@ UCHAR **suspend_info_ptr;
|
||||
/* See if we have a valid pool pointer. */
|
||||
if (pool_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Return pointer error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -137,10 +137,10 @@ UCHAR **suspend_info_ptr;
|
||||
/* See if we have a valid pool. */
|
||||
if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Return pointer error. */
|
||||
status = TX_PTR_ERROR;
|
||||
|
||||
|
||||
/* Reset the pool pointer to NULL. */
|
||||
pool_ptr = TX_NULL;
|
||||
}
|
||||
@@ -163,13 +163,13 @@ UCHAR **suspend_info_ptr;
|
||||
/* Determine if the pointer is valid. */
|
||||
if (pool_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* At this point, we know that the pointer is valid. */
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
@@ -201,7 +201,7 @@ UCHAR **suspend_info_ptr;
|
||||
/* Update the number of available bytes in the pool. */
|
||||
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
|
||||
next_block_ptr = *block_link_ptr;
|
||||
pool_ptr -> tx_byte_pool_available =
|
||||
pool_ptr -> tx_byte_pool_available =
|
||||
pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
|
||||
|
||||
/* Determine if the free block is prior to current search pointer. */
|
||||
@@ -215,8 +215,8 @@ UCHAR **suspend_info_ptr;
|
||||
/* Determine if there are threads suspended on this byte pool. */
|
||||
if (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
/* Now examine the suspension list to find threads waiting for
|
||||
|
||||
/* Now examine the suspension list to find threads waiting for
|
||||
memory. Maybe it is now available! */
|
||||
while (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
@@ -245,7 +245,7 @@ UCHAR **suspend_info_ptr;
|
||||
/* If there is not enough memory, break this loop! */
|
||||
if (work_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Break out of the loop. */
|
||||
break;
|
||||
}
|
||||
@@ -257,7 +257,7 @@ UCHAR **suspend_info_ptr;
|
||||
/* Also, makes sure the memory size is the same. */
|
||||
if (susp_thread_ptr -> tx_thread_suspend_info == memory_size)
|
||||
{
|
||||
|
||||
|
||||
/* Remove the suspended thread from the list. */
|
||||
|
||||
/* Decrement the number of threads suspended. */
|
||||
@@ -302,7 +302,7 @@ UCHAR **suspend_info_ptr;
|
||||
|
||||
/* Clear the memory pointer to indicate that it was given to the suspended thread. */
|
||||
work_ptr = TX_NULL;
|
||||
|
||||
|
||||
/* Put return status into the thread control block. */
|
||||
susp_thread_ptr -> tx_thread_suspend_status = TX_SUCCESS;
|
||||
|
||||
@@ -328,11 +328,11 @@ UCHAR **suspend_info_ptr;
|
||||
TX_DISABLE
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the memory was given to the suspended thread. */
|
||||
if (work_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* No, it wasn't given to the suspended thread. */
|
||||
|
||||
/* Put the memory back on the available list since this thread is no longer
|
||||
@@ -345,7 +345,7 @@ UCHAR **suspend_info_ptr;
|
||||
/* Update the number of available bytes in the pool. */
|
||||
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
|
||||
next_block_ptr = *block_link_ptr;
|
||||
pool_ptr -> tx_byte_pool_available =
|
||||
pool_ptr -> tx_byte_pool_available =
|
||||
pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
|
||||
|
||||
/* Determine if the current pointer is before the search pointer. */
|
||||
@@ -357,7 +357,7 @@ UCHAR **suspend_info_ptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
@@ -366,7 +366,7 @@ UCHAR **suspend_info_ptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* No, threads suspended, restore interrupts. */
|
||||
TX_RESTORE
|
||||
}
|
||||
|
||||
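The release path above hands freed memory straight to a suspended waiter when one fits. From the application side, the services involved look like this minimal sketch (pool name, area size, and allocation size are illustrative):

static TX_BYTE_POOL pool_0;
static UCHAR        pool_area[4096];

VOID byte_pool_example(VOID)
{
CHAR *buffer;

    /* Create the pool over a caller-supplied memory area.  */
    tx_byte_pool_create(&pool_0, "demo pool", pool_area, sizeof(pool_area));

    /* Allocate 128 bytes, suspending until memory is available.  */
    if (tx_byte_allocate(&pool_0, (VOID **) &buffer, 128, TX_WAIT_FOREVER) == TX_SUCCESS)
    {
        /* ... use buffer ...  */

        /* Return the block; a thread suspended on the pool may be resumed here.  */
        tx_byte_release((VOID *) buffer);
    }
}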
@@ -104,11 +104,11 @@ TX_THREAD *previous_thread;
|
||||
|
||||
/* Setup pointer to event flags control block. */
|
||||
group_ptr = TX_VOID_TO_EVENT_FLAGS_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
|
||||
|
||||
|
||||
/* Check for a NULL event flags control block pointer. */
|
||||
if (group_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the group pointer ID valid? */
|
||||
if (group_ptr -> tx_event_flags_group_id == TX_EVENT_FLAGS_ID)
|
||||
{
|
||||
@@ -133,9 +133,9 @@ TX_THREAD *previous_thread;
|
||||
/* Pickup the suspension head. */
|
||||
suspension_head = group_ptr -> tx_event_flags_group_suspension_list;
|
||||
|
||||
/* Determine if the cleanup is being done while a set operation was interrupted. If the
|
||||
/* Determine if the cleanup is being done while a set operation was interrupted. If the
|
||||
suspended count is non-zero and the suspension head is NULL, the list is being processed
|
||||
and cannot be touched from here. The suspension list removal will instead take place
|
||||
and cannot be touched from here. The suspension list removal will instead take place
|
||||
inside the event flag set code. */
|
||||
if (suspension_head != TX_NULL)
|
||||
{
|
||||
@@ -144,7 +144,7 @@ TX_THREAD *previous_thread;
|
||||
|
||||
/* Decrement the local suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
/* Store the updated suspended count. */
|
||||
group_ptr -> tx_event_flags_group_suspended_count = suspended_count;
|
||||
|
||||
@@ -153,7 +153,7 @@ TX_THREAD *previous_thread;
|
||||
{
|
||||
|
||||
/* Yes, the only suspended thread. */
|
||||
|
||||
|
||||
/* Update the head pointer. */
|
||||
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
|
||||
}
|
||||
@@ -161,17 +161,17 @@ TX_THREAD *previous_thread;
|
||||
{
|
||||
|
||||
/* At least one more thread is on the same suspension list. */
|
||||
|
||||
|
||||
/* Update the links of the adjacent threads. */
|
||||
next_thread = thread_ptr -> tx_thread_suspended_next;
|
||||
previous_thread = thread_ptr -> tx_thread_suspended_previous;
|
||||
next_thread -> tx_thread_suspended_previous = previous_thread;
|
||||
previous_thread -> tx_thread_suspended_next = next_thread;
|
||||
|
||||
|
||||
/* Determine if we need to update the head pointer. */
|
||||
if (suspension_head == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Update the list head pointer. */
|
||||
group_ptr -> tx_event_flags_group_suspension_list = next_thread;
|
||||
}
|
||||
@@ -179,7 +179,7 @@ TX_THREAD *previous_thread;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* In this case, the search pointer in an interrupted event flag set must be reset. */
|
||||
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;
|
||||
}
|
||||
@@ -189,7 +189,7 @@ TX_THREAD *previous_thread;
|
||||
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
|
||||
{
|
||||
|
||||
/* Timeout condition and the thread still suspended on the event flags group.
|
||||
/* Timeout condition and the thread still suspended on the event flags group.
|
||||
Setup return error status and resume the thread. */
|
||||
|
||||
#ifdef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO
|
||||
@@ -216,8 +216,8 @@ TX_THREAD *previous_thread;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Resume the thread! Check for preemption even though we are executing
|
||||
from the system timer thread right now which normally executes at the
|
||||
/* Resume the thread! Check for preemption even though we are executing
|
||||
from the system timer thread right now which normally executes at the
|
||||
highest priority. */
|
||||
_tx_thread_system_resume(thread_ptr);
|
||||
|
||||
|
||||
@@ -86,7 +86,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
|
||||
|
||||
/* Setup the basic event flags group fields. */
|
||||
group_ptr -> tx_event_flags_group_name = name_ptr;
|
||||
|
||||
|
||||
/* Disable interrupts to put the event flags group on the created list. */
|
||||
TX_DISABLE
|
||||
|
||||
@@ -121,7 +121,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
|
||||
|
||||
/* Increment the number of created event flag groups. */
|
||||
_tx_event_flags_created_count++;
|
||||
|
||||
|
||||
/* Optional event flag group create extended processing. */
|
||||
TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
|
||||
|
||||
/* Decrement the number of created event flag groups. */
|
||||
_tx_event_flags_created_count--;
|
||||
|
||||
|
||||
/* See if this group is the only one on the list. */
|
||||
if (_tx_event_flags_created_count == TX_EMPTY)
|
||||
{
|
||||
@@ -126,7 +126,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_event_flags_created_ptr == group_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_event_flags_created_ptr = next_group;
|
||||
}
|
||||
@@ -144,18 +144,18 @@ TX_EVENT_FLAGS_GROUP *previous_group;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Walk through the event flag suspension list to resume any and all threads
|
||||
/* Walk through the event flag suspension list to resume any and all threads
|
||||
suspended on this group. */
|
||||
while (suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the number of suspended threads. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
|
||||
@@ -130,16 +130,16 @@ UINT interrupted_set_request;
|
||||
/* Check for AND condition. All flags must be present to satisfy request. */
|
||||
if (and_request == TX_AND)
|
||||
{
|
||||
|
||||
|
||||
/* AND request is present. */
|
||||
|
||||
|
||||
/* Calculate the flags present. */
|
||||
flags_satisfied = (current_flags & requested_flags);
|
||||
|
||||
|
||||
/* Determine if they satisfy the AND request. */
|
||||
if (flags_satisfied != requested_flags)
|
||||
{
|
||||
|
||||
|
||||
/* No, not all the requested flags are present. Clear the flags present variable. */
|
||||
flags_satisfied = ((ULONG) 0);
|
||||
}
|
||||
@@ -150,7 +150,7 @@ UINT interrupted_set_request;
|
||||
/* OR request is present. Simply or the requested flags and the current flags. */
|
||||
flags_satisfied = (current_flags & requested_flags);
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the request is satisfied. */
|
||||
if (flags_satisfied != ((ULONG) 0))
|
||||
{
|
||||
@@ -164,7 +164,7 @@ UINT interrupted_set_request;
|
||||
/* Determine whether or not clearing needs to take place. */
|
||||
if (clear_request == TX_TRUE)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, clear the flags that satisfied this request. */
|
||||
group_ptr -> tx_event_flags_group_current =
|
||||
group_ptr -> tx_event_flags_group_current & (~requested_flags);
|
||||
@@ -190,16 +190,16 @@ UINT interrupted_set_request;
|
||||
/* Check for AND condition. All flags must be present to satisfy request. */
|
||||
if (and_request == TX_AND)
|
||||
{
|
||||
|
||||
|
||||
/* AND request is present. */
|
||||
|
||||
|
||||
/* Calculate the flags present. */
|
||||
flags_satisfied = (current_flags & requested_flags);
|
||||
|
||||
|
||||
/* Determine if they satisfy the AND request. */
|
||||
if (flags_satisfied != requested_flags)
|
||||
{
|
||||
|
||||
|
||||
/* No, not all the requested flags are present. Clear the flags present variable. */
|
||||
flags_satisfied = ((ULONG) 0);
|
||||
}
|
||||
@@ -211,7 +211,7 @@ UINT interrupted_set_request;
|
||||
to see if any are present. */
|
||||
flags_satisfied = (current_flags & requested_flags);
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the request is satisfied. */
|
||||
if (flags_satisfied != ((ULONG) 0))
|
||||
{
|
||||
@@ -235,7 +235,7 @@ UINT interrupted_set_request;
|
||||
set request. */
|
||||
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
|
||||
{
|
||||
|
||||
@@ -252,7 +252,7 @@ UINT interrupted_set_request;
|
||||
event clearing until the set operation is complete. */
|
||||
|
||||
/* Remember the events to clear. */
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear | requested_flags;
|
||||
}
|
||||
else
|
||||
@@ -279,7 +279,7 @@ UINT interrupted_set_request;
|
||||
/* Determine if the preempt disable flag is non-zero. */
|
||||
if (_tx_thread_preempt_disable != ((UINT) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
|
||||
status = TX_NO_EVENTS;
|
||||
}
|
||||
@@ -296,7 +296,7 @@ UINT interrupted_set_request;
|
||||
/* Increment the number of event flags suspensions on this event flag group. */
|
||||
group_ptr -> tx_event_flags_group___performance_suspension_count++;
|
||||
#endif
|
||||
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
TX_THREAD_GET_CURRENT(thread_ptr)
|
||||
|
||||
@@ -325,7 +325,7 @@ UINT interrupted_set_request;
|
||||
|
||||
/* Pickup the suspended count. */
|
||||
suspended_count = group_ptr -> tx_event_flags_group_suspended_count;
|
||||
|
||||
|
||||
/* Setup suspension list. */
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
{
|
||||
@@ -350,7 +350,7 @@ UINT interrupted_set_request;
|
||||
|
||||
/* Increment the number of threads suspended. */
|
||||
group_ptr -> tx_event_flags_group_suspended_count++;
|
||||
|
||||
|
||||
/* Set the state to suspended. */
|
||||
thread_ptr -> tx_thread_state = TX_EVENT_FLAG;
|
||||
|
||||
@@ -377,10 +377,10 @@ UINT interrupted_set_request;
|
||||
|
||||
/* Call actual thread suspension routine. */
|
||||
_tx_thread_system_suspend(thread_ptr);
|
||||
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
|
||||
/* Return the completion status. */
|
||||
status = thread_ptr -> tx_thread_suspend_status;
|
||||
#endif
|
||||
@@ -388,7 +388,7 @@ UINT interrupted_set_request;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Immediate return, return error completion. */
|
||||
status = TX_NO_EVENTS;
|
||||
}
|
||||
|
||||
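The AND/OR evaluation above is driven by the get option passed to the public service. A minimal sketch, assuming an existing group named event_group (hypothetical):

ULONG actual_flags;

/* Wait until bits 0 and 1 are BOTH set, then atomically clear them.  */
tx_event_flags_get(&event_group, 0x3, TX_AND_CLEAR, &actual_flags, TX_WAIT_FOREVER);

/* Return immediately if ANY of bits 4-7 is set, leaving the flags untouched.  */
tx_event_flags_get(&event_group, 0xF0, TX_OR, &actual_flags, TX_NO_WAIT);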
@@ -79,8 +79,8 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_EVENT_FLAGS_GROUP **next_group)
|
||||
{
|
||||
|
||||
@@ -102,7 +102,7 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the event flag group. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = group_ptr -> tx_event_flags_group_name;
|
||||
}
|
||||
|
||||
@@ -111,31 +111,31 @@ TX_INTERRUPT_SAVE_AREA
|
||||
{
|
||||
|
||||
/* Pickup the current flags and apply delayed clearing. */
|
||||
*current_flags = group_ptr -> tx_event_flags_group_current &
|
||||
*current_flags = group_ptr -> tx_event_flags_group_current &
|
||||
~group_ptr -> tx_event_flags_group_delayed_clear;
|
||||
}
|
||||
|
||||
/* Retrieve the first thread suspended on this event flag group. */
|
||||
if (first_suspended != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*first_suspended = group_ptr -> tx_event_flags_group_suspension_list;
|
||||
}
|
||||
|
||||
/* Retrieve the number of threads suspended on this event flag group. */
|
||||
if (suspended_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspended_count = (ULONG) group_ptr -> tx_event_flags_group_suspended_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the pointer to the next event flag group created. */
|
||||
if (next_group != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_group = group_ptr -> tx_event_flags_group_created_next;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
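The fields retrieved above map directly onto the public query service; a minimal sketch, with event_group again a hypothetical existing group:

CHAR                 *name;
ULONG                 current_flags;
TX_THREAD            *first_suspended;
ULONG                 suspended_count;
TX_EVENT_FLAGS_GROUP *next_group;

/* Any output pointer may be TX_NULL if that field is not needed.  */
tx_event_flags_info_get(&event_group, &name, &current_flags,
                        &first_suspended, &suspended_count, &next_group);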
@@ -94,15 +94,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (group_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Event flags group pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the event group ID is invalid. */
|
||||
else if (group_ptr -> tx_event_flags_group_id != TX_EVENT_FLAGS_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Event flags group pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -124,34 +124,34 @@ UINT status;
|
||||
/* Retrieve the number of set operations on this event flag group. */
|
||||
if (sets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*sets = group_ptr -> tx_event_flags_group_performance_set_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of get operations on this event flag group. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = group_ptr -> tx_event_flags_group__performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of thread suspensions on this event flag group. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = group_ptr -> tx_event_flags_group___performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of thread timeouts on this event flag group. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = group_ptr -> tx_event_flags_group____performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Return successful completion. */
|
||||
status = TX_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -101,37 +101,37 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of event flag set operations. */
|
||||
if (sets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*sets = _tx_event_flags_performance_set_count;
|
||||
}
|
||||
|
||||
/* Retrieve the total number of event flag get operations. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = _tx_event_flags_performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of event flag thread suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_event_flags_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of event flag thread timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_event_flags_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
|
||||
@@ -137,7 +137,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
set request. */
|
||||
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
|
||||
{
|
||||
|
||||
@@ -154,15 +154,15 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
event clearing until the set operation is complete. */
|
||||
|
||||
/* Remember the events to clear. */
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear | ~flags_to_set;
|
||||
}
|
||||
else
|
||||
{
|
||||
#endif
|
||||
|
||||
/* Previous set operation was not interrupted, simply clear the
|
||||
specified flags by "ANDing" the flags into the current events
|
||||
/* Previous set operation was not interrupted, simply clear the
|
||||
specified flags by "ANDing" the flags into the current events
|
||||
of the group. */
|
||||
group_ptr -> tx_event_flags_group_current =
|
||||
group_ptr -> tx_event_flags_group_current & flags_to_set;
|
||||
@@ -195,7 +195,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
{
|
||||
|
||||
/* Yes, we need to neutralize the delayed clearing as well. */
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear =
|
||||
group_ptr -> tx_event_flags_group_delayed_clear & ~flags_to_set;
|
||||
}
|
||||
#endif
|
||||
@@ -210,7 +210,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
if (group_ptr -> tx_event_flags_group_suspension_list != TX_NULL)
|
||||
{
|
||||
|
||||
/* Determine if there is just a single thread waiting on the event
|
||||
/* Determine if there is just a single thread waiting on the event
|
||||
flag group. */
|
||||
if (suspended_count == ((UINT) 1))
|
||||
{
|
||||
@@ -223,7 +223,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
|
||||
/* Pickup the current event flags. */
|
||||
current_event_flags = group_ptr -> tx_event_flags_group_current;
|
||||
|
||||
|
||||
/* Pickup the suspend information. */
|
||||
requested_flags = thread_ptr -> tx_thread_suspend_info;
|
||||
|
||||
@@ -236,16 +236,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* Check for AND condition. All flags must be present to satisfy request. */
|
||||
if (and_request == TX_AND)
|
||||
{
|
||||
|
||||
|
||||
/* AND request is present. */
|
||||
|
||||
|
||||
/* Calculate the flags present. */
|
||||
flags_satisfied = (current_event_flags & requested_flags);
|
||||
|
||||
|
||||
/* Determine if they satisfy the AND request. */
|
||||
if (flags_satisfied != requested_flags)
|
||||
{
|
||||
|
||||
|
||||
/* No, not all the requested flags are present. Clear the flags present variable. */
|
||||
flags_satisfied = ((ULONG) 0);
|
||||
}
|
||||
@@ -256,7 +256,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* OR request is present. Simply or the requested flags and the current flags. */
|
||||
flags_satisfied = (current_event_flags & requested_flags);
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the request is satisfied. */
|
||||
if (flags_satisfied != ((ULONG) 0))
|
||||
{
|
||||
@@ -315,7 +315,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
else
|
||||
{
|
||||
|
||||
/* Otherwise, the event flag requests of multiple threads must be
|
||||
/* Otherwise, the event flag requests of multiple threads must be
|
||||
examined. */
|
||||
|
||||
/* Setup thread pointer, keep a local copy of the head pointer. */
|
||||
@@ -325,7 +325,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* Clear the suspended list head pointer to thwart manipulation of
|
||||
the list in ISR's while we are processing here. */
|
||||
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
|
||||
|
||||
|
||||
/* Setup the satisfied thread pointers. */
|
||||
satisfied_list = TX_NULL;
|
||||
last_satisfied = TX_NULL;
|
||||
@@ -382,16 +382,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* Check for AND condition. All flags must be present to satisfy request. */
|
||||
if (and_request == TX_AND)
|
||||
{
|
||||
|
||||
|
||||
/* AND request is present. */
|
||||
|
||||
|
||||
/* Calculate the flags present. */
|
||||
flags_satisfied = (current_event_flags & requested_flags);
|
||||
|
||||
|
||||
/* Determine if they satisfy the AND request. */
|
||||
if (flags_satisfied != requested_flags)
|
||||
{
|
||||
|
||||
|
||||
/* No, not all the requested flags are present. Clear the flags present variable. */
|
||||
flags_satisfied = ((ULONG) 0);
|
||||
}
|
||||
@@ -402,13 +402,13 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* OR request is present. Simply or the requested flags and the current flags. */
|
||||
flags_satisfied = (current_event_flags & requested_flags);
|
||||
}
|
||||
|
||||
/* Check to see if the thread had a timeout or wait abort during the event search processing.
|
||||
If so, just set the flags satisfied to ensure the processing here removes the thread from
|
||||
|
||||
/* Check to see if the thread had a timeout or wait abort during the event search processing.
|
||||
If so, just set the flags satisfied to ensure the processing here removes the thread from
|
||||
the suspension list. */
|
||||
if (thread_ptr -> tx_thread_state != TX_EVENT_FLAG)
|
||||
{
|
||||
|
||||
|
||||
/* Simply set the satisfied flags to 1 in order to remove the thread from the suspension list. */
|
||||
flags_satisfied = ((ULONG) 1);
|
||||
}
|
||||
@@ -421,7 +421,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
|
||||
/* Set the preempt check flag. */
|
||||
preempt_check = TX_TRUE;
|
||||
|
||||
|
||||
/* Determine if the thread is still suspended on the event flag group. If not, a wait
|
||||
abort must have been done from an ISR. */
|
||||
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
|
||||
@@ -437,11 +437,11 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* Determine whether or not clearing needs to take place. */
|
||||
if (clear_request == TX_TRUE)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, clear the flags that satisfied this request. */
|
||||
group_ptr -> tx_event_flags_group_current = group_ptr -> tx_event_flags_group_current & ~requested_flags;
|
||||
}
|
||||
|
||||
|
||||
/* Prepare for resumption of the first thread. */
|
||||
|
||||
/* Clear cleanup routine to avoid timeout. */
|
||||
@@ -478,7 +478,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
list. */
|
||||
if (suspended_list == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, head pointer needs to be updated. */
|
||||
suspended_list = thread_ptr -> tx_thread_suspended_next;
|
||||
}
|
||||
@@ -494,7 +494,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* First thread on the satisfied list. */
|
||||
satisfied_list = thread_ptr;
|
||||
last_satisfied = thread_ptr;
|
||||
|
||||
|
||||
/* Setup initial next pointer. */
|
||||
thread_ptr -> tx_thread_suspended_next = TX_NULL;
|
||||
}
|
||||
@@ -502,7 +502,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
{
|
||||
|
||||
/* Not the first thread on the satisfied list. */
|
||||
|
||||
|
||||
/* Link it up at the end. */
|
||||
last_satisfied -> tx_thread_suspended_next = thread_ptr;
|
||||
thread_ptr -> tx_thread_suspended_next = TX_NULL;
|
||||
@@ -515,7 +515,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
} while (suspended_count != TX_NO_SUSPENSIONS);
|
||||
|
||||
/* Setup the group's suspension list head again. */
|
||||
@@ -543,7 +543,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
thread_ptr = satisfied_list;
|
||||
while(thread_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Get next pointer first. */
|
||||
next_thread_ptr = thread_ptr -> tx_thread_suspended_next;
|
||||
|
||||
@@ -586,7 +586,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
|
||||
/* Determine if we need to set the reset search field. */
|
||||
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* We interrupted a search of an event flag group suspension
|
||||
list. Make sure we reset the search. */
|
||||
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;
|
||||
|
||||
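The set/clear distinction above comes from the set option: TX_OR turns the named bits on, while TX_AND keeps only the bits named in flags_to_set, which is how flags are cleared. A minimal sketch against the same hypothetical event_group:

/* Set bit 0 (OR the new flag into the group's current flags).  */
tx_event_flags_set(&event_group, 0x1, TX_OR);

/* Clear bit 0 (AND the current flags with the complement of bit 0).  */
tx_event_flags_set(&event_group, ~((ULONG) 0x1), TX_AND);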
@@ -44,9 +44,9 @@
#include "tx_byte_pool.h"


/* Define the unused memory pointer.  The value of the first available
/* Define the unused memory pointer.  The value of the first available
   memory address is placed in this variable in the low-level
   initialization function.  The content of this variable is passed
   initialization function.  The content of this variable is passed
   to the application's system definition function.  */

VOID     *_tx_initialize_unused_memory;

@@ -98,8 +98,8 @@ VOID _tx_initialize_kernel_enter(VOID)
|
||||
|
||||
/* No, the initialization still needs to take place. */
|
||||
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
later used to represent interrupt nesting. */
|
||||
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
|
||||
|
||||
@@ -109,9 +109,9 @@ VOID _tx_initialize_kernel_enter(VOID)
|
||||
/* Invoke the low-level initialization to handle all processor specific
|
||||
initialization issues. */
|
||||
_tx_initialize_low_level();
|
||||
|
||||
/* Invoke the high-level initialization to exercise all of the
|
||||
ThreadX components and the application's initialization
|
||||
|
||||
/* Invoke the high-level initialization to exercise all of the
|
||||
ThreadX components and the application's initialization
|
||||
function. */
|
||||
_tx_initialize_high_level();
|
||||
|
||||
@@ -122,8 +122,8 @@ VOID _tx_initialize_kernel_enter(VOID)
|
||||
/* Optional processing extension. */
|
||||
TX_INITIALIZE_KERNEL_ENTER_EXTENSION
|
||||
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
later used to represent interrupt nesting. */
|
||||
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
|
||||
|
||||
@@ -131,7 +131,7 @@ VOID _tx_initialize_kernel_enter(VOID)
|
||||
first available memory address to it. */
|
||||
tx_application_define(_tx_initialize_unused_memory);
|
||||
|
||||
/* Set the system state in preparation for entering the thread
|
||||
/* Set the system state in preparation for entering the thread
|
||||
scheduler. */
|
||||
_tx_thread_system_state = TX_INITIALIZE_IS_FINISHED;
|
||||
|
||||
|
||||
@@ -76,8 +76,8 @@
|
||||
VOID _tx_initialize_kernel_setup(VOID)
|
||||
{
|
||||
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
/* Ensure that the system state variable is set to indicate
|
||||
initialization is in progress. Note that this variable is
|
||||
later used to represent interrupt nesting. */
|
||||
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
|
||||
|
||||
@@ -87,9 +87,9 @@ VOID _tx_initialize_kernel_setup(VOID)
|
||||
/* Invoke the low-level initialization to handle all processor specific
|
||||
initialization issues. */
|
||||
_tx_initialize_low_level();
|
||||
|
||||
/* Invoke the high-level initialization to exercise all of the
|
||||
ThreadX components and the application's initialization
|
||||
|
||||
/* Invoke the high-level initialization to exercise all of the
|
||||
ThreadX components and the application's initialization
|
||||
function. */
|
||||
_tx_initialize_high_level();
|
||||
|
||||
|
||||
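The initialization sequence above is what runs underneath the usual application entry point. A minimal sketch of that flow; object creation inside tx_application_define is left illustrative:

#include "tx_api.h"

int main(void)
{
    /* Never returns: runs the low-level and high-level initialization,
       calls tx_application_define(), then enters the scheduler.  */
    tx_kernel_enter();
}

VOID tx_application_define(VOID *first_unused_memory)
{
    /* first_unused_memory is _tx_initialize_unused_memory, set up by the
       low-level initialization; create threads, pools, groups, etc. here.  */
}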
@@ -92,7 +92,7 @@ ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2)
|
||||
{
|
||||
|
||||
ULONG value;
|
||||
|
||||
|
||||
value = (ULONG)(ptr1 - ptr2);
|
||||
return(value);
|
||||
}
|
||||
@@ -150,7 +150,7 @@ ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount)
|
||||
ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2)
|
||||
{
|
||||
ULONG value;
|
||||
|
||||
|
||||
value = (ULONG)(ptr1 - ptr2);
|
||||
return(value);
|
||||
}
|
||||
@@ -362,7 +362,7 @@ TX_THREAD *trace_thread_ptr;
|
||||
#endif
|
||||
trace_event_ptr++;
|
||||
if (trace_event_ptr >= _tx_trace_buffer_end_ptr)
|
||||
{
|
||||
{
|
||||
trace_event_ptr = _tx_trace_buffer_start_ptr;
|
||||
_tx_trace_buffer_current_ptr = trace_event_ptr;
|
||||
_tx_trace_header_ptr -> tx_trace_header_buffer_current_pointer = (ULONG) trace_event_ptr;
|
||||
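The hunk above is the circular-buffer wrap inside the trace facility: when the next entry pointer runs past the end of the buffer it is reset to the start, and the trace header's current-pointer field is kept in step. The same wrap logic reduced to a sketch with hypothetical names:

/* Advance to the next trace entry, wrapping at the end of the buffer.  */
entry_ptr++;
if (entry_ptr >= buffer_end_ptr)
{
    entry_ptr = buffer_start_ptr;   /* wrap back to the oldest slot */
}
current_entry_ptr = entry_ptr;      /* publish the new write position */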
@@ -813,7 +813,7 @@ UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer)
|
||||
/* Return a UCHAR pointer. */
|
||||
return((UCHAR *) ((VOID *) pointer));
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
@@ -100,14 +100,14 @@ TX_THREAD *previous_thread;
|
||||
/* Check for valid suspension sequence. */
|
||||
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
|
||||
{
|
||||
|
||||
|
||||
/* Setup pointer to mutex control block. */
|
||||
mutex_ptr = TX_VOID_TO_MUTEX_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
|
||||
|
||||
|
||||
/* Check for NULL mutex pointer. */
|
||||
if (mutex_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Determine if the mutex ID is valid. */
|
||||
if (mutex_ptr -> tx_mutex_id == TX_MUTEX_ID)
|
||||
{
|
||||
@@ -133,7 +133,7 @@ TX_THREAD *previous_thread;
|
||||
suspended_count = mutex_ptr -> tx_mutex_suspended_count;
|
||||
|
||||
/* Remove the suspended thread from the list. */
|
||||
|
||||
|
||||
/* See if this is the only suspended thread on the list. */
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
{
|
||||
@@ -147,7 +147,7 @@ TX_THREAD *previous_thread;
|
||||
{
|
||||
|
||||
/* At least one more thread is on the same suspension list. */
|
||||
|
||||
|
||||
/* Update the links of the adjacent threads. */
|
||||
next_thread = thread_ptr -> tx_thread_suspended_next;
|
||||
previous_thread = thread_ptr -> tx_thread_suspended_previous;
|
||||
@@ -157,18 +157,18 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if we need to update the head pointer. */
|
||||
if (mutex_ptr -> tx_mutex_suspension_list == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Update the list head pointer. */
|
||||
mutex_ptr -> tx_mutex_suspension_list = next_thread;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Now we need to determine if this cleanup is from a terminate, timeout,
|
||||
or from a wait abort. */
|
||||
if (thread_ptr -> tx_thread_state == TX_MUTEX_SUSP)
|
||||
{
|
||||
|
||||
/* Timeout condition and the thread still suspended on the mutex.
|
||||
/* Timeout condition and the thread still suspended on the mutex.
|
||||
Setup return error status and resume the thread. */
|
||||
|
||||
#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO
|
||||
@@ -194,7 +194,7 @@ TX_THREAD *previous_thread;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Resume the thread! */
|
||||
_tx_thread_system_resume(thread_ptr);
|
||||
|
||||
@@ -208,7 +208,7 @@ TX_THREAD *previous_thread;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
#endif
|
||||
@@ -269,21 +269,21 @@ UINT status;
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
|
||||
/* Temporarily disable preemption. */
|
||||
_tx_thread_preempt_disable++;
|
||||
|
||||
/* Loop to look at all the mutexes. */
|
||||
do
|
||||
{
|
||||
|
||||
|
||||
/* Pickup the mutex head pointer. */
|
||||
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
|
||||
|
||||
/* Determine if there is a mutex. */
|
||||
if (mutex_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, set the ownership count to 1. */
|
||||
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
|
||||
|
||||
@@ -307,10 +307,10 @@ UINT status;
|
||||
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
|
||||
}
|
||||
} while (mutex_ptr != TX_NULL);
|
||||
|
||||
|
||||
/* Restore preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
}
|
||||
|
||||
@@ -88,7 +88,7 @@ TX_MUTEX *previous_mutex;
|
||||
/* Setup the basic mutex fields. */
|
||||
mutex_ptr -> tx_mutex_name = name_ptr;
|
||||
mutex_ptr -> tx_mutex_inherit = inherit;
|
||||
|
||||
|
||||
/* Disable interrupts to place the mutex on the created list. */
|
||||
TX_DISABLE
|
||||
|
||||
@@ -126,7 +126,7 @@ TX_MUTEX *previous_mutex;
|
||||
|
||||
/* Increment the ownership count. */
|
||||
_tx_mutex_created_count++;
|
||||
|
||||
|
||||
/* Optional mutex create extended processing. */
|
||||
TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
|
||||
|
||||
|
||||
@@ -110,7 +110,7 @@ UINT status;
|
||||
|
||||
/* Decrement the created count. */
|
||||
_tx_mutex_created_count--;
|
||||
|
||||
|
||||
/* See if the mutex is the only one on the list. */
|
||||
if (_tx_mutex_created_count == TX_EMPTY)
|
||||
{
|
||||
@@ -130,7 +130,7 @@ UINT status;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_mutex_created_ptr == mutex_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_mutex_created_ptr = next_mutex;
|
||||
}
|
||||
@@ -156,7 +156,7 @@ UINT status;
|
||||
{
|
||||
|
||||
/* Yes, remove this mutex from the owned list. */
|
||||
|
||||
|
||||
/* Set the ownership count to 1. */
|
||||
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
|
||||
|
||||
@@ -184,14 +184,14 @@ UINT status;
|
||||
on this mutex. */
|
||||
while (suspended_count != ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
@@ -215,7 +215,7 @@ UINT status;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Resume the thread. */
|
||||
_tx_thread_system_resume(thread_ptr);
|
||||
#endif
|
||||
|
||||
@@ -126,7 +126,7 @@ UINT status;
|
||||
/* Determine if priority inheritance is required. */
|
||||
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
|
||||
{
|
||||
|
||||
|
||||
/* Remember the current priority of thread. */
|
||||
mutex_ptr -> tx_mutex_original_priority = thread_ptr -> tx_thread_priority;
|
||||
|
||||
@@ -178,7 +178,7 @@ UINT status;
|
||||
else if (mutex_ptr -> tx_mutex_owner == thread_ptr)
|
||||
{
|
||||
|
||||
/* The owning thread is requesting the mutex again, just
|
||||
/* The owning thread is requesting the mutex again, just
|
||||
increment the ownership count. */
|
||||
mutex_ptr -> tx_mutex_ownership_count++;
|
||||
|
||||
@@ -279,7 +279,7 @@ UINT status;
|
||||
previous_thread -> tx_thread_suspended_next = thread_ptr;
|
||||
next_thread -> tx_thread_suspended_previous = thread_ptr;
|
||||
}
|
||||
|
||||
|
||||
/* Increment the suspension count. */
|
||||
mutex_ptr -> tx_mutex_suspended_count++;
|
||||
|
||||
@@ -288,7 +288,7 @@ UINT status;
|
||||
|
||||
#ifdef TX_NOT_INTERRUPTABLE
|
||||
|
||||
/* Determine if we need to raise the priority of the thread
|
||||
/* Determine if we need to raise the priority of the thread
|
||||
owning the mutex. */
|
||||
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
|
||||
{
|
||||
@@ -304,7 +304,7 @@ UINT status;
|
||||
/* Determine if we have to update inherit priority level of the mutex owner. */
|
||||
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Remember the new priority inheritance priority. */
|
||||
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
|
||||
}
|
||||
@@ -347,7 +347,7 @@ UINT status;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Determine if we need to raise the priority of the thread
|
||||
/* Determine if we need to raise the priority of the thread
|
||||
owning the mutex. */
|
||||
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
|
||||
{
|
||||
@@ -363,7 +363,7 @@ UINT status;
|
||||
/* Determine if we have to update inherit priority level of the mutex owner. */
|
||||
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Remember the new priority inheritance priority. */
|
||||
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
|
||||
}
|
||||
|
||||
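The priority-inheritance handling above is enabled per mutex at creation time. A minimal usage sketch, where mutex_0 is a hypothetical object:

static TX_MUTEX mutex_0;

/* TX_INHERIT enables priority inheritance; TX_NO_INHERIT disables it.  */
tx_mutex_create(&mutex_0, "demo mutex", TX_INHERIT);

if (tx_mutex_get(&mutex_0, TX_WAIT_FOREVER) == TX_SUCCESS)
{
    /* ... critical section; a higher-priority waiter can temporarily
       raise this thread's priority while it owns the mutex ...  */
    tx_mutex_put(&mutex_0);
}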
@@ -79,7 +79,7 @@
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_MUTEX **next_mutex)
|
||||
{
|
||||
|
||||
@@ -101,45 +101,45 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the mutex. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = mutex_ptr -> tx_mutex_name;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the current ownership count of the mutex. */
|
||||
if (count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*count = ((ULONG) mutex_ptr -> tx_mutex_ownership_count);
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the current owner of the mutex. */
|
||||
if (owner != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*owner = mutex_ptr -> tx_mutex_owner;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the first thread suspended on this mutex. */
|
||||
if (first_suspended != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*first_suspended = mutex_ptr -> tx_mutex_suspension_list;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of threads suspended on this mutex. */
|
||||
if (suspended_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspended_count = (ULONG) mutex_ptr -> tx_mutex_suspended_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the pointer to the next mutex created. */
|
||||
if (next_mutex != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_mutex = mutex_ptr -> tx_mutex_created_next;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -99,15 +99,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (mutex_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Mutex pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the mutex ID is invalid. */
|
||||
else if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Mutex pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -129,45 +129,45 @@ UINT status;
|
||||
/* Retrieve the number of puts on this mutex. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*puts = mutex_ptr -> tx_mutex_performance_put_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of gets on this mutex. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = mutex_ptr -> tx_mutex_performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of suspensions on this mutex. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = mutex_ptr -> tx_mutex_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of timeouts on this mutex. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = mutex_ptr -> tx_mutex_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of priority inversions on this mutex. */
|
||||
if (inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*inversions = mutex_ptr -> tx_mutex_performance_priority_inversion_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of priority inheritances on this mutex. */
|
||||
if (inheritances != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*inheritances = mutex_ptr -> tx_mutex_performance__priority_inheritance_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
}
|
||||
@@ -225,7 +225,7 @@ UINT status;
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions,
|
||||
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions,
|
||||
ULONG *timeouts, ULONG *inversions, ULONG *inheritances)
|
||||
{
|
||||
|
||||
@@ -106,51 +106,51 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of mutex puts. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*puts = _tx_mutex_performance_put_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of mutex gets. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = _tx_mutex_performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of mutex suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_mutex_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of mutex timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_mutex_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of mutex priority inversions. */
|
||||
if (inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*inversions = _tx_mutex_performance_priority_inversion_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of mutex priority inheritances. */
|
||||
if (inheritances != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*inheritances = _tx_mutex_performance__priority_inheritance_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
@@ -159,43 +159,43 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (inheritances != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -164,28 +164,28 @@ UINT status;
|
||||
/* Disable interrupts again. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
/* Determine if any changes to the list have occurred while
|
||||
interrupts were enabled. */
|
||||
|
||||
|
||||
/* Is the list head the same? */
|
||||
if (head_ptr != mutex_ptr -> tx_mutex_suspension_list)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Is the suspended count the same? */
|
||||
if (suspended_count != mutex_ptr -> tx_mutex_suspended_count)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the list has changed. */
|
||||
if (list_changed == TX_FALSE)
|
||||
{
|
||||
@@ -215,12 +215,12 @@ UINT status;
|
||||
/* Release preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
of the list. */
|
||||
if (priority_thread_ptr != head_ptr)
|
||||
{
|
||||
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
front of the list. */
|
||||
|
||||
/* First, remove the highest priority thread by updating the
|
||||
|
||||
@@ -116,17 +116,17 @@ UINT map_index;
|
||||
|
||||
/* Change thread priority to the new mutex priority-inheritance priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
|
||||
|
||||
/* Determine how to setup the thread's preemption-threshold. */
|
||||
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread preemption-threshold to the user's preemption-threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change the thread preemption-threshold to the new threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
@@ -156,22 +156,22 @@ UINT map_index;
|
||||
/* Call actual non-interruptable thread suspension routine. */
|
||||
_tx_thread_system_ni_suspend(thread_ptr, ((ULONG) 0));
|
||||
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
protection against all preemption. */
|
||||
|
||||
/* Change thread priority to the new mutex priority-inheritance priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
|
||||
|
||||
/* Determine how to setup the thread's preemption-threshold. */
|
||||
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread preemption-threshold to the user's preemption-threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change the thread preemption-threshold to the new threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
@@ -198,29 +198,29 @@ UINT map_index;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* The thread is ready and must first be removed from the list. Call the
|
||||
/* The thread is ready and must first be removed from the list. Call the
|
||||
system suspend function to accomplish this. */
|
||||
_tx_thread_system_suspend(thread_ptr);
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
protection against all preemption. */
|
||||
|
||||
/* Change thread priority to the new mutex priority-inheritance priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
|
||||
|
||||
/* Determine how to setup the thread's preemption-threshold. */
|
||||
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread preemption-threshold to the user's preemption-threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change the thread preemption-threshold to the new threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
@@ -240,14 +240,14 @@ UINT map_index;
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
#endif
|
||||
|
||||
|
||||
/* Pickup the next thread to execute. */
|
||||
next_execute_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
/* Determine if this thread is not the next thread to execute. */
|
||||
if (thread_ptr != next_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Make sure the thread is still ready. */
|
||||
if (thread_ptr -> tx_thread_state == TX_READY)
|
||||
{
|
||||
@@ -269,7 +269,7 @@ UINT map_index;
|
||||
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
|
||||
if (original_priority < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Ensure that this thread is placed at the front of the priority list. */
|
||||
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
|
||||
}
|
||||
@@ -287,7 +287,7 @@ UINT map_index;
|
||||
/* Compare the next thread to execute thread's priority against the thread's preemption-threshold. */
|
||||
if (thread_ptr -> tx_thread_preempt_threshold <= next_execute_ptr -> tx_thread_priority)
|
||||
{
|
||||
|
||||
|
||||
/* We must swap execute pointers to enforce the preemption-threshold of a thread coming out of
|
||||
priority inheritance. */
|
||||
_tx_thread_execute_ptr = thread_ptr;
|
||||
@@ -295,7 +295,7 @@ UINT map_index;
|
||||
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
|
||||
if (original_priority < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Ensure that this thread is placed at the front of the priority list. */
|
||||
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
|
||||
}
|
||||
@@ -330,7 +330,7 @@ UINT map_index;
|
||||
}
|
||||
|
||||
#ifndef TX_NOT_INTERRUPTABLE
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
#endif
|
||||
|
||||
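The priority-change hunks above repeatedly pick the new preemption-threshold as the numerically smaller of the user's configured threshold and the new (inherited) priority. Expressed as a standalone helper, purely as a sketch and not part of the source:

/* ThreadX priorities: numerically smaller means higher priority.  The
   effective preemption-threshold therefore follows the inherited priority
   unless the user's own threshold is already more restrictive.  */
static UINT effective_preempt_threshold(UINT user_threshold, UINT new_priority)
{
    if (user_threshold < new_priority)
    {
        return user_threshold;
    }

    return new_priority;
}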
@@ -131,8 +131,8 @@ UINT inheritance_priority;
|
||||
/* Check to see if the mutex is owned by the calling thread. */
|
||||
if (mutex_ptr -> tx_mutex_owner != current_thread)
|
||||
{
|
||||
|
||||
/* Determine if the preempt disable flag is set, indicating that
|
||||
|
||||
/* Determine if the preempt disable flag is set, indicating that
|
||||
the caller is not the application but from ThreadX. In such
|
||||
cases, the thread mutex owner does not need to match. */
|
||||
if (_tx_thread_preempt_disable == ((UINT) 0))
|
||||
@@ -147,11 +147,11 @@ UINT inheritance_priority;
|
||||
status = TX_NOT_OWNED;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if we should continue. */
|
||||
if (status == TX_NOT_DONE)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the mutex ownership count. */
|
||||
mutex_ptr -> tx_mutex_ownership_count--;
|
||||
|
||||
@@ -182,9 +182,9 @@ UINT inheritance_priority;
|
||||
{
|
||||
|
||||
/* The mutex is now available. */
|
||||
|
||||
|
||||
/* Remove this mutex from the owned mutex list. */
|
||||
|
||||
|
||||
/* Decrement the ownership count. */
|
||||
thread_ptr -> tx_thread_owned_mutex_count--;
|
||||
|
||||
@@ -218,14 +218,14 @@ UINT inheritance_priority;
|
||||
/* Determine if the simple, non-suspension, non-priority inheritance case is present. */
|
||||
if (mutex_ptr -> tx_mutex_suspension_list == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is this a priority inheritance mutex? */
|
||||
if (mutex_ptr -> tx_mutex_inherit == TX_FALSE)
|
||||
{
|
||||
|
||||
/* Yes, we are done - set the mutex owner to NULL. */
|
||||
mutex_ptr -> tx_mutex_owner = TX_NULL;
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
@@ -233,11 +233,11 @@ UINT inheritance_priority;
|
||||
status = TX_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the processing is complete. */
|
||||
if (status == TX_NOT_DONE)
|
||||
{
|
||||
|
||||
|
||||
/* Initialize original owner and thread priority. */
|
||||
old_owner = TX_NULL;
|
||||
old_priority = thread_ptr -> tx_thread_user_priority;
|
||||
@@ -257,8 +257,8 @@ UINT inheritance_priority;
|
||||
|
||||
/* Default the inheritance priority to disabled. */
|
||||
inheritance_priority = ((UINT) TX_MAX_PRIORITIES);
|
||||
|
||||
/* Search the owned mutexes for this thread to determine the highest priority for this
|
||||
|
||||
/* Search the owned mutexes for this thread to determine the highest priority for this
|
||||
former mutex owner to return to. */
|
||||
next_mutex = thread_ptr -> tx_thread_owned_mutex_list;
|
||||
while (next_mutex != TX_NULL)
|
||||
@@ -267,8 +267,8 @@ UINT inheritance_priority;
|
||||
/* Does this mutex support priority inheritance? */
|
||||
if (next_mutex -> tx_mutex_inherit == TX_TRUE)
|
||||
{
|
||||
|
||||
/* Determine if highest priority field of the mutex is higher than the priority to
|
||||
|
||||
/* Determine if highest priority field of the mutex is higher than the priority to
|
||||
restore. */
|
||||
if (next_mutex -> tx_mutex_highest_priority_waiting < inheritance_priority)
|
||||
{
|
||||
@@ -284,7 +284,7 @@ UINT inheritance_priority;
|
||||
/* Are we at the end of the list? */
|
||||
if (next_mutex == thread_ptr -> tx_thread_owned_mutex_list)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, set the next mutex to NULL. */
|
||||
next_mutex = TX_NULL;
|
||||
}
|
||||
@@ -298,14 +298,14 @@ UINT inheritance_priority;
|
||||
/* Undo the temporarily preemption disable. */
|
||||
_tx_thread_preempt_disable--;
|
||||
#endif
|
||||
|
||||
|
||||
/* Set the inherit priority to that of the highest priority thread waiting on the mutex. */
|
||||
thread_ptr -> tx_thread_inherit_priority = inheritance_priority;
|
||||
|
||||
|
||||
/* Determine if the inheritance priority is less than the default old priority. */
|
||||
if (inheritance_priority < old_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, update the old priority. */
|
||||
old_priority = inheritance_priority;
|
||||
}
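/* Illustrative application-level sketch (not part of this source file; names are
hypothetical): the search above computes the priority a releasing owner falls
back to, which is what makes TX_INHERIT mutexes useful against priority
inversion. */

#include "tx_api.h"

TX_MUTEX example_mutex;            /* Created once, e.g. in tx_application_define. */

UINT example_mutex_setup(VOID)
{

    /* TX_INHERIT enables the priority inheritance handled by the code above. */
    return(tx_mutex_create(&example_mutex, "example mutex", TX_INHERIT));
}

VOID example_worker_entry(ULONG input)
{

    TX_PARAMETER_NOT_USED(input);

    /* While this thread owns the inheritance mutex and a higher-priority thread
       blocks on it, this thread is temporarily raised to that priority; the
       tx_mutex_put below returns it to the highest priority still required by
       any other TX_INHERIT mutex it owns, as computed by the search above. */
    tx_mutex_get(&example_mutex, TX_WAIT_FOREVER);

    /* ...access the shared resource... */

    tx_mutex_put(&example_mutex);
}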

@@ -332,7 +332,7 @@ UINT inheritance_priority;
TX_RESTORE
#endif

/* Call the mutex prioritize processing to ensure the
/* Call the mutex prioritize processing to ensure the
highest priority thread is resumed. */
#ifdef TX_MISRA_ENABLE
do

@@ -375,17 +375,17 @@ UINT inheritance_priority;
TX_RESTORE
#endif

/* Mutex is not owned, but it is possible that a thread that
/* Mutex is not owned, but it is possible that a thread that
caused a priority inheritance to occur is no longer waiting
on the mutex. */

/* Setup the highest priority waiting thread. */
mutex_ptr -> tx_mutex_highest_priority_waiting = (UINT) TX_MAX_PRIORITIES;

/* Determine if we need to restore priority. */
if ((mutex_ptr -> tx_mutex_owner) -> tx_thread_priority != old_priority)
{

/* Yes, restore the priority of thread. */
_tx_mutex_priority_change(mutex_ptr -> tx_mutex_owner, old_priority);
}

@@ -424,7 +424,7 @@ UINT inheritance_priority;

/* Remember the old mutex owner. */
old_owner = mutex_ptr -> tx_mutex_owner;

/* Setup owner thread priority information. */
mutex_ptr -> tx_mutex_original_priority = thread_ptr -> tx_thread_priority;

@@ -473,7 +473,7 @@ UINT inheritance_priority;

/* Decrement the suspension count. */
mutex_ptr -> tx_mutex_suspended_count--;

/* Pickup the suspended count. */
suspended_count = mutex_ptr -> tx_mutex_suspended_count;

@@ -482,7 +482,7 @@ UINT inheritance_priority;
{

/* Yes, the only suspended thread. */

/* Update the head pointer. */
mutex_ptr -> tx_mutex_suspension_list = TX_NULL;
}

@@ -536,7 +536,7 @@ UINT inheritance_priority;
_tx_mutex_prioritize(mutex_ptr);
#endif
}

/* Now, pickup the list head and set the priority. */

/* Determine if there still are threads suspended for this mutex. */

@@ -551,11 +551,11 @@ UINT inheritance_priority;

/* The previous priority needs to be restored after priority
inheritance. */

/* Determine if we need to restore priority. */
if (old_owner -> tx_thread_priority != old_priority)
{

/* Restore priority of thread. */
_tx_mutex_priority_change(old_owner, old_priority);
}

@@ -579,7 +579,7 @@ UINT inheritance_priority;
{

/* Yes, priority inheritance is requested. */

/* Determine if there are any more threads still suspended on the mutex. */
if (mutex_ptr -> tx_mutex_suspended_count != TX_NO_SUSPENSIONS)
{

@@ -594,7 +594,7 @@ UINT inheritance_priority;
#else
_tx_mutex_prioritize(mutex_ptr);
#endif

/* Now, pickup the list head and set the priority. */

/* Optional processing extension. */

@@ -618,11 +618,11 @@ UINT inheritance_priority;

/* The previous priority needs to be restored after priority
inheritance. */

/* Is the priority different? */
if (old_owner -> tx_thread_priority != old_priority)
{

/* Restore the priority of thread. */
_tx_mutex_priority_change(old_owner, old_priority);
}

@@ -631,7 +631,7 @@ UINT inheritance_priority;
/* Resume thread. */
_tx_thread_system_resume(thread_ptr);
#endif

/* Return a successful status. */
status = TX_SUCCESS;
}

@@ -645,7 +645,7 @@ UINT inheritance_priority;

/* Restore interrupts. */
TX_RESTORE

/* Caller does not own the mutex. */
status = TX_NOT_OWNED;
}

@@ -87,7 +87,7 @@ UINT suspended_count;
|
||||
TX_THREAD *next_thread;
|
||||
TX_THREAD *previous_thread;
|
||||
|
||||
|
||||
|
||||
#ifndef TX_NOT_INTERRUPTABLE
|
||||
|
||||
/* Disable interrupts to remove the suspended thread from the queue. */
|
||||
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if the cleanup is still required. */
|
||||
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_queue_cleanup))
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid suspension sequence. */
|
||||
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
|
||||
{
|
||||
@@ -122,7 +122,7 @@ TX_THREAD *previous_thread;
|
||||
#endif
|
||||
|
||||
/* Yes, we still have thread suspension! */
|
||||
|
||||
|
||||
/* Clear the suspension cleanup flag. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
@@ -168,7 +168,7 @@ TX_THREAD *previous_thread;
|
||||
if (thread_ptr -> tx_thread_state == TX_QUEUE_SUSP)
|
||||
{
|
||||
|
||||
/* Timeout condition and the thread still suspended on the queue.
|
||||
/* Timeout condition and the thread still suspended on the queue.
|
||||
Setup return error status and resume the thread. */
|
||||
|
||||
#ifdef TX_QUEUE_ENABLE_PERFORMANCE_INFO
|
||||
@@ -183,17 +183,17 @@ TX_THREAD *previous_thread;
|
||||
/* Setup return status. */
|
||||
if (queue_ptr -> tx_queue_enqueued != TX_NO_MESSAGES)
|
||||
{
|
||||
|
||||
|
||||
/* Queue full timeout! */
|
||||
thread_ptr -> tx_thread_suspend_status = TX_QUEUE_FULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Queue empty timeout! */
|
||||
thread_ptr -> tx_thread_suspend_status = TX_QUEUE_EMPTY;
|
||||
}
|
||||
|
||||
|
||||
#ifdef TX_NOT_INTERRUPTABLE
|
||||
|
||||
/* Resume the thread! */
|
||||
|
||||
@@ -74,7 +74,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
VOID *queue_start, ULONG queue_size)
{

@@ -91,7 +91,7 @@ TX_QUEUE *previous_queue;

/* Setup the basic queue fields. */
queue_ptr -> tx_queue_name = name_ptr;

/* Save the message size in the control block. */
queue_ptr -> tx_queue_message_size = message_size;

@@ -100,7 +100,7 @@ TX_QUEUE *previous_queue;
capacity = (UINT) (queue_size / ((ULONG) (((ULONG) message_size) * (sizeof(ULONG)))));
used_words = capacity * message_size;

/* Save the starting address and calculate the ending address of
/* Save the starting address and calculate the ending address of
the queue. Note that the ending address is really one past the
end! */
queue_ptr -> tx_queue_start = TX_VOID_TO_ULONG_POINTER_CONVERT(queue_start);
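/* Worked example for the capacity calculation above, assuming a 32-bit port where
sizeof(ULONG) is 4 (names below are hypothetical, not part of this file):
message_size = TX_4_ULONG means 4 words (16 bytes) per message, so a 1024-byte
area yields capacity = 1024 / (4 * 4) = 64 messages and used_words = 256. */

#include "tx_api.h"

static ULONG example_queue_area[256];      /* 256 ULONGs = 1024 bytes of storage.  */
static TX_QUEUE example_queue;

UINT example_queue_setup(VOID)
{

    /* Creates a queue that holds 64 four-word messages in the area above. */
    return(tx_queue_create(&example_queue, "example queue", TX_4_ULONG,
                           example_queue_area, sizeof(example_queue_area)));
}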
|
||||
|
||||
@@ -125,7 +125,7 @@ TX_QUEUE *previous_queue;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_queue_created_ptr == queue_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_queue_created_ptr = next_queue;
|
||||
}
|
||||
@@ -147,14 +147,14 @@ TX_QUEUE *previous_queue;
|
||||
on this queue. */
|
||||
while (suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ TX_THREAD *thread_ptr;
|
||||
if (queue_ptr -> tx_queue_suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
/* Yes, there are threads suspended on this queue, they must be
|
||||
/* Yes, there are threads suspended on this queue, they must be
|
||||
resumed! */
|
||||
|
||||
/* Copy the information into temporary variables. */
|
||||
@@ -141,24 +141,24 @@ TX_THREAD *thread_ptr;
|
||||
thread_ptr = suspension_list;
|
||||
while (suspended_count != ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
/* Check for a NULL thread pointer. */
|
||||
if (thread_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Get out of the loop. */
|
||||
break;
|
||||
}
|
||||
|
||||
/* Resume the next suspended thread. */
|
||||
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
@@ -182,7 +182,7 @@ TX_THREAD *thread_ptr;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Resume the thread. */
|
||||
_tx_thread_system_resume(thread_ptr -> tx_thread_suspended_previous);
|
||||
#endif
|
||||
|
||||
@@ -123,7 +123,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
if (queue_ptr -> tx_queue_available_storage != ((UINT) 0))
{

/* Yes there is room in the queue. Now determine if there is a thread waiting
/* Yes there is room in the queue. Now determine if there is a thread waiting
for a message. */
if (suspended_count == TX_NO_SUSPENSIONS)
{

@@ -137,20 +137,20 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* See if the read pointer is at the beginning of the queue area. */
if (queue_ptr -> tx_queue_read == queue_ptr -> tx_queue_start)
{

/* Adjust the read pointer to the last message at the end of the
queue. */
queue_ptr -> tx_queue_read = TX_ULONG_POINTER_SUB(queue_ptr -> tx_queue_end, queue_ptr -> tx_queue_message_size);
}
else
{

/* Not at the beginning of the queue, just move back one message. */
queue_ptr -> tx_queue_read = TX_ULONG_POINTER_SUB(queue_ptr -> tx_queue_read, queue_ptr -> tx_queue_message_size);
}

/* Simply place the message in the queue. */

/* Reduce the amount of available storage. */
queue_ptr -> tx_queue_available_storage--;
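/* Illustrative use of the read-pointer adjustment above (hypothetical names, not
part of this file): tx_queue_front_send stores a message one slot before the
current read pointer, so the next tx_queue_receive returns it ahead of any
messages already enqueued. */

#include "tx_api.h"

UINT example_send_urgent(TX_QUEUE *queue_ptr)
{
ULONG urgent_message[4] = {0xDEADBEEFUL, 0, 0, 0};

    /* Place the message at the front of a TX_4_ULONG queue without suspending. */
    return(tx_queue_front_send(queue_ptr, urgent_message, TX_NO_WAIT));
}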
|
||||
|
||||
@@ -162,7 +162,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
destination = queue_ptr -> tx_queue_read;
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
@@ -241,7 +241,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
@@ -300,7 +300,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
/* Yes, suspension is requested. */
|
||||
|
||||
/* Prepare for suspension of this thread. */
|
||||
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
TX_THREAD_GET_CURRENT(thread_ptr)
|
||||
|
||||
@@ -345,7 +345,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
next_thread -> tx_thread_suspended_previous = thread_ptr;
|
||||
|
||||
/* Update the suspension list to put this thread in front, which will put
|
||||
the message that was removed in the proper relative order when room is
|
||||
the message that was removed in the proper relative order when room is
|
||||
made in the queue. */
|
||||
queue_ptr -> tx_queue_suspension_list = thread_ptr;
|
||||
}
|
||||
|
||||
@@ -99,45 +99,45 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the queue. */
if (name != TX_NULL)
{

*name = queue_ptr -> tx_queue_name;
}

/* Retrieve the number of messages currently in the queue. */
if (enqueued != TX_NULL)
{

*enqueued = (ULONG) queue_ptr -> tx_queue_enqueued;
}

/* Retrieve the number of messages that will still fit in the queue. */
if (available_storage != TX_NULL)
{

*available_storage = (ULONG) queue_ptr -> tx_queue_available_storage;
}

/* Retrieve the first thread suspended on this queue. */
if (first_suspended != TX_NULL)
{

*first_suspended = queue_ptr -> tx_queue_suspension_list;
}

/* Retrieve the number of threads suspended on this queue. */
if (suspended_count != TX_NULL)
{

*suspended_count = (ULONG) queue_ptr -> tx_queue_suspended_count;
}

/* Retrieve the pointer to the next queue created. */
if (next_queue != TX_NULL)
{

*next_queue = queue_ptr -> tx_queue_created_next;
}

/* Restore interrupts. */
TX_RESTORE
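/* Sketch of the corresponding application call (hypothetical names, not part of
this file): every output pointer tested above may be passed as TX_NULL, so a
caller can request only the fields it needs. */

#include "tx_api.h"

UINT example_queue_depth(TX_QUEUE *queue_ptr, ULONG *enqueued, ULONG *free_slots)
{

    /* Only the enqueued count and remaining capacity are requested here. */
    return(tx_queue_info_get(queue_ptr, TX_NULL, enqueued, free_slots,
                             TX_NULL, TX_NULL, TX_NULL));
}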
|
||||
|
||||
|
||||
@@ -95,15 +95,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (queue_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Queue pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the queue ID is invalid. */
|
||||
else if (queue_ptr -> tx_queue_id != TX_QUEUE_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Queue pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -125,45 +125,45 @@ UINT status;
|
||||
/* Retrieve the number of messages sent to this queue. */
|
||||
if (messages_sent != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*messages_sent = queue_ptr -> tx_queue_performance_messages_sent_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of messages received from this queue. */
|
||||
if (messages_received != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*messages_received = queue_ptr -> tx_queue_performance_messages_received_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of empty queue suspensions on this queue. */
|
||||
if (empty_suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*empty_suspensions = queue_ptr -> tx_queue_performance_empty_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of full queue suspensions on this queue. */
|
||||
if (full_suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*full_suspensions = queue_ptr -> tx_queue_performance_full_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of full errors (no suspension!) on this queue. */
|
||||
if (full_errors != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*full_errors = queue_ptr -> tx_queue_performance_full_error_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of timeouts on this queue. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = queue_ptr -> tx_queue_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -106,51 +106,51 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of queue messages sent. */
|
||||
if (messages_sent != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*messages_sent = _tx_queue_performance_messages_sent_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of queue messages received. */
|
||||
if (messages_received != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*messages_received = _tx_queue_performance__messages_received_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of empty queue suspensions. */
|
||||
if (empty_suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*empty_suspensions = _tx_queue_performance_empty_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of full queue suspensions. */
|
||||
if (full_suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*full_suspensions = _tx_queue_performance_full_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of full errors. */
|
||||
if (full_errors != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*full_errors = _tx_queue_performance_full_error_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of queue timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_queue_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
|
||||
@@ -161,28 +161,28 @@ UINT list_changed;
|
||||
/* Disable interrupts again. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
/* Determine if any changes to the list have occurred while
|
||||
interrupts were enabled. */
|
||||
|
||||
|
||||
/* Is the list head the same? */
|
||||
if (head_ptr != queue_ptr -> tx_queue_suspension_list)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Is the suspended count the same? */
|
||||
if (suspended_count != queue_ptr -> tx_queue_suspended_count)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the list has changed. */
|
||||
if (list_changed == TX_FALSE)
|
||||
{
|
||||
@@ -212,12 +212,12 @@ UINT list_changed;
|
||||
/* Release preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
of the list. */
|
||||
if (priority_thread_ptr != head_ptr)
|
||||
{
|
||||
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
front of the list. */
|
||||
|
||||
/* First, remove the highest priority thread by updating the
|
||||
|
||||
@@ -118,7 +118,7 @@ UINT status;

/* Pickup the thread suspension count. */
suspended_count = queue_ptr -> tx_queue_suspended_count;

/* Determine if there is anything in the queue. */
if (queue_ptr -> tx_queue_enqueued != TX_NO_MESSAGES)
{

@@ -128,13 +128,13 @@ UINT status;
{

/* There is a message waiting in the queue and there are no suspensions. */

/* Setup source and destination pointers. */
source = queue_ptr -> tx_queue_read;
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
size = queue_ptr -> tx_queue_message_size;

/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)

@@ -145,10 +145,10 @@ UINT status;
/* Yes, wrap around to the beginning. */
source = queue_ptr -> tx_queue_start;
}

/* Setup the queue read pointer. */
queue_ptr -> tx_queue_read = source;

/* Increase the amount of available storage. */
queue_ptr -> tx_queue_available_storage++;
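/* Conceptual sketch of the wrap-around handling above (not the kernel code
itself): because tx_queue_end points one message past the last valid slot,
advancing the read pointer and comparing it against the end is all the circular
buffer needs. */

#include "tx_api.h"

static ULONG *example_advance_read_pointer(ULONG *read_ptr, ULONG *start,
                                           ULONG *end, UINT message_words)
{

    /* Step over the message that was just copied out. */
    read_ptr = read_ptr + message_words;

    /* Wrap back to the first slot when the one-past-the-end address is reached. */
    if (read_ptr == end)
    {
        read_ptr = start;
    }

    return(read_ptr);
}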
|
||||
|
||||
@@ -160,18 +160,18 @@ UINT status;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* At this point we know the queue is full. */
|
||||
|
||||
/* Pickup thread suspension list head pointer. */
|
||||
thread_ptr = queue_ptr -> tx_queue_suspension_list;
|
||||
|
||||
/* Now determine if there is a queue front suspension active. */
|
||||
|
||||
|
||||
/* Is the front suspension flag set? */
|
||||
if (thread_ptr -> tx_thread_suspend_option == TX_TRUE)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, a queue front suspension is present. */
|
||||
|
||||
/* Return the message associated with this suspension. */
|
||||
@@ -181,11 +181,11 @@ UINT status;
|
||||
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
/* Message is now in the caller's destination. See if this is the only suspended thread
|
||||
/* Message is now in the caller's destination. See if this is the only suspended thread
|
||||
on the list. */
|
||||
suspended_count--;
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
@@ -244,7 +244,7 @@ UINT status;
|
||||
else
|
||||
{
|
||||
|
||||
/* At this point, we know that the queue is full and there
|
||||
/* At this point, we know that the queue is full and there
|
||||
are one or more threads suspended trying to send another
|
||||
message to this queue. */
|
||||
|
||||
@@ -253,7 +253,7 @@ UINT status;
|
||||
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
@@ -267,7 +267,7 @@ UINT status;
|
||||
|
||||
/* Setup the queue read pointer. */
|
||||
queue_ptr -> tx_queue_read = source;
|
||||
|
||||
|
||||
/* Disable preemption. */
|
||||
_tx_thread_preempt_disable++;
|
||||
|
||||
@@ -291,14 +291,14 @@ UINT status;
|
||||
destination = queue_ptr -> tx_queue_write;
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
/* Determine if we are at the end. */
|
||||
if (destination == queue_ptr -> tx_queue_end)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, wrap around to the beginning. */
|
||||
destination = queue_ptr -> tx_queue_start;
|
||||
}
|
||||
@@ -309,7 +309,7 @@ UINT status;
|
||||
/* Pickup thread pointer. */
|
||||
thread_ptr = queue_ptr -> tx_queue_suspension_list;
|
||||
|
||||
/* Message is now in the queue. See if this is the only suspended thread
|
||||
/* Message is now in the queue. See if this is the only suspended thread
|
||||
on the list. */
|
||||
suspended_count--;
|
||||
if (suspended_count == TX_NO_SUSPENSIONS)
|
||||
@@ -378,7 +378,7 @@ UINT status;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Suspension is not allowed if the preempt disable flag is non-zero at this point - return error completion. */
|
||||
status = TX_QUEUE_EMPTY;
|
||||
}
|
||||
@@ -395,7 +395,7 @@ UINT status;
|
||||
/* Increment the number of empty suspensions on this queue. */
|
||||
queue_ptr -> tx_queue_performance_empty_suspension_count++;
|
||||
#endif
|
||||
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
TX_THREAD_GET_CURRENT(thread_ptr)
|
||||
|
||||
@@ -477,7 +477,7 @@ UINT status;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Immediate return, return error completion. */
|
||||
status = TX_QUEUE_EMPTY;
|
||||
}
|
||||
|
||||
@@ -81,7 +81,7 @@ UINT _tx_queue_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD *thread_ptr;
ULONG *source;
ULONG *destination;

@@ -128,9 +128,9 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* Determine if there are any threads suspended on this queue. */
if (suspended_count == TX_NO_SUSPENSIONS)
{

/* No suspended threads, simply place the message in the queue. */

/* Reduce the amount of available storage. */
queue_ptr -> tx_queue_available_storage--;

@@ -142,7 +142,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
destination = queue_ptr -> tx_queue_write;
size = queue_ptr -> tx_queue_message_size;

/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
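/* Conceptual equivalent of the copy macro invoked above (the real macro in
tx_queue.h may be implemented differently, for example unrolled per message
size): it copies "size" ULONG words and leaves both pointers advanced past the
message, which is why the surrounding code never increments them explicitly. */

#define EXAMPLE_QUEUE_MESSAGE_COPY(source, destination, size)               \
    {                                                                       \
    UINT _example_index;                                                    \
        for (_example_index = ((UINT) 0); _example_index < (size); _example_index++) \
        {                                                                   \
            *(destination)++ = *(source)++;                                 \
        }                                                                   \
    }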
|
||||
|
||||
@@ -182,7 +182,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
else
|
||||
{
|
||||
|
||||
/* There is a thread suspended on an empty queue. Simply
|
||||
/* There is a thread suspended on an empty queue. Simply
|
||||
copy the message to the suspended thread's destination
|
||||
pointer. */
|
||||
|
||||
@@ -230,7 +230,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
|
||||
size = queue_ptr -> tx_queue_message_size;
|
||||
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
/* Copy message. Note that the source and destination pointers are
|
||||
incremented by the macro. */
|
||||
TX_QUEUE_MESSAGE_COPY(source, destination, size)
|
||||
|
||||
@@ -274,7 +274,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* At this point, the queue is full. Determine if suspension is requested. */
|
||||
else if (wait_option != TX_NO_WAIT)
|
||||
{
|
||||
@@ -302,7 +302,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
|
||||
/* Increment the number of full suspensions on this queue. */
|
||||
queue_ptr -> tx_queue_performance_full_suspension_count++;
|
||||
#endif
|
||||
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
TX_THREAD_GET_CURRENT(thread_ptr)
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ UINT suspended_count;
|
||||
TX_THREAD *next_thread;
|
||||
TX_THREAD *previous_thread;
|
||||
|
||||
|
||||
|
||||
|
||||
#ifndef TX_NOT_INTERRUPTABLE
|
||||
|
||||
@@ -97,7 +97,7 @@ TX_THREAD *previous_thread;
|
||||
/* Determine if the cleanup is still required. */
|
||||
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_semaphore_cleanup))
|
||||
{
|
||||
|
||||
|
||||
/* Check for valid suspension sequence. */
|
||||
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
|
||||
{
|
||||
@@ -121,7 +121,7 @@ TX_THREAD *previous_thread;
|
||||
/* Setup pointer to semaphore control block. */
|
||||
semaphore_ptr = TX_VOID_TO_SEMAPHORE_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
|
||||
#endif
|
||||
|
||||
|
||||
/* Yes, we still have thread suspension! */
|
||||
|
||||
/* Clear the suspension cleanup flag. */
|
||||
@@ -140,7 +140,7 @@ TX_THREAD *previous_thread;
|
||||
{
|
||||
|
||||
/* Yes, the only suspended thread. */
|
||||
|
||||
|
||||
/* Update the head pointer. */
|
||||
semaphore_ptr -> tx_semaphore_suspension_list = TX_NULL;
|
||||
}
|
||||
@@ -154,7 +154,7 @@ TX_THREAD *previous_thread;
|
||||
previous_thread = thread_ptr -> tx_thread_suspended_previous;
|
||||
next_thread -> tx_thread_suspended_previous = previous_thread;
|
||||
previous_thread -> tx_thread_suspended_next = next_thread;
|
||||
|
||||
|
||||
/* Determine if we need to update the head pointer. */
|
||||
if (semaphore_ptr -> tx_semaphore_suspension_list == thread_ptr)
|
||||
{
|
||||
|
||||
@@ -87,7 +87,7 @@ TX_SEMAPHORE *previous_semaphore;
|
||||
/* Setup the basic semaphore fields. */
|
||||
semaphore_ptr -> tx_semaphore_name = name_ptr;
|
||||
semaphore_ptr -> tx_semaphore_count = initial_count;
|
||||
|
||||
|
||||
/* Disable interrupts to place the semaphore on the created list. */
|
||||
TX_DISABLE
|
||||
|
||||
@@ -119,7 +119,7 @@ TX_SEMAPHORE *previous_semaphore;
|
||||
semaphore_ptr -> tx_semaphore_created_previous = previous_semaphore;
|
||||
semaphore_ptr -> tx_semaphore_created_next = next_semaphore;
|
||||
}
|
||||
|
||||
|
||||
/* Increment the created count. */
|
||||
_tx_semaphore_created_count++;
|
||||
|
||||
|
||||
@@ -126,7 +126,7 @@ TX_SEMAPHORE *previous_semaphore;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_semaphore_created_ptr == semaphore_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_semaphore_created_ptr = next_semaphore;
|
||||
}
|
||||
@@ -148,14 +148,14 @@ TX_SEMAPHORE *previous_semaphore;
|
||||
on this semaphore. */
|
||||
while (suspended_count != TX_NO_SUSPENSIONS)
|
||||
{
|
||||
|
||||
|
||||
/* Decrement the suspension count. */
|
||||
suspended_count--;
|
||||
|
||||
/* Lockout interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
/* Clear the cleanup pointer, this prevents the timeout from doing
|
||||
anything. */
|
||||
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
|
||||
|
||||
@@ -179,7 +179,7 @@ TX_SEMAPHORE *previous_semaphore;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Resume the thread. */
|
||||
_tx_thread_system_resume(thread_ptr);
|
||||
#endif
|
||||
|
||||
@@ -77,7 +77,7 @@ UINT _tx_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option)
|
||||
{
|
||||
|
||||
TX_INTERRUPT_SAVE_AREA
|
||||
|
||||
|
||||
TX_THREAD *thread_ptr;
|
||||
TX_THREAD *next_thread;
|
||||
TX_THREAD *previous_thread;
|
||||
@@ -123,7 +123,7 @@ UINT status;
|
||||
/* Determine if the preempt disable flag is non-zero. */
|
||||
if (_tx_thread_preempt_disable != ((UINT) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
@@ -143,7 +143,7 @@ UINT status;
|
||||
/* Increment the number of suspensions on this semaphore. */
|
||||
semaphore_ptr -> tx_semaphore_performance_suspension_count++;
|
||||
#endif
|
||||
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
TX_THREAD_GET_CURRENT(thread_ptr)
|
||||
|
||||
|
||||
@@ -77,8 +77,8 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
|
||||
TX_THREAD **first_suspended, ULONG *suspended_count,
|
||||
TX_SEMAPHORE **next_semaphore)
|
||||
{
|
||||
|
||||
@@ -100,38 +100,38 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the semaphore. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = semaphore_ptr -> tx_semaphore_name;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the current value of the semaphore. */
|
||||
if (current_value != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*current_value = semaphore_ptr -> tx_semaphore_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the first thread suspended on this semaphore. */
|
||||
if (first_suspended != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*first_suspended = semaphore_ptr -> tx_semaphore_suspension_list;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of threads suspended on this semaphore. */
|
||||
if (suspended_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspended_count = (ULONG) semaphore_ptr -> tx_semaphore_suspended_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the pointer to the next semaphore created. */
|
||||
if (next_semaphore != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_semaphore = semaphore_ptr -> tx_semaphore_created_next;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -93,15 +93,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (semaphore_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Semaphore pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the semaphore ID is invalid. */
|
||||
else if (semaphore_ptr -> tx_semaphore_id != TX_SEMAPHORE_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Semaphore pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -123,31 +123,31 @@ UINT status;
|
||||
/* Retrieve the number of puts on this semaphore. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*puts = semaphore_ptr -> tx_semaphore_performance_put_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of gets on this semaphore. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = semaphore_ptr -> tx_semaphore_performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of suspensions on this semaphore. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = semaphore_ptr -> tx_semaphore_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the number of timeouts on this semaphore. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = semaphore_ptr -> tx_semaphore_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
@@ -161,37 +161,37 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (semaphore_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -101,37 +101,37 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the total number of semaphore puts. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*puts = _tx_semaphore_performance_put_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of semaphore gets. */
|
||||
if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*gets = _tx_semaphore_performance_get_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of semaphore suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_semaphore_performance_suspension_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the total number of semaphore timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_semaphore_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
@@ -140,31 +140,31 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (puts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (gets != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -161,28 +161,28 @@ UINT list_changed;
|
||||
/* Disable interrupts again. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if any changes to the list have occurred while
|
||||
/* Determine if any changes to the list have occurred while
|
||||
interrupts were enabled. */
|
||||
|
||||
|
||||
/* Is the list head the same? */
|
||||
if (head_ptr != semaphore_ptr -> tx_semaphore_suspension_list)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Is the suspended count the same? */
|
||||
if (suspended_count != semaphore_ptr -> tx_semaphore_suspended_count)
|
||||
{
|
||||
|
||||
|
||||
/* The list head has changed, set the list changed flag. */
|
||||
list_changed = TX_TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the list has changed. */
|
||||
if (list_changed == TX_FALSE)
|
||||
{
|
||||
@@ -214,12 +214,12 @@ UINT list_changed;
|
||||
/* Release preemption. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
/* Now determine if the highest priority thread is at the front
|
||||
of the list. */
|
||||
if (priority_thread_ptr != head_ptr)
|
||||
{
|
||||
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
/* No, we need to move the highest priority suspended thread to the
|
||||
front of the list. */
|
||||
|
||||
/* First, remove the highest priority thread by updating the
|
||||
|
||||
@@ -139,7 +139,7 @@ TX_THREAD *previous_thread;
|
||||
{
|
||||
|
||||
/* A thread is suspended on this semaphore. */
|
||||
|
||||
|
||||
/* Pickup the pointer to the first suspended thread. */
|
||||
thread_ptr = semaphore_ptr -> tx_semaphore_suspension_list;
|
||||
|
||||
|
||||
@@ -118,7 +118,7 @@ ALIGN_TYPE updated_stack_start;

#ifdef TX_ENABLE_STACK_CHECKING

/* Ensure that there are two ULONG of 0xEF patterns at the top and
/* Ensure that there are two ULONG of 0xEF patterns at the top and
bottom of the thread's stack. This will be used to check for stack
overflow conditions during run-time. */
stack_size = ((stack_size/(sizeof(ULONG))) * (sizeof(ULONG))) - (sizeof(ULONG));
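/* Application-side sketch related to the 0xEF fill above (hypothetical names, not
part of this file): when stack checking is built in, a handler registered through
tx_thread_stack_error_notify is invoked if the pattern at either end of a
thread's stack is found overwritten. */

#include "tx_api.h"

VOID example_stack_error_handler(TX_THREAD *thread_ptr)
{

    /* A real handler might log thread_ptr -> tx_thread_name, then halt or reset. */
    TX_PARAMETER_NOT_USED(thread_ptr);
}

UINT example_register_stack_handler(VOID)
{

    /* Register the global stack-error callback with the kernel. */
    return(tx_thread_stack_error_notify(example_stack_error_handler));
}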
|
||||
@@ -134,7 +134,7 @@ ALIGN_TYPE updated_stack_start;
|
||||
/* Determine if the starting stack address is different. */
|
||||
if (new_stack_start != updated_stack_start)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, subtract another ULONG from the size to avoid going past the stack area. */
|
||||
stack_size = stack_size - (sizeof(ULONG));
|
||||
}
|
||||
@@ -204,7 +204,7 @@ ALIGN_TYPE updated_stack_start;
|
||||
/* Perform any additional thread setup activities for tool or user purpose. */
|
||||
TX_THREAD_CREATE_INTERNAL_EXTENSION(thread_ptr)
|
||||
|
||||
/* Call the target specific stack frame building routine to build the
|
||||
/* Call the target specific stack frame building routine to build the
|
||||
thread's initial stack and to setup the actual stack pointer in the
|
||||
control block. */
|
||||
_tx_thread_stack_build(thread_ptr, _tx_thread_shell_entry);
|
||||
@@ -246,7 +246,7 @@ ALIGN_TYPE updated_stack_start;
|
||||
thread_ptr -> tx_thread_created_previous = previous_thread;
|
||||
thread_ptr -> tx_thread_created_next = next_thread;
|
||||
}
|
||||
|
||||
|
||||
/* Increment the thread created count. */
|
||||
_tx_thread_created_count++;
|
||||
|
||||
@@ -280,22 +280,22 @@ ALIGN_TYPE updated_stack_start;
|
||||
/* Yes, this create call was made from initialization. */
|
||||
|
||||
/* Pickup the current thread execute pointer, which corresponds to the
|
||||
highest priority thread ready to execute. Interrupt lockout is
|
||||
not required, since interrupts are assumed to be disabled during
|
||||
highest priority thread ready to execute. Interrupt lockout is
|
||||
not required, since interrupts are assumed to be disabled during
|
||||
initialization. */
|
||||
saved_thread_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
/* Determine if there is thread ready for execution. */
|
||||
if (saved_thread_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, a thread is ready for execution when initialization completes. */
|
||||
|
||||
/* Save the current preemption-threshold. */
|
||||
saved_threshold = saved_thread_ptr -> tx_thread_preempt_threshold;
|
||||
|
||||
/* For initialization, temporarily set the preemption-threshold to the
|
||||
priority level to make sure the highest-priority thread runs once
|
||||
/* For initialization, temporarily set the preemption-threshold to the
|
||||
priority level to make sure the highest-priority thread runs once
|
||||
initialization is complete. */
|
||||
saved_thread_ptr -> tx_thread_preempt_threshold = saved_thread_ptr -> tx_thread_priority;
|
||||
}
|
||||
@@ -328,7 +328,7 @@ ALIGN_TYPE updated_stack_start;
|
||||
/* Call the resume thread function to make this thread ready. */
|
||||
_tx_thread_system_resume(thread_ptr);
|
||||
#endif
|
||||
|
||||
|
||||
/* Determine if the thread's preemption-threshold needs to be restored. */
|
||||
if (saved_thread_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
@@ -83,7 +83,7 @@ UINT status;
|
||||
|
||||
/* Default status to success. */
|
||||
status = TX_SUCCESS;
|
||||
|
||||
|
||||
/* Lockout interrupts while the thread is being deleted. */
|
||||
TX_DISABLE
|
||||
|
||||
@@ -106,7 +106,7 @@ UINT status;
|
||||
/* Determine if the delete operation is okay. */
|
||||
if (status == TX_SUCCESS)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, continue with deleting the thread. */
|
||||
|
||||
/* Perform any additional activities for tool or user purpose. */
|
||||
@@ -129,7 +129,7 @@ UINT status;
|
||||
|
||||
/* Decrement the number of created threads. */
|
||||
_tx_thread_created_count--;
|
||||
|
||||
|
||||
/* See if the thread is the only one on the list. */
|
||||
if (_tx_thread_created_count == TX_EMPTY)
|
||||
{
|
||||
@@ -149,7 +149,7 @@ UINT status;
|
||||
/* See if we have to update the created list head pointer. */
|
||||
if (_tx_thread_created_ptr == thread_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, move the head pointer to the next link. */
|
||||
_tx_thread_created_ptr = next_thread;
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ TX_THREAD *thread_ptr;
|
||||
|
||||
TX_INTERRUPT_SAVE_AREA
|
||||
|
||||
|
||||
|
||||
/* Disable interrupts to put the timer on the created list. */
|
||||
TX_DISABLE
|
||||
|
||||
|
||||
@@ -80,8 +80,8 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
|
||||
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
|
||||
TX_THREAD **next_thread, TX_THREAD **next_suspended_thread)
|
||||
{
|
||||
|
||||
@@ -103,59 +103,59 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve the name of the thread. */
|
||||
if (name != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*name = thread_ptr -> tx_thread_name;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the thread's current state. */
|
||||
if (state != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*state = thread_ptr -> tx_thread_state;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the number of times the thread has been scheduled. */
|
||||
if (run_count != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*run_count = thread_ptr -> tx_thread_run_count;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the thread's priority. */
|
||||
if (priority != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*priority = thread_ptr -> tx_thread_user_priority;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the thread's preemption-threshold. */
|
||||
if (preemption_threshold != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*preemption_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the thread's current time-slice. */
|
||||
if (time_slice != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*time_slice = thread_ptr -> tx_thread_time_slice;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the next created thread. */
|
||||
if (next_thread != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_thread = thread_ptr -> tx_thread_created_next;
|
||||
}
|
||||
|
||||
|
||||
/* Pickup the next thread suspended. */
|
||||
if (next_suspended_thread != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*next_suspended_thread = thread_ptr -> tx_thread_suspended_next;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);
ULONG _tx_build_options;

#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)

/* Define the global function pointer for stack error handling. If a stack error is
detected and the application has registered a stack error handler, it will be

@@ -277,7 +277,7 @@ const CHAR _tx_thread_special_string[] =
/* FUNCTION RELEASE */
/* */
/* _tx_thread_initialize PORTABLE C */
/* 6.1 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */

@@ -312,7 +312,10 @@ const CHAR _tx_thread_special_string[] =
/* resulting in version 6.1 */
/* 06-02-2021 Yuxin Zhou Modified comment(s), added */
/* Execution Profile support, */
/* resulting in version 6.1.7 */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
VOID _tx_thread_initialize(VOID)

@@ -97,7 +97,7 @@
|
||||
/* resulting in version 6.1 */
|
||||
/* */
|
||||
/**************************************************************************/
|
||||
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
|
||||
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
|
||||
ULONG *solicited_preemptions, ULONG *interrupt_preemptions, ULONG *priority_inversions,
|
||||
ULONG *time_slices, ULONG *relinquishes, ULONG *timeouts, ULONG *wait_aborts, TX_THREAD **last_preempted_by)
|
||||
{
|
||||
@@ -111,15 +111,15 @@ UINT status;
|
||||
/* Determine if this is a legal request. */
|
||||
if (thread_ptr == TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Thread pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the thread ID is invalid. */
|
||||
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
|
||||
{
|
||||
|
||||
|
||||
/* Thread pointer is illegal, return error. */
|
||||
status = TX_PTR_ERROR;
|
||||
}
|
||||
@@ -141,73 +141,73 @@ UINT status;
|
||||
/* Retrieve number of resumptions for this thread. */
|
||||
if (resumptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*resumptions = thread_ptr -> tx_thread_performance_resume_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of suspensions for this thread. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = thread_ptr -> tx_thread_performance_suspend_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of solicited preemptions for this thread. */
|
||||
if (solicited_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*solicited_preemptions = thread_ptr -> tx_thread_performance_solicited_preemption_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of interrupt preemptions for this thread. */
|
||||
if (interrupt_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*interrupt_preemptions = thread_ptr -> tx_thread_performance_interrupt_preemption_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of priority inversions for this thread. */
|
||||
if (priority_inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*priority_inversions = thread_ptr -> tx_thread_performance_priority_inversion_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of time-slices for this thread. */
|
||||
if (time_slices != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*time_slices = thread_ptr -> tx_thread_performance_time_slice_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of relinquishes for this thread. */
|
||||
if (relinquishes != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*relinquishes = thread_ptr -> tx_thread_performance_relinquish_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of timeouts for this thread. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = thread_ptr -> tx_thread_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve number of wait aborts for this thread. */
|
||||
if (wait_aborts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*wait_aborts = thread_ptr -> tx_thread_performance_wait_abort_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve the pointer of the last thread that preempted this thread. */
|
||||
if (last_preempted_by != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*last_preempted_by = thread_ptr -> tx_thread_performance_last_preempting_thread;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -123,86 +123,86 @@ TX_INTERRUPT_SAVE_AREA
|
||||
/* Retrieve total number of thread resumptions. */
|
||||
if (resumptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*resumptions = _tx_thread_performance_resume_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread suspensions. */
|
||||
if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*suspensions = _tx_thread_performance_suspend_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of solicited thread preemptions. */
|
||||
if (solicited_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*solicited_preemptions = _tx_thread_performance_solicited_preemption_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of interrupt thread preemptions. */
|
||||
if (interrupt_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*interrupt_preemptions = _tx_thread_performance_interrupt_preemption_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread priority inversions. */
|
||||
if (priority_inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*priority_inversions = _tx_thread_performance_priority_inversion_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread time-slices. */
|
||||
if (time_slices != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*time_slices = _tx_thread_performance_time_slice_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread relinquishes. */
|
||||
if (relinquishes != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*relinquishes = _tx_thread_performance_relinquish_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread timeouts. */
|
||||
if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*timeouts = _tx_thread_performance_timeout_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread wait aborts. */
|
||||
if (wait_aborts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*wait_aborts = _tx_thread_performance_wait_abort_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread non-idle system returns. */
|
||||
if (non_idle_returns != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*non_idle_returns = _tx_thread_performance_non_idle_return_count;
|
||||
}
|
||||
|
||||
|
||||
/* Retrieve total number of thread idle system returns. */
|
||||
if (idle_returns != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
*idle_returns = _tx_thread_performance_idle_return_count;
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Return completion status. */
|
||||
return(TX_SUCCESS);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
UINT status;
|
||||
@@ -211,73 +211,73 @@ UINT status;
|
||||
/* Access input arguments just for the sake of lint, MISRA, etc. */
|
||||
if (resumptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (suspensions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (solicited_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (interrupt_preemptions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (priority_inversions != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (time_slices != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (relinquishes != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (timeouts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (wait_aborts != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (non_idle_returns != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else if (idle_returns != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Not enabled, return error. */
|
||||
status = TX_FEATURE_NOT_ENABLED;
|
||||
}
|
||||
|
||||
@@ -93,16 +93,16 @@ UINT status;

#ifdef TX_DISABLE_PREEMPTION_THRESHOLD

/* Only allow 0 (disable all preemption) and returning preemption-threshold to the
/* Only allow 0 (disable all preemption) and returning preemption-threshold to the
current thread priority if preemption-threshold is disabled. All other threshold
values are converted to 0. */
if (thread_ptr -> tx_thread_user_priority != new_threshold)
{

/* Is the new threshold zero? */
if (new_threshold != ((UINT) 0))
{

/* Convert the new threshold to disable all preemption, since preemption-threshold is
not supported. */
new_threshold = ((UINT) 0);

@@ -122,7 +122,7 @@ UINT status;
/* Determine if the new threshold is greater than the current user priority. */
if (new_threshold > thread_ptr -> tx_thread_user_priority)
{

/* Return error. */
status = TX_THRESH_ERROR;
}
|
||||
@@ -174,13 +174,13 @@ UINT status;
|
||||
/* Determine if the new threshold represents a higher priority than the priority inheritance threshold. */
|
||||
if (new_threshold < thread_ptr -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Update the actual preemption-threshold with the new threshold. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_threshold;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Update the actual preemption-threshold with the priority inheritance. */
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
|
||||
}
|
||||
@@ -240,18 +240,18 @@ UINT status;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -265,7 +265,7 @@ UINT status;
|
||||
|
||||
/* Check for preemption. */
|
||||
_tx_thread_system_preempt_check();
|
||||
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
}
|
||||
@@ -275,7 +275,7 @@ UINT status;
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
/* Return completion status. */
|
||||
return(status);
|
||||
}
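The hunks above belong to the preemption-threshold change service. A minimal usage sketch follows, not part of this commit; the demo function name and the threshold value are assumptions, and the call only succeeds if the worker thread's priority is numerically 10 or greater (otherwise TX_THRESH_ERROR is returned, as shown above).

/* Illustrative usage sketch (not part of this commit). */

UINT demo_raise_preemption_threshold(TX_THREAD *worker_thread)
{
UINT old_threshold;

    /* While the threshold is 10, only threads with priorities 0 through 9
       may preempt the worker; the previous threshold is returned. */
    return(tx_thread_preemption_change(worker_thread, 10, &old_threshold));
}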
|
||||
|
||||
@@ -116,19 +116,19 @@ UINT original_priority;
|
||||
block. */
|
||||
thread_ptr -> tx_thread_user_priority = new_priority;
|
||||
thread_ptr -> tx_thread_user_preempt_threshold = new_priority;
|
||||
|
||||
|
||||
/* Determine if the actual thread priority should be setup, which is the
|
||||
case if the new priority is higher than the priority inheritance. */
|
||||
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the new user's priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the priority inheritance. */
|
||||
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
|
||||
@@ -157,7 +157,7 @@ UINT original_priority;
|
||||
/* Call actual non-interruptable thread suspension routine. */
|
||||
_tx_thread_system_ni_suspend(thread_ptr, ((ULONG) 0));
|
||||
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
protection against all preemption. */
|
||||
|
||||
/* Setup the new priority for this thread. */
|
||||
@@ -168,14 +168,14 @@ UINT original_priority;
|
||||
case if the new priority is higher than the priority inheritance. */
|
||||
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the new user's priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the priority inheritance. */
|
||||
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
|
||||
@@ -186,7 +186,7 @@ UINT original_priority;
|
||||
|
||||
#else
|
||||
|
||||
/* Increment the preempt disable flag by 2 to prevent system suspend from
|
||||
/* Increment the preempt disable flag by 2 to prevent system suspend from
|
||||
returning to the system. */
|
||||
_tx_thread_preempt_disable = _tx_thread_preempt_disable + ((UINT) 3);
|
||||
|
||||
@@ -199,11 +199,11 @@ UINT original_priority;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* The thread is ready and must first be removed from the list. Call the
|
||||
/* The thread is ready and must first be removed from the list. Call the
|
||||
system suspend function to accomplish this. */
|
||||
_tx_thread_system_suspend(thread_ptr);
|
||||
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
/* At this point, the preempt disable flag is still set, so we still have
|
||||
protection against all preemption. */
|
||||
|
||||
/* Setup the new priority for this thread. */
|
||||
@@ -214,14 +214,14 @@ UINT original_priority;
|
||||
case if the new priority is higher than the priority inheritance. */
|
||||
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the new user's priority. */
|
||||
thread_ptr -> tx_thread_priority = new_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = new_priority;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Change thread priority to the priority inheritance. */
|
||||
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
|
||||
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
|
||||
@@ -236,26 +236,26 @@ UINT original_priority;
|
||||
|
||||
/* Decrement the preempt disable flag. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
|
||||
/* Pickup the next thread to execute. */
|
||||
next_execute_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
/* Determine if this thread is not the next thread to execute. */
|
||||
if (thread_ptr != next_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Make sure the thread is still ready. */
|
||||
if (thread_ptr -> tx_thread_state == TX_READY)
|
||||
{
|
||||
|
||||
|
||||
/* Now check and see if this thread has an equal or higher priority. */
|
||||
if (thread_ptr -> tx_thread_priority <= next_execute_ptr -> tx_thread_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Now determine if this thread was the previously executing thread. */
|
||||
if (thread_ptr == execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, this thread was previously executing before we temporarily suspended and resumed
|
||||
it in order to change the priority. A lower or same priority thread cannot be the next thread
|
||||
to execute in this case since this thread really didn't suspend. Simply reset the execute
|
||||
@@ -265,7 +265,7 @@ UINT original_priority;
|
||||
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
|
||||
if (original_priority < new_priority)
|
||||
{
|
||||
|
||||
|
||||
/* Ensure that this thread is placed at the front of the priority list. */
|
||||
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
|
||||
}
|
||||
@@ -273,7 +273,7 @@ UINT original_priority;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ TX_THREAD *thread_ptr;
|
||||
/* Yes, there is another thread at this priority, make it the highest at
|
||||
this priority level. */
|
||||
_tx_thread_priority_list[priority] = thread_ptr -> tx_thread_ready_next;
|
||||
|
||||
|
||||
/* Mark the new thread as the one to execute. */
|
||||
_tx_thread_execute_ptr = thread_ptr -> tx_thread_ready_next;
|
||||
}
|
||||
|
||||
@@ -135,7 +135,7 @@ UINT status;
|
||||
TX_MEMSET(thread_ptr -> tx_thread_stack_start, ((UCHAR) TX_STACK_FILL), thread_ptr -> tx_thread_stack_size);
|
||||
#endif
|
||||
|
||||
/* Call the target specific stack frame building routine to build the
|
||||
/* Call the target specific stack frame building routine to build the
|
||||
thread's initial stack and to setup the actual stack pointer in the
|
||||
control block. */
|
||||
_tx_thread_stack_build(thread_ptr, _tx_thread_shell_entry);
|
||||
|
||||
@@ -128,22 +128,22 @@ UINT map_index;
|
||||
/* Yes, this resume call was made from initialization. */
|
||||
|
||||
/* Pickup the current thread execute pointer, which corresponds to the
|
||||
highest priority thread ready to execute. Interrupt lockout is
|
||||
not required, since interrupts are assumed to be disabled during
|
||||
highest priority thread ready to execute. Interrupt lockout is
|
||||
not required, since interrupts are assumed to be disabled during
|
||||
initialization. */
|
||||
saved_thread_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
/* Determine if there is thread ready for execution. */
|
||||
if (saved_thread_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, a thread is ready for execution when initialization completes. */
|
||||
|
||||
/* Save the current preemption-threshold. */
|
||||
saved_threshold = saved_thread_ptr -> tx_thread_preempt_threshold;
|
||||
|
||||
/* For initialization, temporarily set the preemption-threshold to the
|
||||
priority level to make sure the highest-priority thread runs once
|
||||
/* For initialization, temporarily set the preemption-threshold to the
|
||||
priority level to make sure the highest-priority thread runs once
|
||||
initialization is complete. */
|
||||
saved_thread_ptr -> tx_thread_preempt_threshold = saved_thread_ptr -> tx_thread_priority;
|
||||
}
|
||||
@@ -184,7 +184,7 @@ UINT map_index;
|
||||
can only happen if this routine is called from initialization. */
|
||||
saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
|
||||
}
|
||||
|
||||
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
|
||||
/* Disable interrupts. */
|
||||
@@ -197,11 +197,11 @@ UINT map_index;
|
||||
/* Return successful completion. */
|
||||
return(TX_SUCCESS);
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
#else
|
||||
|
||||
/* In-line thread resumption processing follows, which is effectively just taking the
|
||||
/* In-line thread resumption processing follows, which is effectively just taking the
|
||||
logic in tx_thread_system_resume.c and placing it here! */
|
||||
|
||||
/* Resume the thread! */
|
||||
@@ -211,14 +211,14 @@ UINT map_index;
|
||||
/* If trace is enabled, save the current event pointer. */
|
||||
entry_ptr = _tx_trace_buffer_current_ptr;
|
||||
#endif
|
||||
|
||||
|
||||
/* Log the thread status change. */
|
||||
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)
|
||||
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -288,7 +288,7 @@ UINT map_index;
|
||||
/* Pickup the execute pointer. Since it is going to be referenced multiple
|
||||
times, it is placed in a local variable. */
|
||||
execute_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
|
||||
/* Determine if no thread is currently executing. */
|
||||
if (execute_ptr == TX_NULL)
|
||||
{
|
||||
@@ -300,7 +300,7 @@ UINT map_index;
|
||||
{
|
||||
|
||||
/* Another thread has been scheduled for execution. */
|
||||
|
||||
|
||||
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
|
||||
if (priority < execute_ptr -> tx_thread_preempt_threshold)
|
||||
{
|
||||
@@ -341,7 +341,7 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
|
||||
{
|
||||
|
||||
@@ -378,18 +378,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -402,11 +402,11 @@ UINT map_index;
|
||||
resume event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
|
||||
be used by the trace analysis tool to show idle system conditions. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -453,7 +453,7 @@ UINT map_index;
|
||||
{
|
||||
|
||||
/* No, there are other threads at this priority already ready. */
|
||||
|
||||
|
||||
/* Just add this thread to the priority list. */
|
||||
tail_ptr = head_ptr -> tx_thread_ready_previous;
|
||||
tail_ptr -> tx_thread_ready_next = thread_ptr;
|
||||
@@ -469,18 +469,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -517,7 +517,7 @@ UINT map_index;
|
||||
can only happen if this routine is called from initialization. */
|
||||
saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
|
||||
}
|
||||
|
||||
|
||||
/* Setup successful return status. */
|
||||
status = TX_SUCCESS;
|
||||
#endif
|
||||
|
||||
@@ -119,7 +119,7 @@ VOID (*entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type);

if (_tx_thread_mutex_release != TX_NULL)
{

/* Yes, call the mutex release function via a function pointer that
/* Yes, call the mutex release function via a function pointer that
is setup during mutex initialization. */
(_tx_thread_mutex_release)(thread_ptr);
}
@@ -94,18 +94,18 @@ TX_THREAD *thread_ptr;

/* Restore interrupts. */
TX_RESTORE

/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}

/* Is the caller an ISR or Initialization? */
else if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{

/* Restore interrupts. */
TX_RESTORE

/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}

@@ -118,7 +118,7 @@ TX_THREAD *thread_ptr;

/* Restore interrupts. */
TX_RESTORE

/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}

@@ -130,7 +130,7 @@ TX_THREAD *thread_ptr;

/* Restore interrupts. */
TX_RESTORE

/* Just return with a successful status. */
status = TX_SUCCESS;
}

@@ -143,13 +143,13 @@ TX_THREAD *thread_ptr;

/* Restore interrupts. */
TX_RESTORE

/* Suspension is not allowed if the preempt disable flag is non-zero at this point - return error completion. */
status = TX_CALLER_ERROR;
}
else
{

/* If trace is enabled, insert this event into the trace buffer. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SLEEP, TX_ULONG_TO_POINTER_CONVERT(timer_ticks), thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)

@@ -193,7 +193,7 @@ TX_THREAD *thread_ptr;

status = thread_ptr -> tx_thread_suspend_status;
}
}

/* Return completion status. */
return(status);
}
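The caller checks above belong to the thread sleep service: it may only be called from thread context, and calls from an ISR, a timer, or initialization return TX_CALLER_ERROR. A minimal sketch of a thread that uses it follows; it is illustrative only and not part of this commit, and the entry function name is hypothetical.

/* Illustrative usage sketch (not part of this commit). */

VOID demo_periodic_thread_entry(ULONG input)
{

    /* Parameter not used in this sketch. */
    (VOID) input;

    while(1)
    {

        /* Do the periodic work here, then sleep for 100 timer ticks. */
        tx_thread_sleep(100);
    }
}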
@@ -94,7 +94,7 @@ ULONG size;

/* Pickup the current stack variables. */
stack_lowest = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_start);

/* Determine if the pointer is null. */
if (stack_lowest != TX_NULL)
{

@@ -105,11 +105,11 @@ ULONG size;

/* Determine if the pointer is null. */
if (stack_highest != TX_NULL)
{

/* Restore interrupts. */
TX_RESTORE

/* We need to binary search the remaining stack for missing 0xEFEFEFEF 32-bit data pattern.
/* We need to binary search the remaining stack for missing 0xEFEFEFEF 32-bit data pattern.
This is a best effort algorithm to find the highest stack usage. */
do
{

@@ -137,7 +137,7 @@ ULONG size;

/* Position to first used word - at this point we are within a few words. */
while (*stack_ptr == TX_STACK_FILL)
{

/* Position to next word in stack. */
stack_ptr = TX_ULONG_POINTER_ADD(stack_ptr, 1);
}

@@ -153,19 +153,19 @@ ULONG size;

{

/* Yes, thread is still created. */

/* Now check the new highest stack pointer is past the stack start. */
if (stack_ptr > (TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_start)))
{

/* Yes, now check that the new highest stack pointer is less than the previous highest stack pointer. */
if (stack_ptr < (TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_highest_ptr)))
{

/* Yes, is the current highest stack pointer pointing at used memory? */
if (*stack_ptr != TX_STACK_FILL)
{

/* Yes, setup the highest stack usage. */
thread_ptr -> tx_thread_stack_highest_ptr = stack_ptr;
}
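Once the analysis above has updated tx_thread_stack_highest_ptr, the deepest stack usage of a thread can be estimated from the control block fields referenced in the hunk. The sketch below is illustrative only and not part of this commit; it assumes a downward-growing stack and a build with TX_ENABLE_STACK_CHECKING defined so the field is maintained, and the demo function name is hypothetical.

/* Illustrative sketch (not part of this commit). */

ULONG demo_stack_bytes_used(TX_THREAD *thread_ptr)
{
UCHAR *stack_end;

    /* End of the stack area is start + size; the recorded highest usage is the
       lowest address that was actually written. */
    stack_end = ((UCHAR *) thread_ptr -> tx_thread_stack_start) + thread_ptr -> tx_thread_stack_size;

    return((ULONG) (stack_end - ((UCHAR *) thread_ptr -> tx_thread_stack_highest_ptr)));
}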
@@ -26,8 +26,7 @@

/* Include necessary system files. */

#include "tx_api.h"
#ifndef TX_PORT_THREAD_STACK_ERROR_HANDLER
#if defined(TX_MISRA_ENABLE) || defined(TX_ENABLE_STACK_CHECKING)
#if defined(TX_MISRA_ENABLE) || defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
#include "tx_thread.h"

@@ -36,7 +35,7 @@

/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_error_handler PORTABLE C */
/* 6.1.7 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */

@@ -78,6 +77,9 @@

/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr)

@@ -85,7 +87,7 @@ VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr)

TX_INTERRUPT_SAVE_AREA

#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)

/* Disable interrupts. */
TX_DISABLE

@@ -109,12 +111,10 @@ TX_INTERRUPT_SAVE_AREA

/* Disable interrupts. */
TX_DISABLE

/* Restore interrupts. */
TX_RESTORE
}
#endif
}
#endif /* TX_MISRA_ENABLE */

#endif /* TX_PORT_THREAD_STACK_ERROR_HANDLER */
#endif
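The new TX_PORT_THREAD_STACK_ERROR_HANDLER guard above excludes the default handler from the build, which suggests that a port defining that symbol is expected to supply its own _tx_thread_stack_error_handler. The sketch below is only one possible, hypothetical policy for such a replacement and is not part of this commit; a real port would apply its own recovery strategy.

/* Illustrative sketch (not part of this commit), assuming the port defines
   TX_PORT_THREAD_STACK_ERROR_HANDLER in tx_port.h and provides this replacement. */

#ifdef TX_PORT_THREAD_STACK_ERROR_HANDLER

TX_THREAD *demo_faulting_thread;

VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr)
{

    /* Remember which thread overran its stack and halt for the debugger;
       an actual port would implement its own handling here. */
    demo_faulting_thread = thread_ptr;
    while(1)
    {
    }
}
#endif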
@@ -26,9 +26,8 @@

/* Include necessary system files. */

#include "tx_api.h"
#ifndef TX_PORT_THREAD_STACK_ERROR_NOTIFY
#include "tx_thread.h"
#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
#include "tx_trace.h"
#endif

@@ -38,7 +37,7 @@

/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_error_notify PORTABLE C */
/* 6.1.7 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */

@@ -79,12 +78,15 @@

/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
UINT _tx_thread_stack_error_notify(VOID (*stack_error_handler)(TX_THREAD *thread_ptr))
{

#ifndef TX_ENABLE_STACK_CHECKING
#if !defined(TX_ENABLE_STACK_CHECKING) && !defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)

UINT status;

@@ -98,13 +100,14 @@ UINT status;

}
else
{

/* Stack checking is not enabled, just return an error. */
status = TX_FEATURE_NOT_ENABLED;
}

/* Return completion status. */
return(status);

#else

TX_INTERRUPT_SAVE_AREA

@@ -129,5 +132,3 @@ TX_INTERRUPT_SAVE_AREA

return(TX_SUCCESS);
#endif
}

#endif /* TX_PORT_THREAD_STACK_ERROR_NOTIFY */
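A brief usage sketch of the notify service whose guards were updated above follows; it is illustrative only and not part of this commit, and the demo names are hypothetical. Unless the library is built with TX_ENABLE_STACK_CHECKING (or, after this change, with TX_PORT_THREAD_STACK_ERROR_HANDLING), the registration call returns TX_FEATURE_NOT_ENABLED.

/* Illustrative usage sketch (not part of this commit). */

VOID demo_stack_error_callback(TX_THREAD *thread_ptr)
{

    /* React to the overflow; as one possible policy, terminate the offending thread. */
    tx_thread_terminate(thread_ptr);
}

VOID demo_register_stack_error_callback(VOID)
{
UINT status;

    /* Register the application callback; status is TX_SUCCESS when stack
       checking is enabled in the build. */
    status = tx_thread_stack_error_notify(demo_stack_error_callback);
    (VOID) status;
}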
|
||||
|
||||
@@ -109,7 +109,7 @@ UINT status;
|
||||
/* Determine if we are in a thread context. */
|
||||
if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Yes, we are in a thread context. */
|
||||
|
||||
/* Determine if the current thread is also the suspending thread. */
|
||||
@@ -120,13 +120,13 @@ UINT status;
|
||||
if (_tx_thread_preempt_disable != ((UINT) 0))
|
||||
{
|
||||
|
||||
/* Current thread cannot suspend when the preempt disable flag is non-zero,
|
||||
/* Current thread cannot suspend when the preempt disable flag is non-zero,
|
||||
return an error. */
|
||||
status = TX_SUSPEND_ERROR;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the status is still successful. */
|
||||
if (status == TX_SUCCESS)
|
||||
{
|
||||
@@ -163,7 +163,7 @@ UINT status;
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
|
||||
/* Return success. */
|
||||
status = TX_SUCCESS;
|
||||
#else
|
||||
@@ -204,15 +204,15 @@ UINT status;
|
||||
/* Restore interrupts. */
|
||||
TX_RESTORE
|
||||
|
||||
/* Always return success, since this function does not perform error
|
||||
/* Always return success, since this function does not perform error
|
||||
checking. */
|
||||
return(status);
|
||||
|
||||
|
||||
#else
|
||||
|
||||
/* In-line thread suspension processing follows, which is effectively just taking the
|
||||
/* In-line thread suspension processing follows, which is effectively just taking the
|
||||
logic in tx_thread_system_suspend.c and placing it here! */
|
||||
|
||||
|
||||
UINT priority;
|
||||
UINT base_priority;
|
||||
ULONG priority_map;
|
||||
@@ -270,7 +270,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Determine if we are in a thread context. */
|
||||
if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* Yes, we are in a thread context. */
|
||||
|
||||
/* Determine if the current thread is also the suspending thread. */
|
||||
@@ -281,19 +281,19 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
if (_tx_thread_preempt_disable != ((UINT) 0))
|
||||
{
|
||||
|
||||
/* Current thread cannot suspend when the preempt disable flag is non-zero,
|
||||
/* Current thread cannot suspend when the preempt disable flag is non-zero,
|
||||
return an error. */
|
||||
status = TX_SUSPEND_ERROR;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the status is still successful. */
|
||||
if (status == TX_SUCCESS)
|
||||
{
|
||||
|
||||
#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
|
||||
|
||||
|
||||
/* Increment the thread's suspend count. */
|
||||
thread_ptr -> tx_thread_performance_suspend_count++;
|
||||
|
||||
@@ -322,7 +322,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -391,7 +391,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
else
|
||||
{
|
||||
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
pointer to NULL. */
|
||||
_tx_thread_priority_list[priority] = TX_NULL;
|
||||
|
||||
@@ -483,13 +483,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
|
||||
}
|
||||
@@ -521,7 +521,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Calculate the lowest bit set in the priority map. */
|
||||
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
|
||||
|
||||
@@ -529,7 +529,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
_tx_thread_highest_priority = base_priority + priority_bit;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if this thread is the thread designated to execute. */
|
||||
if (thread_ptr == _tx_thread_execute_ptr)
|
||||
{
|
||||
@@ -599,7 +599,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
|
||||
{
|
||||
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
preemption-threshold. */
|
||||
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
|
||||
|
||||
@@ -614,9 +614,9 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -636,7 +636,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Determine if there are any other bits set in this preempt map. */
|
||||
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* No, clear the active bit to signify this preempt map has nothing set. */
|
||||
TX_DIV32_BIT_SET(priority, priority_bit)
|
||||
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
|
||||
@@ -653,18 +653,18 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -677,13 +677,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
|
||||
}
|
||||
@@ -719,18 +719,18 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -747,9 +747,9 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -798,7 +798,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
_tx_thread_performance_non_idle_return_count++;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* Preemption is needed - return to the system! */
|
||||
_tx_thread_system_return();
|
||||
}
|
||||
|
||||
@@ -84,7 +84,7 @@ TX_THREAD *thread_ptr;
|
||||
/* Determine if we are in a system state (ISR or Initialization) or internal preemption is disabled. */
|
||||
if (combined_flags == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* No, at thread execution level so continue checking for preemption. */
|
||||
|
||||
/* Pickup thread pointer. */
|
||||
|
||||
@@ -140,7 +140,7 @@ UINT map_index;
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -158,7 +158,7 @@ UINT map_index;
|
||||
if (thread_ptr -> tx_thread_suspending == TX_FALSE)
|
||||
{
|
||||
|
||||
/* Thread is not in the process of suspending. Now check to make sure the thread
|
||||
/* Thread is not in the process of suspending. Now check to make sure the thread
|
||||
has not already been resumed. */
|
||||
if (thread_ptr -> tx_thread_state != TX_READY)
|
||||
{
|
||||
@@ -166,9 +166,9 @@ UINT map_index;
|
||||
/* No, now check to see if the delayed suspension flag is set. */
|
||||
if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
|
||||
{
|
||||
|
||||
|
||||
/* Resume the thread! */
|
||||
|
||||
|
||||
/* Make this thread ready. */
|
||||
|
||||
/* Change the state to ready. */
|
||||
@@ -229,7 +229,7 @@ UINT map_index;
|
||||
/* Pickup the execute pointer. Since it is going to be referenced multiple
|
||||
times, it is placed in a local variable. */
|
||||
execute_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
|
||||
/* Determine if no thread is currently executing. */
|
||||
if (execute_ptr == TX_NULL)
|
||||
{
|
||||
@@ -239,9 +239,9 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Another thread has been scheduled for execution. */
|
||||
|
||||
|
||||
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
|
||||
if (priority < execute_ptr -> tx_thread_preempt_threshold)
|
||||
{
|
||||
@@ -282,7 +282,7 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
|
||||
{
|
||||
|
||||
@@ -301,7 +301,7 @@ UINT map_index;
|
||||
|
||||
/* Yes, modify the execute thread pointer. */
|
||||
_tx_thread_execute_ptr = thread_ptr;
|
||||
|
||||
|
||||
#ifndef TX_MISRA_ENABLE
|
||||
|
||||
/* If MISRA is not-enabled, insert a preemption and return in-line for performance. */
|
||||
@@ -311,18 +311,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -335,11 +335,11 @@ UINT map_index;
|
||||
resume event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
|
||||
be used by the trace analysis tool to show idle system conditions. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -408,15 +408,15 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* A resumption occurred in the middle of a previous thread suspension. */
|
||||
|
||||
|
||||
/* Make sure the type of suspension under way is not a terminate or
|
||||
thread completion. In either of these cases, do not void the
|
||||
thread completion. In either of these cases, do not void the
|
||||
interrupted suspension processing. */
|
||||
if (thread_ptr -> tx_thread_state != TX_COMPLETED)
|
||||
{
|
||||
|
||||
|
||||
/* Make sure the thread isn't terminated. */
|
||||
if (thread_ptr -> tx_thread_state != TX_TERMINATED)
|
||||
{
|
||||
@@ -439,7 +439,7 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Clear the delayed suspend flag and change the state. */
|
||||
thread_ptr -> tx_thread_delayed_suspend = TX_FALSE;
|
||||
thread_ptr -> tx_thread_state = TX_SUSPENDED;
|
||||
@@ -462,18 +462,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -486,11 +486,11 @@ UINT map_index;
|
||||
resume event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
|
||||
be used by the trace analysis tool to show idle system conditions. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
@@ -564,7 +564,7 @@ UINT state;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* A resumption occurred in the middle of a previous thread suspension. */
|
||||
|
||||
/* Pickup the current thread state. */
|
||||
@@ -575,20 +575,20 @@ UINT state;
|
||||
/* Move the state into a different variable for MISRA compliance. */
|
||||
temp_state = state;
|
||||
#endif
|
||||
|
||||
|
||||
/* Log the thread status change. */
|
||||
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) state), TX_POINTER_TO_ULONG_CONVERT(&temp_state), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)
|
||||
|
||||
|
||||
/* Make sure the type of suspension under way is not a terminate or
|
||||
thread completion. In either of these cases, do not void the
|
||||
thread completion. In either of these cases, do not void the
|
||||
interrupted suspension processing. */
|
||||
if (state != TX_COMPLETED)
|
||||
{
|
||||
|
||||
|
||||
/* Check for terminated thread. */
|
||||
if (state != TX_TERMINATED)
|
||||
{
|
||||
|
||||
|
||||
/* Clear the suspending flag. */
|
||||
thread_ptr -> tx_thread_suspending = TX_FALSE;
|
||||
|
||||
@@ -653,7 +653,7 @@ UINT map_index;
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -681,7 +681,7 @@ UINT map_index;
|
||||
TX_THREAD_STACK_CHECK(thread_ptr)
|
||||
#endif
|
||||
|
||||
/* Thread is not in the process of suspending. Now check to make sure the thread
|
||||
/* Thread is not in the process of suspending. Now check to make sure the thread
|
||||
has not already been resumed. */
|
||||
if (thread_ptr -> tx_thread_state != TX_READY)
|
||||
{
|
||||
@@ -752,7 +752,7 @@ UINT map_index;
|
||||
/* Pickup the execute pointer. Since it is going to be referenced multiple
|
||||
times, it is placed in a local variable. */
|
||||
execute_ptr = _tx_thread_execute_ptr;
|
||||
|
||||
|
||||
/* Determine if no thread is currently executing. */
|
||||
if (execute_ptr == TX_NULL)
|
||||
{
|
||||
@@ -762,7 +762,7 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
|
||||
if (priority < execute_ptr -> tx_thread_preempt_threshold)
|
||||
{
|
||||
@@ -803,7 +803,7 @@ UINT map_index;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
|
||||
{
|
||||
|
||||
@@ -831,18 +831,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -855,11 +855,11 @@ UINT map_index;
|
||||
resume event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
|
||||
be used by the trace analysis tool to show idle system conditions. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -903,7 +903,7 @@ UINT map_index;
|
||||
{
|
||||
|
||||
/* No, there are other threads at this priority already ready. */
|
||||
|
||||
|
||||
/* Just add this thread to the priority list. */
|
||||
tail_ptr = head_ptr -> tx_thread_ready_previous;
|
||||
tail_ptr -> tx_thread_ready_next = thread_ptr;
|
||||
@@ -928,18 +928,18 @@ UINT map_index;
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -952,11 +952,11 @@ UINT map_index;
|
||||
resume event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Does the timestamp match? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
|
||||
be used by the trace analysis tool to show idle system conditions. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
|
||||
@@ -85,7 +85,7 @@ VOID _tx_thread_system_suspend(TX_THREAD *thread_ptr)
|
||||
{
|
||||
|
||||
TX_INTERRUPT_SAVE_AREA
|
||||
|
||||
|
||||
UINT priority;
|
||||
UINT base_priority;
|
||||
ULONG priority_map;
|
||||
@@ -136,7 +136,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Make sure the suspension is not a wait-forever. */
|
||||
if (timeout != TX_WAIT_FOREVER)
|
||||
{
|
||||
|
||||
|
||||
/* Activate the thread timer with the timeout value setup in the caller. */
|
||||
_tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
|
||||
}
|
||||
@@ -146,7 +146,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
_tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* Decrease the preempt disabled count. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
@@ -182,7 +182,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -256,7 +256,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
else
|
||||
{
|
||||
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
pointer to NULL. */
|
||||
_tx_thread_priority_list[priority] = TX_NULL;
|
||||
|
||||
@@ -348,13 +348,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
|
||||
}
|
||||
@@ -386,9 +386,9 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Other threads at different priority levels are ready to run. */
|
||||
|
||||
|
||||
/* Calculate the lowest bit set in the priority map. */
|
||||
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
|
||||
|
||||
@@ -463,7 +463,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
|
||||
{
|
||||
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
preemption-threshold. */
|
||||
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
|
||||
|
||||
@@ -476,7 +476,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Determine if there are any other bits set in this preempt map. */
|
||||
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* No, clear the active bit to signify this preempt map has nothing set. */
|
||||
TX_DIV32_BIT_SET(priority, priority_bit)
|
||||
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
|
||||
@@ -493,18 +493,18 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -517,13 +517,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
}
|
||||
@@ -559,18 +559,18 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -583,13 +583,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
#ifdef TX_MISRA_ENABLE
|
||||
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
|
||||
@@ -655,14 +655,14 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
TX_INTERRUPT_SAVE_AREA
|
||||
|
||||
ULONG wait_option;
|
||||
|
||||
|
||||
/* Disable interrupts. */
|
||||
TX_DISABLE
|
||||
|
||||
/* Determine if the thread is still suspending. */
|
||||
if (thread_ptr -> tx_thread_suspending == TX_TRUE)
|
||||
{
|
||||
|
||||
|
||||
/* Yes, prepare to call the non-interruptable system suspend function. */
|
||||
|
||||
/* Clear the thread suspending flag. */
|
||||
@@ -670,7 +670,7 @@ ULONG wait_option;
|
||||
|
||||
/* Pickup the wait option. */
|
||||
wait_option = thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;
|
||||
|
||||
|
||||
/* Decrement the preempt disable count. */
|
||||
_tx_thread_preempt_disable--;
|
||||
|
||||
@@ -690,7 +690,7 @@ ULONG wait_option;
|
||||
|
||||
VOID _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG wait_option)
|
||||
{
|
||||
|
||||
|
||||
UINT priority;
|
||||
UINT base_priority;
|
||||
ULONG priority_map;
|
||||
@@ -719,7 +719,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Determine if a timeout needs to be activated. */
|
||||
if (thread_ptr == current_thread)
|
||||
{
|
||||
|
||||
|
||||
/* Is there a wait option? */
|
||||
if (wait_option != TX_NO_WAIT)
|
||||
{
|
||||
@@ -727,7 +727,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Make sure it is not a wait-forever option. */
|
||||
if (wait_option != TX_WAIT_FOREVER)
|
||||
{
|
||||
|
||||
|
||||
/* Setup the wait option. */
|
||||
thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = wait_option;
|
||||
|
||||
@@ -735,7 +735,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
_tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Reset time slice for current thread. */
|
||||
_tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
|
||||
}
|
||||
@@ -774,7 +774,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
#ifdef TX_ENABLE_EVENT_TRACE
|
||||
|
||||
/* Save the time stamp for later comparison to verify that
|
||||
the event hasn't been overwritten by the time we have
|
||||
the event hasn't been overwritten by the time we have
|
||||
computed the next thread to execute. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
@@ -845,7 +845,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
else
|
||||
{
|
||||
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
/* This is the only thread at this priority ready to run. Set the head
|
||||
pointer to NULL. */
|
||||
_tx_thread_priority_list[priority] = TX_NULL;
|
||||
|
||||
@@ -937,13 +937,13 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
suspend event. In that case, do nothing here. */
|
||||
if (entry_ptr != TX_NULL)
|
||||
{
|
||||
|
||||
|
||||
/* Is the timestamp the same? */
|
||||
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
|
||||
{
|
||||
|
||||
|
||||
/* Timestamp is the same, set the "next thread pointer" to the new value of the
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
next thread to execute. This can be used by the trace analysis tool to keep
|
||||
track of next thread execution. */
|
||||
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
|
||||
}
|
||||
@@ -972,7 +972,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
|
||||
/* Calculate the lowest bit set in the priority map. */
|
||||
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
|
||||
|
||||
@@ -980,7 +980,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
_tx_thread_highest_priority = base_priority + ((UINT) priority_bit);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Determine if the suspending thread is the thread designated to execute. */
|
||||
if (thread_ptr == _tx_thread_execute_ptr)
|
||||
{
|
||||
@@ -1038,7 +1038,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
|
||||
{
|
||||
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
|
||||
preemption-threshold. */
|
||||
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
|
||||
|
||||
@@ -1051,7 +1051,7 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Determine if there are any other bits set in this preempt map. */
|
||||
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
|
||||
{
|
||||
|
||||
|
||||
/* No, clear the active bit to signify this preempt map has nothing set. */
|
||||
TX_DIV32_BIT_SET(priority, priority_bit)
|
||||
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
|
||||
@@ -1068,18 +1068,18 @@ ULONG time_stamp = ((ULONG) 0);
|
||||
/* Is the execute pointer different? */
|
||||
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
|
||||
{
|
||||
|
||||
|
||||
/* Move to next entry. */
|
||||
_tx_thread_performance__execute_log_index++;
|
||||
|
||||
|
||||
/* Check for wrap condition. */
|
||||
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
|
||||
{
|
||||
|
||||
|
||||
/* Set the index to the beginning. */
|
||||
_tx_thread_performance__execute_log_index = ((UINT) 0);
|
||||
}
|
||||
|
||||
|
||||
/* Log the new execute pointer. */
|
||||
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
|
||||
}
|
||||
@@ -1092,13 +1092,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here.  */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same?  */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution.  */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
}

@@ -1131,18 +1131,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different?  */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry.  */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition.  */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning.  */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer.  */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}

@@ -1155,13 +1155,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here.  */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same?  */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution.  */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -201,7 +201,7 @@ ULONG suspension_sequence;
/* Thread state change.  */
TX_THREAD_STATE_CHANGE(thread_ptr, TX_TERMINATED)

/* Set the suspending flag. This prevents the thread from being
/* Set the suspending flag. This prevents the thread from being
   resumed before the cleanup routine is executed.  */
thread_ptr -> tx_thread_suspending = TX_TRUE;
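Marking the thread TX_TERMINATED and setting tx_thread_suspending, as above, is what the public terminate service does on the application's behalf. A minimal usage sketch; some_thread is assumed to have been created elsewhere:

    #include "tx_api.h"

    extern TX_THREAD some_thread;   /* created elsewhere with tx_thread_create */

    VOID example_terminate_and_delete(VOID)
    {
    UINT status;

        /* Terminate the thread: it stops running and any suspension it held is cleaned up. */
        status = tx_thread_terminate(&some_thread);

        if (status == TX_SUCCESS)
        {
            /* A terminated (or completed) thread may then be deleted so its
               control block and stack can be reused. */
            status = tx_thread_delete(&some_thread);
        }
    }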
@@ -279,7 +279,7 @@ ULONG suspension_sequence;
if (_tx_thread_mutex_release != TX_NULL)
{
/* Yes, call the mutex release function via a function pointer that
/* Yes, call the mutex release function via a function pointer that
   is setup during initialization.  */
(_tx_thread_mutex_release)(thread_ptr);
}
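_tx_thread_mutex_release above is deliberately a function pointer that the mutex component fills in during its own initialization, so the thread code has no direct dependency on mutex code and the component is only linked in when it is used. The registration pattern on its own, with illustrative names (cleanup_fn, register_cleanup, on_thread_exit), is:

    #include <stdio.h>

    struct thread;                                  /* opaque thread type for the sketch */
    typedef void (*cleanup_fn)(struct thread *t);   /* signature of the optional hook    */

    static cleanup_fn cleanup_hook;                 /* NULL until a component registers  */

    /* Called by the optional component during its initialization. */
    static void register_cleanup(cleanup_fn fn)
    {
        cleanup_hook = fn;
    }

    /* Core code: call the hook only if something registered it. */
    static void on_thread_exit(struct thread *t)
    {
        if (cleanup_hook != NULL)
        {
            (cleanup_hook)(t);
        }
    }

    static void mutex_cleanup(struct thread *t)
    {
        (void) t;
        printf("releasing mutexes owned by the exiting thread\n");
    }

    int main(void)
    {
        on_thread_exit(NULL);             /* hook not registered: nothing happens */
        register_cleanup(mutex_cleanup);  /* optional component initializes       */
        on_thread_exit(NULL);             /* now the cleanup hook runs            */
        return 0;
    }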
@@ -97,7 +97,7 @@ UINT preempt_disable;
/* Check this thread's stack.  */
TX_THREAD_STACK_CHECK(thread_ptr)

/* Set the next thread pointer to NULL.  */
next_thread_ptr = TX_NULL;
#endif
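TX_THREAD_STACK_CHECK above only does real work when the library is built with TX_ENABLE_STACK_CHECKING. An application can additionally ask to be notified when corruption is found; a minimal sketch, where my_stack_error_handler is an application-chosen name and terminating the thread is just one possible reaction:

    #include "tx_api.h"

    /* Called by ThreadX when stack corruption is detected (requires a library
       built with TX_ENABLE_STACK_CHECKING). */
    static VOID my_stack_error_handler(TX_THREAD *thread_ptr)
    {
        /* Log, halt, or restart the offending thread; here we simply terminate it. */
        tx_thread_terminate(thread_ptr);
    }

    VOID example_register_stack_error_handler(VOID)
    {
        tx_thread_stack_error_notify(my_stack_error_handler);
    }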
@@ -130,15 +130,15 @@ UINT preempt_disable;
/* Check to see if preemption-threshold is not being used.  */
if (thread_ptr -> tx_thread_priority == thread_ptr -> tx_thread_preempt_threshold)
{
/* Preemption-threshold is not being used by this thread.  */

/* There is another thread at this priority, make it the highest at
   this priority level.  */
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr -> tx_thread_ready_next;

/* Designate the highest priority thread as the one to execute. Don't use this
   thread's priority as an index just in case a higher priority thread is now
/* Designate the highest priority thread as the one to execute. Don't use this
   thread's priority as an index just in case a higher priority thread is now
   ready!  */
_tx_thread_execute_ptr = _tx_thread_priority_list[_tx_thread_highest_priority];
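Advancing _tx_thread_priority_list[priority] to tx_thread_ready_next, as above, is the round-robin step of time-slicing: the ready list for a priority level is circular, so moving the head forward sends the current thread to the back of the line. Reduced to a toy circular list (the types and names are illustrative, not the kernel's):

    #include <stdio.h>

    /* Minimal circular singly linked ready list for one priority level. */
    struct tcb
    {
        const char *name;
        struct tcb *ready_next;
    };

    static struct tcb *ready_head;   /* head = thread that runs next at this priority */

    /* Time-slice step: the next thread becomes head; the old head is now last. */
    static void rotate_ready_list(void)
    {
        if (ready_head != NULL)
        {
            ready_head = ready_head->ready_next;
        }
    }

    int main(void)
    {
        struct tcb a = { "A", NULL }, b = { "B", NULL }, c = { "C", NULL };

        /* Build the circular list A -> B -> C -> A. */
        a.ready_next = &b;
        b.ready_next = &c;
        c.ready_next = &a;
        ready_head = &a;

        for (int i = 0; i < 4; i++)
        {
            printf("run %s\n", ready_head->name);   /* A, B, C, A */
            rotate_ready_list();
        }
        return 0;
    }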
@@ -167,11 +167,11 @@ UINT preempt_disable;
/* Pickup the volatile information.  */
system_state = TX_THREAD_GET_SYSTEM_STATE();
preempt_disable = _tx_thread_preempt_disable;

/* Insert this event into the trace buffer.  */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_TIME_SLICE, _tx_thread_execute_ptr, system_state, preempt_disable, TX_POINTER_TO_ULONG_CONVERT(&thread_ptr), TX_TRACE_INTERNAL_EVENTS)
#endif

/* Restore previous interrupt posture.  */
TX_RESTORE
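The TX_TRACE_IN_LINE_INSERT above only records anything when event tracing is compiled in (TX_ENABLE_EVENT_TRACE) and the application has supplied a buffer at run time. A short sketch of enabling the trace and adding a user-defined event; the buffer size, registry count, and event number offset are arbitrary example values:

    #include "tx_api.h"

    static UCHAR trace_buffer[4096];

    VOID example_enable_trace(VOID)
    {
    UINT status;

        /* Hand ThreadX a RAM buffer for trace entries, with 16 object registry entries. */
        status = tx_trace_enable(trace_buffer, sizeof(trace_buffer), 16);

        if (status == TX_SUCCESS)
        {
            /* Insert an application-defined event; user event numbers start at
               TX_TRACE_USER_EVENT_START. The four information fields are free-form. */
            tx_trace_user_event_insert(TX_TRACE_USER_EVENT_START + 1, 1, 2, 3, 4);
        }
    }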
@@ -105,7 +105,7 @@ TX_THREAD *current_thread;
/* Determine if this thread is the currently executing thread.  */
if (thread_ptr == current_thread)
{
/* Yes, update the time-slice countdown variable.  */
_tx_timer_time_slice = new_time_slice;
}
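The check above means the live countdown in _tx_timer_time_slice is refreshed immediately only when the modified thread happens to be the one running; any other thread simply picks up the new value the next time it is scheduled. The corresponding public service, sketched against a thread created elsewhere:

    #include "tx_api.h"

    extern TX_THREAD worker_thread;   /* created elsewhere with tx_thread_create */

    VOID example_change_time_slice(VOID)
    {
    UINT  status;
    ULONG old_time_slice;

        /* Give the thread a 20-tick time slice; the previous value is returned. */
        status = tx_thread_time_slice_change(&worker_thread, 20, &old_time_slice);

        if (status == TX_SUCCESS)
        {
            /* TX_NO_TIME_SLICE disables time-slicing for the thread again. */
            tx_thread_time_slice_change(&worker_thread, TX_NO_TIME_SLICE, &old_time_slice);
        }
    }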
@@ -128,7 +128,7 @@ ULONG suspension_sequence;
/* Increment the number of timeouts for this thread.  */
thread_ptr -> tx_thread_performance_timeout_count++;
#endif

/* Pickup the cleanup routine address.  */
suspend_cleanup = thread_ptr -> tx_thread_suspend_cleanup;

@@ -152,6 +152,7 @@ ULONG suspension_sequence;
/* Call any cleanup routines.  */
if (suspend_cleanup != TX_NULL)
{
/* Yes, there is a function to call.  */
(suspend_cleanup)(thread_ptr, suspension_sequence);
}
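The suspend_cleanup routine invoked above is what converts an expired suspension back into a status code for whichever service the thread was blocked in. Seen from the application, the timeout is simply the return value of the blocking call; for example, with a counting semaphore assumed to be created elsewhere:

    #include "tx_api.h"

    extern TX_SEMAPHORE data_ready;   /* created elsewhere with tx_semaphore_create */

    VOID example_wait_with_timeout(VOID)
    {
    UINT status;

        /* Wait at most 100 ticks for the semaphore. */
        status = tx_semaphore_get(&data_ready, 100);

        if (status == TX_NO_INSTANCE)
        {
            /* The 100-tick suspension timed out; the internal timeout path ran the
               semaphore's cleanup routine and resumed this thread with this status. */
        }
        else if (status == TX_SUCCESS)
        {
            /* A semaphore instance was obtained within the timeout. */
        }
    }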
@@ -95,14 +95,14 @@ ULONG suspension_sequence;
/* Determine if the thread is currently suspended.  */
if (thread_ptr -> tx_thread_state < TX_SLEEP)
{
/* Thread is either ready, completed, terminated, or in a pure
/* Thread is either ready, completed, terminated, or in a pure
   suspension condition.  */

/* Restore interrupts.  */
TX_RESTORE

/* Just return with an error message to indicate that
/* Just return with an error message to indicate that
   nothing was done.  */
status = TX_WAIT_ABORT_ERROR;
}

@@ -136,7 +136,7 @@ ULONG suspension_sequence;
{
/* Process all other suspension timeouts.  */

/* Set the state to suspended.  */
thread_ptr -> tx_thread_state = TX_SUSPENDED;

@@ -217,7 +217,7 @@ ULONG suspension_sequence;
/* Disable interrupts.  */
TX_DISABLE

/* Decrement the disable preemption flag.  */
_tx_thread_preempt_disable--;
@@ -225,7 +225,7 @@ ULONG suspension_sequence;
TX_RESTORE
#endif

/* Return with an error message to indicate that
/* Return with an error message to indicate that
   nothing was done.  */
status = TX_WAIT_ABORT_ERROR;
}
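Both of the error paths above return TX_WAIT_ABORT_ERROR when the target thread is not actually waiting on anything. A sketch of the service from the caller's side, plus the status the aborted thread sees; sleepy_thread and the tick values are illustrative:

    #include "tx_api.h"

    extern TX_THREAD sleepy_thread;   /* created elsewhere; runs sleepy_entry below */

    VOID sleepy_entry(ULONG input)
    {
    UINT status;

        (VOID) input;

        /* Sleep "forever"; another thread may cut this short. */
        status = tx_thread_sleep(0xFFFFFFFFUL);

        if (status == TX_WAIT_ABORTED)
        {
            /* The sleep was aborted by tx_thread_wait_abort rather than expiring. */
        }
    }

    VOID example_abort_wait(VOID)
    {
    UINT status;

        status = tx_thread_wait_abort(&sleepy_thread);

        if (status == TX_WAIT_ABORT_ERROR)
        {
            /* The thread was not waiting (ready, completed, terminated, or purely
               suspended), so nothing was done; this is the case handled above. */
        }
    }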
@@ -35,7 +35,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_time_get PORTABLE C */
/* 6.1 */
/* 6.1.3 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */

@@ -68,6 +68,8 @@
/* 05-19-2020 William E. Lamie Initial Version 6.0 */
/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
/* 12-31-2020 Andres Mlinar Modified comment(s), */
/* resulting in version 6.1.3 */
/* */
/**************************************************************************/
ULONG _tx_time_get(VOID)
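_tx_time_get above is the internal implementation behind the public tx_time_get service, which returns the free-running system tick counter. A small usage sketch; the 50-tick sleep is an arbitrary example:

    #include "tx_api.h"

    VOID example_measure_elapsed_ticks(VOID)
    {
    ULONG start;
    ULONG elapsed;

        /* Current tick count since the scheduler started (or since tx_time_set). */
        start = tx_time_get();

        /* Sleep 50 ticks, then see how many ticks actually elapsed. */
        tx_thread_sleep(50);
        elapsed = tx_time_get() - start;

        if (elapsed >= 50)
        {
            /* Expected: at least the requested number of ticks has passed. The
               counter wraps at 0xFFFFFFFF; unsigned subtraction still yields the
               correct difference across a single wrap. */
        }

        /* The count can also be reset or preloaded by the application. */
        tx_time_set(0);
    }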
@@ -83,7 +83,7 @@ UINT status;
TX_DISABLE

#ifdef TX_ENABLE_EVENT_TRACE

/* If trace is enabled, insert this event into the trace buffer.  */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_TIMER_ACTIVATE, timer_ptr, 0, 0, 0, TX_TRACE_TIMER_EVENTS)
#endif
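The trace point above sits inside the timer-activate service. Creating an application timer without auto-activation and starting it later exercises this path; the callback name and tick periods below are illustrative:

    #include "tx_api.h"

    static TX_TIMER blink_timer;

    /* Runs in the timer context each time the timer expires. */
    static VOID blink_expiration(ULONG input)
    {
        (VOID) input;
        /* Toggle an LED, post a semaphore, etc. Keep this short: it is not a thread. */
    }

    VOID example_create_and_activate_timer(VOID)
    {
    UINT status;

        /* First expiration after 100 ticks, then every 20 ticks; do not start yet. */
        status = tx_timer_create(&blink_timer, "blink", blink_expiration, 0,
                                 100, 20, TX_NO_ACTIVATE);

        if (status == TX_SUCCESS)
        {
            /* Start the timer now; this call is instrumented by the trace event above. */
            tx_timer_activate(&blink_timer);
        }
    }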
Some files were not shown because too many files have changed in this diff.