diff --git a/common/inc/tx_api.h b/common/inc/tx_api.h
index d91bfc9f..40ef7ef5 100644
--- a/common/inc/tx_api.h
+++ b/common/inc/tx_api.h
@@ -26,7 +26,7 @@
/* APPLICATION INTERFACE DEFINITION RELEASE */
/* */
/* tx_api.h PORTABLE C */
-/* 6.1.9 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -78,6 +78,10 @@
/* 10-15-2021 Yuxin Zhou Modified comment(s), */
/* update patch number, */
/* resulting in version 6.1.9 */
+/* 01-31-2022 Scott Larson Modified comment(s), */
+/* add unused parameter macro, */
+/* update patch number, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -95,6 +99,10 @@ extern "C" {
#endif
+/* Disable warning of parameter not used. */
+#ifndef TX_PARAMETER_NOT_USED
+#define TX_PARAMETER_NOT_USED(p) ((void)(p))
+#endif /* TX_PARAMETER_NOT_USED */
/* Include the port-specific data type file. */
@@ -110,7 +118,7 @@ extern "C" {
#define AZURE_RTOS_THREADX
#define THREADX_MAJOR_VERSION 6
#define THREADX_MINOR_VERSION 1
-#define THREADX_PATCH_VERSION 9
+#define THREADX_PATCH_VERSION 10
/* Define the following symbol for backward compatibility */
#define EL_PRODUCT_THREADX
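
Note on the tx_api.h change: the only functional addition is the TX_PARAMETER_NOT_USED macro, which casts a parameter to void so intentionally ignored arguments do not trigger unused-parameter warnings. A minimal usage sketch follows; the notify callback and its name are hypothetical and not part of this patch:

    #include "tx_api.h"

    /* Hypothetical entry/exit notify callback that only acts on thread exit.       */
    /* The thread pointer is intentionally ignored; TX_PARAMETER_NOT_USED expands   */
    /* to ((void)(thread_ptr)) and keeps unused-parameter warnings quiet.           */
    static VOID my_entry_exit_notify(TX_THREAD *thread_ptr, UINT condition)
    {
        TX_PARAMETER_NOT_USED(thread_ptr);

        if (condition == TX_THREAD_EXIT)
        {
            /* ... release resources owned by the exiting thread ... */
        }
    }
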
diff --git a/common_modules/inc/txm_module.h b/common_modules/inc/txm_module.h
index aa9d2f92..90a35138 100644
--- a/common_modules/inc/txm_module.h
+++ b/common_modules/inc/txm_module.h
@@ -26,7 +26,7 @@
/* APPLICATION INTERFACE DEFINITION RELEASE */
/* */
/* txm_module.h PORTABLE C */
-/* 6.1.3 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -44,6 +44,9 @@
/* 12-31-2020 Scott Larson Modified comment(s), added */
/* port-specific extension, */
/* resulting in version 6.1.3 */
+/* 01-31-2022 Scott Larson Modified comment(s), added */
+/* callback thread prototype, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -64,23 +67,23 @@
#ifdef TXM_MODULE_ENABLE_FILEX
#include "txm_module_filex.h"
-#endif
+#endif
#ifdef TXM_MODULE_ENABLE_GUIX
#include "txm_module_guix.h"
-#endif
+#endif
#ifdef TXM_MODULE_ENABLE_NETX
#include "txm_module_netx.h"
-#endif
+#endif
#ifdef TXM_MODULE_ENABLE_NETXDUO
#include "txm_module_netxduo.h"
-#endif
+#endif
#ifdef TXM_MODULE_ENABLE_USBX
#include "txm_module_usbx.h"
-#endif
+#endif
#ifdef FX_FILEX_PRESENT
@@ -157,7 +160,7 @@ extern "C" {
/* Define each module's callback queue depth. This is used to queue up incoming call back requests. */
#ifndef TXM_MODULE_CALLBACKS_QUEUE_DEPTH
-#define TXM_MODULE_CALLBACKS_QUEUE_DEPTH 8 /* Number queued callback requests. */
+#define TXM_MODULE_CALLBACKS_QUEUE_DEPTH 8 /* Number queued callback requests. */
#endif
@@ -367,7 +370,7 @@ typedef struct TXM_MODULE_PREAMBLE_STRUCT
ULONG txm_module_preamble_property_flags; /* Properties Bit Map */
ULONG txm_module_preamble_shell_entry_function; /* Module shell Entry Function */
ULONG txm_module_preamble_start_function; /* Module Thread Start Function */
- ULONG txm_module_preamble_stop_function; /* Module Thread Stop Function */
+ ULONG txm_module_preamble_stop_function; /* Module Thread Stop Function */
ULONG txm_module_preamble_start_stop_priority; /* Module Start/Stop Thread Priority */
ULONG txm_module_preamble_start_stop_stack_size; /* Module Start/Stop Thread Priority */
ULONG txm_module_preamble_callback_function; /* Module Callback Thread Function */
@@ -448,10 +451,10 @@ typedef struct TXM_MODULE_INSTANCE_STRUCT
VOID (*txm_module_instance_start_thread_entry)(ULONG);
VOID (*txm_module_instance_stop_thread_entry)(ULONG);
VOID (*txm_module_instance_callback_request_thread_entry)(ULONG);
-
+
/* Define the port extention to the module manager structure. */
TXM_MODULE_MANAGER_PORT_EXTENSION
-
+
TX_THREAD txm_module_instance_start_stop_thread;
TX_THREAD txm_module_instance_callback_request_thread;
TX_QUEUE txm_module_instance_callback_request_queue;
@@ -462,7 +465,7 @@ typedef struct TXM_MODULE_INSTANCE_STRUCT
ULONG txm_module_instance_callback_priority;
ULONG txm_module_instance_application_module_id;
UINT txm_module_instance_maximum_priority;
-
+
/* Define the head pointer of the list of objects allocated by the module. */
struct TXM_MODULE_ALLOCATED_OBJECT_STRUCT
*txm_module_instance_object_list_head;
@@ -470,11 +473,11 @@ typedef struct TXM_MODULE_INSTANCE_STRUCT
struct TXM_MODULE_INSTANCE_STRUCT
*txm_module_instance_loaded_next,
- *txm_module_instance_loaded_previous;
+ *txm_module_instance_loaded_previous;
} TXM_MODULE_INSTANCE;
-/* Determine if the thread entry info control block has an extension defined. If not, define the extension to
+/* Determine if the thread entry info control block has an extension defined. If not, define the extension to
whitespace. */
#ifndef TXM_MODULE_THREAD_ENTRY_INFO_USER_EXTENSION
@@ -482,9 +485,9 @@ typedef struct TXM_MODULE_INSTANCE_STRUCT
#endif
-/* Define the thread entry information structure. This structure is placed on the thread's stack such that the
+/* Define the thread entry information structure. This structure is placed on the thread's stack such that the
module's _txm_thread_shell_entry function does not need to access anything in the thread control block. */
-
+
typedef struct TXM_MODULE_THREAD_ENTRY_INFO_STRUCT
{
TX_THREAD *txm_module_thread_entry_info_thread;
@@ -516,7 +519,7 @@ typedef struct TXM_MODULE_ALLOCATED_OBJECT_STRUCT
} TXM_MODULE_ALLOCATED_OBJECT;
-/* Determine if module code is being compiled. If so, remap the ThreadX API to
+/* Determine if module code is being compiled. If so, remap the ThreadX API to
the module shell functions that will go through the module <-> module manager
interface. */
@@ -541,6 +544,7 @@ VOID _txm_module_thread_shell_entry(TX_THREAD *thread_ptr, TXM_MODULE_THREAD_EN
UINT _txm_module_thread_system_suspend(TX_THREAD *thread_ptr);
UINT _txm_module_application_request(ULONG request, ALIGN_TYPE param_1, ALIGN_TYPE param_2, ALIGN_TYPE param_3);
+VOID _txm_module_callback_request_thread_entry(ULONG id);
UINT _txm_module_object_allocate(VOID **object_ptr, ULONG object_size);
UINT _txm_module_object_deallocate(VOID *object_ptr);
UINT _txm_module_object_pointer_get(UINT object_type, CHAR *name, VOID **object_ptr);
@@ -574,7 +578,7 @@ VOID _txm_module_usbx_duo_callback_request(TXM_MODULE_CALLBACK_MESSAGE *callbac
/* Map the module manager APIs just in case this is being included from the module manager in the
resident portion of the application. */
-
+
#define txm_module_manager_initialize _txm_module_manager_initialize
#define txm_module_manager_absolute_load _txm_module_manager_absolute_load
#define txm_module_manager_in_place_load _txm_module_manager_in_place_load
@@ -615,7 +619,7 @@ UINT _txm_module_manager_file_load(TXM_MODULE_INSTANCE *module_instance, CHAR *
UINT _txm_module_manager_initialize(VOID *module_memory_start, ULONG module_memory_size);
UINT _txm_module_manager_absolute_load(TXM_MODULE_INSTANCE *module_instance, CHAR *name, VOID *module_location);
UINT _txm_module_manager_in_place_load(TXM_MODULE_INSTANCE *module_instance, CHAR *name, VOID *module_location);
-UINT _txm_module_manager_internal_load(TXM_MODULE_INSTANCE *module_instance, CHAR *name, VOID *module_location,
+UINT _txm_module_manager_internal_load(TXM_MODULE_INSTANCE *module_instance, CHAR *name, VOID *module_location,
ULONG code_size, VOID *code_allocation_ptr, ULONG code_allocation_size);
ALIGN_TYPE _txm_module_manager_kernel_dispatch(ULONG kernel_request, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE param_2);
UINT _txm_module_manager_object_allocate(VOID **object_ptr_ptr, ULONG object_size, TXM_MODULE_INSTANCE *module_instance);
@@ -628,7 +632,7 @@ UINT _txm_module_manager_memory_load(TXM_MODULE_INSTANCE *module_instance, CHAR
UINT _txm_module_manager_properties_get(TXM_MODULE_INSTANCE *module_instance, ULONG *module_properties_ptr);
UINT _txm_module_manager_start(TXM_MODULE_INSTANCE *module_instance);
UINT _txm_module_manager_stop(TXM_MODULE_INSTANCE *module_instance);
-UINT _txm_module_manager_thread_create(TX_THREAD *thread_ptr, CHAR *name, VOID (*shell_function)(TX_THREAD *, TXM_MODULE_INSTANCE *),
+UINT _txm_module_manager_thread_create(TX_THREAD *thread_ptr, CHAR *name, VOID (*shell_function)(TX_THREAD *, TXM_MODULE_INSTANCE *),
VOID (*entry_function)(ULONG), ULONG entry_input,
VOID *stack_start, ULONG stack_size, UINT priority, UINT preempt_threshold,
ULONG time_slice, UINT auto_start, UINT thread_control_block_size, TXM_MODULE_INSTANCE *module_instance);
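
Note on the txm_module.h change: the header now declares _txm_module_callback_request_thread_entry, the entry function for the per-module thread that drains queued callback requests. That queue is sized by TXM_MODULE_CALLBACKS_QUEUE_DEPTH, which defaults to 8 and, as the #ifndef guard above shows, can be overridden before this header is seen. A minimal sketch, assuming the override is visible wherever txm_module.h is compiled (the value 16 is only an example):

    /* Either on the compiler command line ...                                      */
    /*     -DTXM_MODULE_CALLBACKS_QUEUE_DEPTH=16                                    */
    /* ... or in a configuration header pulled in ahead of txm_module.h:            */
    #ifndef TXM_MODULE_CALLBACKS_QUEUE_DEPTH
    #define TXM_MODULE_CALLBACKS_QUEUE_DEPTH    16   /* Queue up to 16 pending callback requests. */
    #endif
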
diff --git a/common_modules/inc/txm_module_user_sample.h b/common_modules/inc/txm_module_user_sample.h
index 3587eea3..55b471a7 100644
--- a/common_modules/inc/txm_module_user_sample.h
+++ b/common_modules/inc/txm_module_user_sample.h
@@ -10,41 +10,44 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** User Specific */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** User Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
-/**************************************************************************/
-/* */
-/* APPLICATION INTERFACE DEFINITION RELEASE */
-/* */
-/* txm_module_user.h PORTABLE C */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* APPLICATION INTERFACE DEFINITION RELEASE */
+/* */
+/* txm_module_user.h PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This file contains user defines for configuring the Module Manager */
-/* in specific ways. This file will have an effect only if the Module */
-/* Manager library is built with TXM_MODULE_INCLUDE_USER_DEFINE_FILE */
-/* defined. Note that all the defines in this file may also be made on */
-/* the command line when building Modules library and application */
-/* objects. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This file contains user defines for configuring the Module Manager */
+/* in specific ways. This file will have an effect only if the Module */
+/* Manager library is built with TXM_MODULE_INCLUDE_USER_DEFINE_FILE */
+/* defined. Note that all the defines in this file may also be made on */
+/* the command line when building Modules library and application */
+/* objects. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED defines, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -57,4 +60,106 @@
/* #define TXM_MODULE_KERNEL_STACK_SIZE 2048 */
+
+/* Uncomment any of these defines to prevent modules from being able to make that system call. */
+
+/* #define TXM_BLOCK_ALLOCATE_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_CREATE_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_DELETE_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BLOCK_POOL_PRIORITIZE_CALL_NOT_USED */
+/* #define TXM_BLOCK_RELEASE_CALL_NOT_USED */
+/* #define TXM_BYTE_ALLOCATE_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_CREATE_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_DELETE_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_BYTE_POOL_PRIORITIZE_CALL_NOT_USED */
+/* #define TXM_BYTE_RELEASE_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_CREATE_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_DELETE_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_GET_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_INFO_GET_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_SET_CALL_NOT_USED */
+/* #define TXM_EVENT_FLAGS_SET_NOTIFY_CALL_NOT_USED */
+/* #define TXM_MUTEX_CREATE_CALL_NOT_USED */
+/* #define TXM_MUTEX_DELETE_CALL_NOT_USED */
+/* #define TXM_MUTEX_GET_CALL_NOT_USED */
+/* #define TXM_MUTEX_INFO_GET_CALL_NOT_USED */
+/* #define TXM_MUTEX_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_MUTEX_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_MUTEX_PRIORITIZE_CALL_NOT_USED */
+/* #define TXM_MUTEX_PUT_CALL_NOT_USED */
+/* #define TXM_QUEUE_CREATE_CALL_NOT_USED */
+/* #define TXM_QUEUE_DELETE_CALL_NOT_USED */
+/* #define TXM_QUEUE_FLUSH_CALL_NOT_USED */
+/* #define TXM_QUEUE_FRONT_SEND_CALL_NOT_USED */
+/* #define TXM_QUEUE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_QUEUE_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_QUEUE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_QUEUE_PRIORITIZE_CALL_NOT_USED */
+/* #define TXM_QUEUE_RECEIVE_CALL_NOT_USED */
+/* #define TXM_QUEUE_SEND_CALL_NOT_USED */
+/* #define TXM_QUEUE_SEND_NOTIFY_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_CEILING_PUT_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_CREATE_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_DELETE_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_GET_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_PRIORITIZE_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_PUT_CALL_NOT_USED */
+/* #define TXM_SEMAPHORE_PUT_NOTIFY_CALL_NOT_USED */
+/* #define TXM_THREAD_CREATE_CALL_NOT_USED */
+/* #define TXM_THREAD_DELETE_CALL_NOT_USED */
+/* #define TXM_THREAD_ENTRY_EXIT_NOTIFY_CALL_NOT_USED */
+/* #define TXM_THREAD_IDENTIFY_CALL_NOT_USED */
+/* #define TXM_THREAD_INFO_GET_CALL_NOT_USED */
+/* #define TXM_THREAD_INTERRUPT_CONTROL_CALL_NOT_USED */
+/* #define TXM_THREAD_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_THREAD_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_THREAD_PREEMPTION_CHANGE_CALL_NOT_USED */
+/* #define TXM_THREAD_PRIORITY_CHANGE_CALL_NOT_USED */
+/* #define TXM_THREAD_RELINQUISH_CALL_NOT_USED */
+/* #define TXM_THREAD_RESET_CALL_NOT_USED */
+/* #define TXM_THREAD_RESUME_CALL_NOT_USED */
+/* #define TXM_THREAD_SLEEP_CALL_NOT_USED */
+/* #define TXM_THREAD_STACK_ERROR_NOTIFY_CALL_NOT_USED */
+/* #define TXM_THREAD_SUSPEND_CALL_NOT_USED */
+/* thread system suspend is needed in _txm_module_thread_shell_entry */
+/* #define TXM_THREAD_TERMINATE_CALL_NOT_USED */
+/* #define TXM_THREAD_TIME_SLICE_CHANGE_CALL_NOT_USED */
+/* #define TXM_THREAD_WAIT_ABORT_CALL_NOT_USED */
+/* #define TXM_TIME_GET_CALL_NOT_USED */
+/* #define TXM_TIME_SET_CALL_NOT_USED */
+/* #define TXM_TIMER_ACTIVATE_CALL_NOT_USED */
+/* #define TXM_TIMER_CHANGE_CALL_NOT_USED */
+/* #define TXM_TIMER_CREATE_CALL_NOT_USED */
+/* #define TXM_TIMER_DEACTIVATE_CALL_NOT_USED */
+/* #define TXM_TIMER_DELETE_CALL_NOT_USED */
+/* #define TXM_TIMER_INFO_GET_CALL_NOT_USED */
+/* #define TXM_TIMER_PERFORMANCE_INFO_GET_CALL_NOT_USED */
+/* #define TXM_TIMER_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED */
+/* #define TXM_TRACE_BUFFER_FULL_NOTIFY_CALL_NOT_USED */
+/* #define TXM_TRACE_DISABLE_CALL_NOT_USED */
+/* #define TXM_TRACE_ENABLE_CALL_NOT_USED */
+/* #define TXM_TRACE_EVENT_FILTER_CALL_NOT_USED */
+/* #define TXM_TRACE_EVENT_UNFILTER_CALL_NOT_USED */
+/* #define TXM_TRACE_INTERRUPT_CONTROL_CALL_NOT_USED */
+/* #define TXM_TRACE_ISR_ENTER_INSERT_CALL_NOT_USED */
+/* #define TXM_TRACE_ISR_EXIT_INSERT_CALL_NOT_USED */
+/* #define TXM_TRACE_USER_EVENT_INSERT_CALL_NOT_USED */
+/* #define TXM_MODULE_APPLICATION_REQUEST_CALL_NOT_USED */
+/* #define TXM_MODULE_OBJECT_ALLOCATE_CALL_NOT_USED */
+/* #define TXM_MODULE_OBJECT_DEALLOCATE_CALL_NOT_USED */
+/* #define TXM_MODULE_OBJECT_POINTER_GET_CALL_NOT_USED */
+/* #define TXM_MODULE_OBJECT_POINTER_GET_EXTENDED_CALL_NOT_USED */
+
+
#endif
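
Note on the txm_module_user_sample.h change: the block of commented-out defines is the user-facing switch list for the new CALL_NOT_USED guards. Uncommenting one removes the corresponding system-call wrapper from the module library, so modules built against that library cannot issue the call. As the file's own description states, the header only takes effect when the library is built with TXM_MODULE_INCLUDE_USER_DEFINE_FILE defined; alternatively, the same defines may be passed directly to the compiler (for example -DTXM_BYTE_POOL_DELETE_CALL_NOT_USED). A minimal sketch of the header route, using byte-pool deletion as the example call to remove:

    /* In txm_module_user.h (read only when the module library is built with        */
    /* TXM_MODULE_INCLUDE_USER_DEFINE_FILE), uncomment the desired option:          */
    #define TXM_BYTE_POOL_DELETE_CALL_NOT_USED   /* tx_byte_pool_delete becomes unavailable to modules. */
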
diff --git a/common_modules/module_lib/src/txm_block_allocate.c b/common_modules/module_lib/src/txm_block_allocate.c
index c3a8d92a..3a96d8eb 100644
--- a/common_modules/module_lib/src/txm_block_allocate.c
+++ b/common_modules/module_lib/src/txm_block_allocate.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_block_allocate PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_ALLOCATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_block_allocate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the allocate block memory */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* block_ptr Pointer to place allocated block */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the allocate block memory */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* block_ptr Pointer to place allocated block */
/* pointer */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid pool pointer */
-/* TX_PTR_ERROR Invalid destination pointer */
-/* TX_WAIT_ERROR Invalid wait option */
-/* status Actual Completion status */
-/* */
-/* CALLS */
-/* */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid pool pointer */
+/* TX_PTR_ERROR Invalid destination pointer */
+/* TX_WAIT_ERROR Invalid wait option */
+/* status Actual Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_allocate(TX_BLOCK_POOL *pool_ptr, VOID **block_ptr, ULONG wait_option)
@@ -78,3 +81,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
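
Note on the module-library wrappers: txm_block_allocate.c is the first to receive the guard, and every file that follows applies the identical #ifndef ... #endif pattern around its wrapper. The practical effect, assuming the usual module-build remapping in which txm_module.h redirects tx_block_allocate to _txe_block_allocate, is that a module built against a library compiled with TXM_BLOCK_ALLOCATE_CALL_NOT_USED fails at link time rather than reaching the kernel dispatcher. A minimal sketch (my_pool and try_allocate are hypothetical module code):

    #define TXM_MODULE
    #include "txm_module.h"

    TX_BLOCK_POOL my_pool;                  /* Hypothetical pool created elsewhere in the module. */

    UINT try_allocate(VOID **block)
    {
        /* If the module library was built with TXM_BLOCK_ALLOCATE_CALL_NOT_USED,    */
        /* the _txe_block_allocate wrapper above is compiled out and this call is    */
        /* rejected by the linker instead of being dispatched at run time.           */
        return tx_block_allocate(&my_pool, block, TX_NO_WAIT);
    }
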
diff --git a/common_modules/module_lib/src/txm_block_pool_create.c b/common_modules/module_lib/src/txm_block_pool_create.c
index 5b550aaa..e1982e14 100644
--- a/common_modules/module_lib/src/txm_block_pool_create.c
+++ b/common_modules/module_lib/src/txm_block_pool_create.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_block_pool_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_block_pool_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the create block memory pool */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* name_ptr Pointer to block pool name */
-/* block_size Number of bytes in each block */
-/* pool_start Address of beginning of pool area */
-/* pool_size Number of bytes in the block pool */
-/* pool_control_block_size Size of block pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid pool pointer */
-/* TX_PTR_ERROR Invalid starting address */
-/* TX_SIZE_ERROR Invalid pool size */
-/* TX_CALLER_ERROR Invalid caller of pool */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the create block memory pool */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* name_ptr Pointer to block pool name */
+/* block_size Number of bytes in each block */
+/* pool_start Address of beginning of pool area */
+/* pool_size Number of bytes in the block pool */
+/* pool_control_block_size Size of block pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid pool pointer */
+/* TX_PTR_ERROR Invalid starting address */
+/* TX_SIZE_ERROR Invalid pool size */
+/* TX_CALLER_ERROR Invalid caller of pool */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_pool_create(TX_BLOCK_POOL *pool_ptr, CHAR *name_ptr, ULONG block_size, VOID *pool_start, ULONG pool_size, UINT pool_control_block_size)
@@ -87,3 +90,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_pool_delete.c b/common_modules/module_lib/src/txm_block_pool_delete.c
index 89b1d064..9f803296 100644
--- a/common_modules/module_lib/src/txm_block_pool_delete.c
+++ b/common_modules/module_lib/src/txm_block_pool_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_block_pool_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_block_pool_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the delete block pool memory */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid memory block pool pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual delete function status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the delete block pool memory */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid memory block pool pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual delete function status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_pool_delete(TX_BLOCK_POOL *pool_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_pool_info_get.c b/common_modules/module_lib/src/txm_block_pool_info_get.c
index f5a221b2..2cc52dbe 100644
--- a/common_modules/module_lib/src/txm_block_pool_info_get.c
+++ b/common_modules/module_lib/src/txm_block_pool_info_get.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_block_pool_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_block_pool_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the block pool information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to block pool control blk */
-/* name Destination for the pool name */
-/* available_blocks Number of free blocks in pool */
-/* total_blocks Total number of blocks in pool */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on block pool */
-/* suspended_count Destination for suspended count */
-/* next_pool Destination for pointer to next */
-/* block pool on the created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid block pool pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the block pool information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to block pool control blk */
+/* name Destination for the pool name */
+/* available_blocks Number of free blocks in pool */
+/* total_blocks Total number of blocks in pool */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on block pool */
+/* suspended_count Destination for suspended count */
+/* next_pool Destination for pointer to next */
+/* block pool on the created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid block pool pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks, ULONG *total_blocks, TX_THREAD **first_suspended, ULONG *suspended_count, TX_BLOCK_POOL **next_pool)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_pool_performance_info_get.c b/common_modules/module_lib/src/txm_block_pool_performance_info_get.c
index 4d040bb1..b94aff3c 100644
--- a/common_modules/module_lib/src/txm_block_pool_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_block_pool_performance_info_get.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_block_pool_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_block_pool_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* block pool. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to block pool control blk */
-/* allocates Destination for the number of */
-/* allocations from this pool */
-/* releases Destination for the number of */
-/* blocks released back to pool */
-/* suspensions Destination for number of */
-/* suspensions on this pool */
-/* timeouts Destination for number of timeouts*/
-/* on this pool */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* block pool. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to block pool control blk */
+/* allocates Destination for the number of */
+/* allocations from this pool */
+/* releases Destination for the number of */
+/* blocks released back to pool */
+/* suspensions Destination for number of */
+/* suspensions on this pool */
+/* timeouts Destination for number of timeouts*/
+/* on this pool */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_block_pool_performance_info_get(TX_BLOCK_POOL *pool_ptr, ULONG *allocates, ULONG *releases, ULONG *suspensions, ULONG *timeouts)
@@ -85,3 +88,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_pool_performance_system_info_get.c b/common_modules/module_lib/src/txm_block_pool_performance_system_info_get.c
index 0603a960..1c0da014 100644
--- a/common_modules/module_lib/src/txm_block_pool_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_block_pool_performance_system_info_get.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_block_pool_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_block_pool_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves block pool performance information. */
-/* */
-/* INPUT */
-/* */
-/* allocates Destination for the total number */
-/* of block allocations */
-/* releases Destination for the total number */
-/* of blocks released */
-/* suspensions Destination for the total number */
-/* of suspensions */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves block pool performance information. */
+/* */
+/* INPUT */
+/* */
+/* allocates Destination for the total number */
+/* of block allocations */
+/* releases Destination for the total number */
+/* of blocks released */
+/* suspensions Destination for the total number */
+/* of suspensions */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_block_pool_performance_system_info_get(ULONG *allocates, ULONG *releases, ULONG *suspensions, ULONG *timeouts)
@@ -82,3 +85,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_pool_prioritize.c b/common_modules/module_lib/src/txm_block_pool_prioritize.c
index 67b0b4a2..7799331a 100644
--- a/common_modules/module_lib/src/txm_block_pool_prioritize.c
+++ b/common_modules/module_lib/src/txm_block_pool_prioritize.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_block_pool_prioritize PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_POOL_PRIORITIZE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_block_pool_prioritize PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the block pool prioritize call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the block pool prioritize call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_pool_prioritize(TX_BLOCK_POOL *pool_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_block_release.c b/common_modules/module_lib/src/txm_block_release.c
index 370f8821..446f8d38 100644
--- a/common_modules/module_lib/src/txm_block_release.c
+++ b/common_modules/module_lib/src/txm_block_release.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_block_release PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BLOCK_RELEASE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_block_release PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the block release function call. */
-/* */
-/* INPUT */
-/* */
-/* block_ptr Pointer to memory block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_PTR_ERROR Invalid memory block pointer */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the block release function call. */
+/* */
+/* INPUT */
+/* */
+/* block_ptr Pointer to memory block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_PTR_ERROR Invalid memory block pointer */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_block_release(VOID *block_ptr)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_allocate.c b/common_modules/module_lib/src/txm_byte_allocate.c
index 193c7868..c1f4d8fe 100644
--- a/common_modules/module_lib/src/txm_byte_allocate.c
+++ b/common_modules/module_lib/src/txm_byte_allocate.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_byte_allocate PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_ALLOCATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_byte_allocate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in allocate bytes function call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* memory_ptr Pointer to place allocated bytes */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in allocate bytes function call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* memory_ptr Pointer to place allocated bytes */
/* pointer */
-/* memory_size Number of bytes to allocate */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid memory pool pointer */
-/* TX_PTR_ERROR Invalid destination pointer */
-/* TX_WAIT_ERROR Invalid wait option */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* TX_SIZE_ERROR Invalid size of memory request */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* memory_size Number of bytes to allocate */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid memory pool pointer */
+/* TX_PTR_ERROR Invalid destination pointer */
+/* TX_WAIT_ERROR Invalid wait option */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* TX_SIZE_ERROR Invalid size of memory request */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_allocate(TX_BYTE_POOL *pool_ptr, VOID **memory_ptr, ULONG memory_size, ULONG wait_option)
@@ -84,3 +87,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_create.c b/common_modules/module_lib/src/txm_byte_pool_create.c
index 170892ad..df5dd027 100644
--- a/common_modules/module_lib/src/txm_byte_pool_create.c
+++ b/common_modules/module_lib/src/txm_byte_pool_create.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_byte_pool_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_byte_pool_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the create byte pool memory */
-/* function. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* name_ptr Pointer to byte pool name */
-/* pool_start Address of beginning of pool area */
-/* pool_size Number of bytes in the byte pool */
-/* pool_control_block_size Size of byte pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid byte pool pointer */
-/* TX_PTR_ERROR Invalid pool starting address */
-/* TX_SIZE_ERROR Invalid pool size */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the create byte pool memory */
+/* function. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* name_ptr Pointer to byte pool name */
+/* pool_start Address of beginning of pool area */
+/* pool_size Number of bytes in the byte pool */
+/* pool_control_block_size Size of byte pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid byte pool pointer */
+/* TX_PTR_ERROR Invalid pool starting address */
+/* TX_SIZE_ERROR Invalid pool size */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_pool_create(TX_BYTE_POOL *pool_ptr, CHAR *name_ptr, VOID *pool_start, ULONG pool_size, UINT pool_control_block_size)
@@ -85,3 +88,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_delete.c b/common_modules/module_lib/src/txm_byte_pool_delete.c
index 3160fa91..1d75816c 100644
--- a/common_modules/module_lib/src/txm_byte_pool_delete.c
+++ b/common_modules/module_lib/src/txm_byte_pool_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_byte_pool_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_byte_pool_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the delete byte pool function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid pool pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the delete byte pool function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid pool pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_pool_delete(TX_BYTE_POOL *pool_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_info_get.c b/common_modules/module_lib/src/txm_byte_pool_info_get.c
index 29d246c7..240fb6b1 100644
--- a/common_modules/module_lib/src/txm_byte_pool_info_get.c
+++ b/common_modules/module_lib/src/txm_byte_pool_info_get.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_byte_pool_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_byte_pool_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the byte pool information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to byte pool control block*/
-/* name Destination for the pool name */
-/* available_bytes Number of free bytes in byte pool */
-/* fragments Number of fragments in byte pool */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on byte pool */
-/* suspended_count Destination for suspended count */
-/* next_pool Destination for pointer to next */
-/* byte pool on the created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_POOL_ERROR Invalid byte pool pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the byte pool information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to byte pool control block*/
+/* name Destination for the pool name */
+/* available_bytes Number of free bytes in byte pool */
+/* fragments Number of fragments in byte pool */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on byte pool */
+/* suspended_count Destination for suspended count */
+/* next_pool Destination for pointer to next */
+/* byte pool on the created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_POOL_ERROR Invalid byte pool pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes, ULONG *fragments, TX_THREAD **first_suspended, ULONG *suspended_count, TX_BYTE_POOL **next_pool)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_performance_info_get.c b/common_modules/module_lib/src/txm_byte_pool_performance_info_get.c
index fde49fdb..9c1d1766 100644
--- a/common_modules/module_lib/src/txm_byte_pool_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_byte_pool_performance_info_get.c
@@ -22,59 +22,62 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_byte_pool_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_byte_pool_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* byte pool. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to byte pool control block*/
-/* allocates Destination for number of */
-/* allocates on this pool */
-/* releases Destination for number of */
-/* releases on this pool */
-/* fragments_searched Destination for number of */
-/* fragments searched during */
-/* allocation */
-/* merges Destination for number of adjacent*/
-/* free fragments merged */
-/* splits Destination for number of */
-/* fragments split during */
-/* allocation */
-/* suspensions Destination for number of */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* byte pool. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to byte pool control block*/
+/* allocates Destination for number of */
+/* allocates on this pool */
+/* releases Destination for number of */
+/* releases on this pool */
+/* fragments_searched Destination for number of */
+/* fragments searched during */
+/* allocation */
+/* merges Destination for number of adjacent*/
+/* free fragments merged */
+/* splits Destination for number of */
+/* fragments split during */
+/* allocation */
+/* suspensions Destination for number of */
/* suspensions on this pool */
-/* timeouts Destination for number of timeouts*/
-/* on this byte pool */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* timeouts Destination for number of timeouts*/
+/* on this byte pool */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_byte_pool_performance_info_get(TX_BYTE_POOL *pool_ptr, ULONG *allocates, ULONG *releases, ULONG *fragments_searched, ULONG *merges, ULONG *splits, ULONG *suspensions, ULONG *timeouts)
@@ -96,3 +99,4 @@ ALIGN_TYPE extra_parameters[6];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_performance_system_info_get.c b/common_modules/module_lib/src/txm_byte_pool_performance_system_info_get.c
index 3bceabfa..f1f57df5 100644
--- a/common_modules/module_lib/src/txm_byte_pool_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_byte_pool_performance_system_info_get.c
@@ -22,57 +22,60 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_byte_pool_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_byte_pool_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves byte pool performance information. */
-/* */
-/* INPUT */
-/* */
-/* allocates Destination for total number of */
-/* allocates */
-/* releases Destination for total number of */
-/* releases */
-/* fragments_searched Destination for total number of */
-/* fragments searched during */
-/* allocation */
-/* merges Destination for total number of */
-/* adjacent free fragments merged */
-/* splits Destination for total number of */
-/* fragments split during */
-/* allocation */
-/* suspensions Destination for total number of */
+/* DESCRIPTION */
+/* */
+/* This function retrieves byte pool performance information. */
+/* */
+/* INPUT */
+/* */
+/* allocates Destination for total number of */
+/* allocates */
+/* releases Destination for total number of */
+/* releases */
+/* fragments_searched Destination for total number of */
+/* fragments searched during */
+/* allocation */
+/* merges Destination for total number of */
+/* adjacent free fragments merged */
+/* splits Destination for total number of */
+/* fragments split during */
+/* allocation */
+/* suspensions Destination for total number of */
/* suspensions */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_byte_pool_performance_system_info_get(ULONG *allocates, ULONG *releases, ULONG *fragments_searched, ULONG *merges, ULONG *splits, ULONG *suspensions, ULONG *timeouts)
@@ -93,3 +96,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_pool_prioritize.c b/common_modules/module_lib/src/txm_byte_pool_prioritize.c
index 268a6f2f..a86eb409 100644
--- a/common_modules/module_lib/src/txm_byte_pool_prioritize.c
+++ b/common_modules/module_lib/src/txm_byte_pool_prioritize.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_byte_pool_prioritize PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_POOL_PRIORITIZE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_byte_pool_prioritize PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the byte pool prioritize call. */
-/* */
-/* INPUT */
-/* */
-/* pool_ptr Pointer to pool control block */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the byte pool prioritize call. */
+/* */
+/* INPUT */
+/* */
+/* pool_ptr Pointer to pool control block */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_pool_prioritize(TX_BYTE_POOL *pool_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_byte_release.c b/common_modules/module_lib/src/txm_byte_release.c
index 2dbb8998..c127f30b 100644
--- a/common_modules/module_lib/src/txm_byte_release.c
+++ b/common_modules/module_lib/src/txm_byte_release.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_byte_release PORTABLE C */
-/* 6.1 */
+#ifndef TXM_BYTE_RELEASE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_byte_release PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the release byte function call. */
-/* */
-/* INPUT */
-/* */
-/* memory_ptr Pointer to allocated memory */
-/* */
-/* OUTPUT */
-/* */
-/* TX_PTR_ERROR Invalid memory pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the release byte function call. */
+/* */
+/* INPUT */
+/* */
+/* memory_ptr Pointer to allocated memory */
+/* */
+/* OUTPUT */
+/* */
+/* TX_PTR_ERROR Invalid memory pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_byte_release(VOID *memory_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
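
For illustration, a sketch of the allocate/release pairing whose release side this wrapper error-checks; my_pool is a hypothetical byte pool the module created or looked up earlier.

static UINT my_buffer_example(TX_BYTE_POOL *my_pool)
{
VOID    *buffer;
UINT    status;

    /* Allocate 256 bytes without blocking; release the same pointer when done. */
    status = tx_byte_allocate(my_pool, &buffer, 256, TX_NO_WAIT);
    if (status == TX_SUCCESS)
    {
        /* ... use buffer ... */
        status = tx_byte_release(buffer);
    }
    return(status);
}
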
diff --git a/common_modules/module_lib/src/txm_event_flags_create.c b/common_modules/module_lib/src/txm_event_flags_create.c
index cec86527..a8db0d0d 100644
--- a/common_modules/module_lib/src/txm_event_flags_create.c
+++ b/common_modules/module_lib/src/txm_event_flags_create.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the event flag creation function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to event flags group */
-/* control block */
-/* name_ptr Pointer to event flags name */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the event flag creation function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to event flags group */
+/* control block */
+/* name_ptr Pointer to event flags name */
/* event_control_block_size Size of event flags control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_GROUP_ERROR Invalid event flag group pointer */
-/* TX_CALLER_ERROR Invalid calling function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_GROUP_ERROR Invalid event flag group pointer */
+/* TX_CALLER_ERROR Invalid calling function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_create(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR *name_ptr, UINT event_control_block_size)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
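
A minimal sketch of creating a group from module code; my_group is a hypothetical control block, reused by the get and set-notify sketches further below. Application code looks the same as resident code; the extra control-block-size argument in the prototype above is expected to be filled in by the txm_module.h service mapping.

static TX_EVENT_FLAGS_GROUP my_group;    /* Hypothetical group control block. */

static UINT my_events_setup(VOID)
{

    /* Create the group; the call is forwarded through the dispatcher. */
    return(tx_event_flags_create(&my_group, "module events"));
}
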
diff --git a/common_modules/module_lib/src/txm_event_flags_delete.c b/common_modules/module_lib/src/txm_event_flags_delete.c
index e69270c5..ebe544cc 100644
--- a/common_modules/module_lib/src/txm_event_flags_delete.c
+++ b/common_modules/module_lib/src/txm_event_flags_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the delete event flags group */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to group control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_GROUP_ERROR Invalid event flag group pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the delete event flags group */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to group control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_GROUP_ERROR Invalid event flag group pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_delete(TX_EVENT_FLAGS_GROUP *group_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_event_flags_get.c b/common_modules/module_lib/src/txm_event_flags_get.c
index 7006bbe8..646ee58c 100644
--- a/common_modules/module_lib/src/txm_event_flags_get.c
+++ b/common_modules/module_lib/src/txm_event_flags_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the event flags get function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to group control block */
-/* requested_event_flags Event flags requested */
-/* get_option Specifies and/or and clear options*/
-/* actual_flags_ptr Pointer to place the actual flags */
-/* the service retrieved */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_GROUP_ERROR Invalid event flags group pointer */
-/* TX_PTR_ERROR Invalid actual flags pointer */
-/* TX_WAIT_ERROR Invalid wait option */
-/* TX_OPTION_ERROR Invalid get option */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the event flags get function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to group control block */
+/* requested_event_flags Event flags requested */
+/* get_option Specifies and/or and clear options*/
+/* actual_flags_ptr Pointer to place the actual flags */
+/* the service retrieved */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_GROUP_ERROR Invalid event flags group pointer */
+/* TX_PTR_ERROR Invalid actual flags pointer */
+/* TX_WAIT_ERROR Invalid wait option */
+/* TX_OPTION_ERROR Invalid get option */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags, UINT get_option, ULONG *actual_flags_ptr, ULONG wait_option)
@@ -87,3 +90,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
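
A sketch of the call this wrapper validates, assuming the my_group group from the creation sketch above.

static VOID my_events_wait(VOID)
{
ULONG   actual_flags;
UINT    status;

    /* Wait up to 100 ticks for flags 0x1 and 0x2 together, clearing them on success. */
    status = tx_event_flags_get(&my_group, 0x3, TX_AND_CLEAR, &actual_flags, 100);
    if (status == TX_SUCCESS)
    {
        /* Both flags were set; actual_flags holds the flags that satisfied the request. */
    }
}
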
diff --git a/common_modules/module_lib/src/txm_event_flags_info_get.c b/common_modules/module_lib/src/txm_event_flags_info_get.c
index 9861dab1..3bb8b23d 100644
--- a/common_modules/module_lib/src/txm_event_flags_info_get.c
+++ b/common_modules/module_lib/src/txm_event_flags_info_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the event flag information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to event flag group */
-/* name Destination for the event flags */
-/* group name */
-/* current_flags Current event flags */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on event flags */
-/* suspended_count Destination for suspended count */
-/* next_group Destination for pointer to next */
-/* event flag group on the created */
-/* list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_GROUP_ERROR Invalid event flag group pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the event flag information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to event flag group */
+/* name Destination for the event flags */
+/* group name */
+/* current_flags Current event flags */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on event flags */
+/* suspended_count Destination for suspended count */
+/* next_group Destination for pointer to next */
+/* event flag group on the created */
+/* list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_GROUP_ERROR Invalid event flag group pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags, TX_THREAD **first_suspended, ULONG *suspended_count, TX_EVENT_FLAGS_GROUP **next_group)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_event_flags_performance_info_get.c b/common_modules/module_lib/src/txm_event_flags_performance_info_get.c
index 43a7cf26..169a98ee 100644
--- a/common_modules/module_lib/src/txm_event_flags_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_event_flags_performance_info_get.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_event_flags_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_event_flags_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* event flag group. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to event flag group */
-/* sets Destination for the number of */
-/* event flag sets on this group */
-/* gets Destination for the number of */
-/* event flag gets on this group */
-/* suspensions Destination for the number of */
-/* event flag suspensions on this */
-/* group */
-/* timeouts Destination for number of timeouts*/
-/* on this event flag group */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* event flag group. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to event flag group */
+/* sets Destination for the number of */
+/* event flag sets on this group */
+/* gets Destination for the number of */
+/* event flag gets on this group */
+/* suspensions Destination for the number of */
+/* event flag suspensions on this */
+/* group */
+/* timeouts Destination for number of timeouts*/
+/* on this event flag group */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_event_flags_performance_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG *sets, ULONG *gets, ULONG *suspensions, ULONG *timeouts)
@@ -86,3 +89,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_event_flags_performance_system_info_get.c b/common_modules/module_lib/src/txm_event_flags_performance_system_info_get.c
index 387cd262..885cd4a1 100644
--- a/common_modules/module_lib/src/txm_event_flags_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_event_flags_performance_system_info_get.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_event_flags_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_event_flags_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves system event flag performance information. */
-/* */
-/* INPUT */
-/* */
-/* sets Destination for total number of */
-/* event flag sets */
-/* gets Destination for total number of */
-/* event flag gets */
-/* suspensions Destination for total number of */
-/* event flag suspensions */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves system event flag performance information. */
+/* */
+/* INPUT */
+/* */
+/* sets Destination for total number of */
+/* event flag sets */
+/* gets Destination for total number of */
+/* event flag gets */
+/* suspensions Destination for total number of */
+/* event flag suspensions */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_event_flags_performance_system_info_get(ULONG *sets, ULONG *gets, ULONG *suspensions, ULONG *timeouts)
@@ -82,3 +85,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_event_flags_set.c b/common_modules/module_lib/src/txm_event_flags_set.c
index 5d204ec0..a45cf4f2 100644
--- a/common_modules/module_lib/src/txm_event_flags_set.c
+++ b/common_modules/module_lib/src/txm_event_flags_set.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_set PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_SET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_set PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the set event flags function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to group control block */
-/* flags_to_set Event flags to set */
-/* set_option Specified either AND or OR */
-/* operation on the event flags */
-/* */
-/* OUTPUT */
-/* */
-/* TX_GROUP_ERROR Invalid event flags group pointer */
-/* TX_OPTION_ERROR Invalid set option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the set event flags function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Pointer to group control block */
+/* flags_to_set Event flags to set */
+/* set_option Specifies either AND or OR */
+/* operation on the event flags */
+/* */
+/* OUTPUT */
+/* */
+/* TX_GROUP_ERROR Invalid event flags group pointer */
+/* TX_OPTION_ERROR Invalid set option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set, UINT set_option)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_event_flags_set_notify.c b/common_modules/module_lib/src/txm_event_flags_set_notify.c
index f72b92a6..1bc05e36 100644
--- a/common_modules/module_lib/src/txm_event_flags_set_notify.c
+++ b/common_modules/module_lib/src/txm_event_flags_set_notify.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_event_flags_set_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_EVENT_FLAGS_SET_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_event_flags_set_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the event flags set notify */
-/* callback function call. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Pointer to group control block*/
-/* group_put_notify Application callback function */
-/* (TX_NULL disables notify) */
-/* */
-/* OUTPUT */
-/* */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the event flags set notify */
+/* callback function call. */
+/* */
+/* INPUT */
+/* */
+/* events_set_notify Application callback function */
+/* group_put_notify Application callback function */
+/* (TX_NULL disables notify) */
+/* */
+/* OUTPUT */
+/* */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_event_flags_set_notify(TX_EVENT_FLAGS_GROUP *group_ptr, VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *))
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
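
A sketch of registering a set-notify callback from module code; my_set_notify and the reuse of my_group are assumptions. In a module the callback is not invoked directly by the kernel: the resident code queues a callback request that the callback-request thread shown later in this patch dispatches in the module's own context.

static VOID my_set_notify(TX_EVENT_FLAGS_GROUP *group_ptr)
{

    /* Invoked from the module's callback-request thread. */
    TX_PARAMETER_NOT_USED(group_ptr);
}

static UINT my_notify_setup(VOID)
{

    /* Register the callback; passing TX_NULL instead would disable it. */
    return(tx_event_flags_set_notify(&my_group, my_set_notify));
}
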
diff --git a/common_modules/module_lib/src/txm_module_application_request.c b/common_modules/module_lib/src/txm_module_application_request.c
index b84b8f2c..c96ca715 100644
--- a/common_modules/module_lib/src/txm_module_application_request.c
+++ b/common_modules/module_lib/src/txm_module_application_request.c
@@ -10,69 +10,72 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* txm_module_application_request PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MODULE_APPLICATION_REQUEST_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* txm_module_application_request PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function sends an application-specific request to the resident */
-/* code. */
-/* */
-/* INPUT */
-/* */
-/* request Request ID (application defined) */
-/* param_1 First parameter */
-/* param_2 Second parameter */
-/* param_3 Third parameter */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function sends an application-specific request to the resident */
+/* code. */
+/* */
+/* INPUT */
+/* */
+/* request Request ID (application defined) */
+/* param_1 First parameter */
+/* param_2 Second parameter */
+/* param_3 Third parameter */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_kernel_call_dispatcher */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT txm_module_application_request(ULONG request, ALIGN_TYPE param_1, ALIGN_TYPE param_2, ALIGN_TYPE param_3)
{
UINT return_value;
-
+
/* Call module manager dispatcher. */
return_value = (UINT)(_txm_module_kernel_call_dispatcher)(TXM_APPLICATION_REQUEST_ID_BASE+request, param_1, param_2, param_3);
-
+
/* Return value to the caller. */
return(return_value);
}
-
+#endif
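
A sketch of a module issuing an application-specific request. The request ID and parameter meanings are defined entirely by the resident code's request handler, so MY_REQUEST_SET_LED and its use here are hypothetical; the wrapper above adds TXM_APPLICATION_REQUEST_ID_BASE before dispatching.

#define MY_REQUEST_SET_LED      1        /* Hypothetical ID known to the resident handler. */

static UINT my_led_on(VOID)
{

    /* param_1 carries the desired LED state; param_2 and param_3 are unused here. */
    return(txm_module_application_request(MY_REQUEST_SET_LED, 1, 0, 0));
}
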
diff --git a/common_modules/module_lib/src/txm_module_callback_request_thread_entry.c b/common_modules/module_lib/src/txm_module_callback_request_thread_entry.c
index bbd51104..1a055d2b 100644
--- a/common_modules/module_lib/src/txm_module_callback_request_thread_entry.c
+++ b/common_modules/module_lib/src/txm_module_callback_request_thread_entry.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#ifndef TXM_MODULE
#define TXM_MODULE
@@ -35,54 +35,56 @@
#include "tx_queue.h"
-/* Define the global module entry pointer from the start thread of the module.
+/* Define the global module entry pointer from the start thread of the module.
This structure contains the pointer to the request queue as well as the
pointer to the callback response queue. */
extern TXM_MODULE_THREAD_ENTRY_INFO *_txm_module_entry_info;
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_callback_request_thread_entry PORTABLE C */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_callback_request_thread_entry PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes all module callback requests, transferred */
-/* by the resident code via the callback queue. When the callback is */
-/* complete, the response is sent back to the resident code to */
-/* acknowledge it. */
-/* */
-/* INPUT */
-/* */
-/* id Module thread ID */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* tx_queue_receive Receive callback request */
-/* */
-/* CALLED BY */
-/* */
-/* Initial thread stack frame */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes all module callback requests, transferred */
+/* by the resident code via the callback queue. When the callback is */
+/* complete, the response is sent back to the resident code to */
+/* acknowledge it. */
+/* */
+/* INPUT */
+/* */
+/* id Module thread ID */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* tx_queue_receive Receive callback request */
+/* */
+/* CALLED BY */
+/* */
+/* Initial thread stack frame */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-VOID _txm_module_callback_request_thread_entry(ULONG id)
+VOID _txm_module_callback_request_thread_entry(ULONG id)
{
TX_QUEUE *request_queue;
@@ -95,6 +97,8 @@ VOID (*queue_send_notify)(TX_QUEUE *);
VOID (*thread_entry_exit_notify)(TX_THREAD *, UINT);
UINT status;
+ /* Disable warning of parameter not used. */
+ TX_PARAMETER_NOT_USED(id);
/* Pickup pointer to the request queue. */
request_queue = _txm_module_entry_info -> txm_module_thread_entry_info_callback_request_queue;
@@ -102,7 +106,7 @@ UINT status;
/* Loop to process callback messages from the module manager. */
while(1)
{
-
+
/* Wait for the callback request for the module. */
status = _txe_queue_receive(request_queue, (VOID *) &callback_message, TX_WAIT_FOREVER);
@@ -113,21 +117,21 @@ UINT status;
/* This should not happen - get out of the loop. */
break;
}
-
+
/* Pickup the activation count in the message. */
activation_count = callback_message.txm_module_callback_message_activation_count;
-
+
/* Loop to call the callback function the correct number of times. */
while (activation_count)
{
-
+
/* Decrement the activation count. */
activation_count--;
/* Now dispatch the callback function. */
switch (callback_message.txm_module_callback_message_type)
{
-
+
case TXM_TIMER_CALLBACK:
/* Setup timer callback pointer. */
@@ -147,7 +151,7 @@ UINT status;
(events_set_notify)((TX_EVENT_FLAGS_GROUP *) callback_message.txm_module_callback_message_param_1);
break;
-
+
case TXM_QUEUE_SEND_CALLBACK:
/* Setup queue send callback pointer. */
@@ -175,70 +179,68 @@ UINT status;
/* Call thread entry/exit notify callback. */
(thread_entry_exit_notify)((TX_THREAD *) callback_message.txm_module_callback_message_param_1, (UINT) callback_message.txm_module_callback_message_param_2);
-
+
break;
default:
#ifdef TXM_MODULE_ENABLE_NETX
-
+
/* Determine if there is a NetX callback. */
if ((callback_message.txm_module_callback_message_type >= TXM_NETX_CALLBACKS_START) && (callback_message.txm_module_callback_message_type < TXM_NETX_CALLBACKS_END))
{
-
+
/* Call the NetX module callback function. */
_txm_module_netx_callback_request(&callback_message);
}
#endif
-
+
#ifdef TXM_MODULE_ENABLE_NETXDUO
-
+
/* Determine if there is a NetX Duo callback. */
if ((callback_message.txm_module_callback_message_type >= TXM_NETXDUO_CALLBACKS_START) && (callback_message.txm_module_callback_message_type < TXM_NETXDUO_CALLBACKS_END))
{
-
+
/* Call the NetX Duo module callback function. */
_txm_module_netxduo_callback_request(&callback_message);
}
#endif
-
+
#ifdef TXM_MODULE_ENABLE_FILEX
-
+
/* Determine if there is a FileX callback. */
if ((callback_message.txm_module_callback_message_type >= TXM_FILEX_CALLBACKS_START) && (callback_message.txm_module_callback_message_type < TXM_FILEX_CALLBACKS_END))
{
-
+
/* Call the FileX module callback function. */
_txm_module_filex_callback_request(&callback_message);
}
#endif
-
+
#ifdef TXM_MODULE_ENABLE_GUIX
-
+
/* Determine if there is a GUIX callback. */
if ((callback_message.txm_module_callback_message_type >= TXM_GUIX_CALLBACKS_START) && (callback_message.txm_module_callback_message_type < TXM_GUIX_CALLBACKS_END))
{
-
+
/* Call the GUIX module callback function. */
_txm_module_guix_callback_request(&callback_message);
}
#endif
-
+
#ifdef TXM_MODULE_ENABLE_USBX
-
+
/* Determine if there is a USBX callback. */
if ((callback_message.txm_module_callback_message_type >= TXM_USBX_CALLBACKS_START) && (callback_message.txm_module_callback_message_type < TXM_USBX_CALLBACKS_END))
{
-
+
/* Call the USBX callback function. */
_txm_module_usbx_callback_request(&callback_message);
}
#endif
-
+
break;
}
}
}
}
-
-
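
The TX_PARAMETER_NOT_USED(id) line added above is the pattern this release uses to silence unused-parameter warnings without changing behavior. A minimal sketch of the same idiom in application code, with hypothetical names:

static VOID my_worker_entry(ULONG thread_input)
{

    /* The entry parameter is not needed; cast it to void instead of
       leaving a compiler warning.                                     */
    TX_PARAMETER_NOT_USED(thread_input);

    while(1)
    {
        tx_thread_sleep(100);
    }
}
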
diff --git a/common_modules/module_lib/src/txm_module_object_allocate.c b/common_modules/module_lib/src/txm_module_object_allocate.c
index 32757926..0c85ae8f 100644
--- a/common_modules/module_lib/src/txm_module_object_allocate.c
+++ b/common_modules/module_lib/src/txm_module_object_allocate.c
@@ -22,29 +22,29 @@
#define TXM_MODULE
#include "txm_module.h"
-
+#ifndef TXM_MODULE_OBJECT_ALLOCATE_CALL_NOT_USED
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
-/* _txm_module_manager_object_allocate PORTABLE C */
-/* 6.1 */
+/* _txm_module_manager_object_allocate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
-/* This function allocates memory for an object from the memory pool */
-/* supplied to txm_module_manager_initialize. */
+/* This function allocates memory for an object from the memory pool */
+/* supplied to txm_module_manager_initialize. */
/* */
/* INPUT */
/* */
-/* object_ptr Destination of object pointer on */
-/* successful allocation */
-/* object_size Size in bytes of the object to be */
-/* allocated */
-/* module_instance The module instance that the */
+/* object_ptr Destination of object pointer on */
+/* successful allocation */
+/* object_size Size in bytes of the object to be */
+/* allocated */
+/* module_instance The module instance that the */
/* object belongs to */
/* */
/* OUTPUT */
@@ -63,7 +63,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_object_allocate(VOID **object_ptr, ULONG object_size)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
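
A sketch of the usual allocate-then-create pairing: in a memory-protected module, kernel object control blocks typically come from the manager's object pool rather than the module's own data area. The txm_module_object_allocate name assumes the txm_module.h mapping of the function above; my_queue_storage is a hypothetical message area.

static ULONG my_queue_storage[32];       /* Hypothetical message area inside the module. */

static UINT my_queue_setup(TX_QUEUE **queue_ptr)
{
UINT    status;

    /* Ask the resident manager for a queue control block... */
    status = txm_module_object_allocate((VOID **) queue_ptr, sizeof(TX_QUEUE));
    if (status != TX_SUCCESS)
    {
        return(status);
    }

    /* ...then create the queue in it with one-word messages. */
    return(tx_queue_create(*queue_ptr, "module queue", TX_1_ULONG,
                           my_queue_storage, sizeof(my_queue_storage)));
}

The txm_module_object_deallocate wrapper in the next file is the counterpart that returns the control block to the pool once the object has been deleted.
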
diff --git a/common_modules/module_lib/src/txm_module_object_deallocate.c b/common_modules/module_lib/src/txm_module_object_deallocate.c
index 3936a4d8..b31ac05b 100644
--- a/common_modules/module_lib/src/txm_module_object_deallocate.c
+++ b/common_modules/module_lib/src/txm_module_object_deallocate.c
@@ -22,24 +22,24 @@
#define TXM_MODULE
#include "txm_module.h"
-
+#ifndef TXM_MODULE_OBJECT_DEALLOCATE_CALL_NOT_USED
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txm_module_manager_object_deallocate PORTABLE C */
-/* 6.1 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
-/* This function deallocates a previously allocated object. */
+/* This function deallocates a previously allocated object. */
/* */
/* INPUT */
/* */
-/* object_ptr Object pointer to deallocate */
+/* object_ptr Object pointer to deallocate */
/* */
/* OUTPUT */
/* */
@@ -57,7 +57,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_object_deallocate(VOID *object_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_module_object_pointer_get.c b/common_modules/module_lib/src/txm_module_object_pointer_get.c
index 45160b09..d19742eb 100644
--- a/common_modules/module_lib/src/txm_module_object_pointer_get.c
+++ b/common_modules/module_lib/src/txm_module_object_pointer_get.c
@@ -22,13 +22,13 @@
#define TXM_MODULE
#include "txm_module.h"
-
+#ifndef TXM_MODULE_OBJECT_POINTER_GET_CALL_NOT_USED
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txm_module_manager_object_pointer_get PORTABLE C */
-/* 6.1 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -73,7 +73,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_object_pointer_get(UINT object_type, CHAR *name, VOID **object_ptr)
@@ -87,3 +90,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_module_object_pointer_get_extended.c b/common_modules/module_lib/src/txm_module_object_pointer_get_extended.c
index 350ca3af..9f9e5c96 100644
--- a/common_modules/module_lib/src/txm_module_object_pointer_get_extended.c
+++ b/common_modules/module_lib/src/txm_module_object_pointer_get_extended.c
@@ -22,13 +22,13 @@
#define TXM_MODULE
#include "txm_module.h"
-
+#ifndef TXM_MODULE_OBJECT_POINTER_GET_EXTENDED_CALL_NOT_USED
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _txm_module_manager_object_pointer_get_extended PORTABLE C */
-/* 6.1 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -76,7 +76,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_object_pointer_get_extended(UINT object_type, CHAR *name, UINT name_length, VOID **object_ptr)
@@ -94,3 +97,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
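
A sketch of resolving a named kernel object created elsewhere (for example by the resident application or another module). The txm_module_object_pointer_get_extended name assumes the txm_module.h mapping of the function above, TXM_QUEUE_OBJECT is the assumed object-type constant, and the name-length argument is taken here as the string length without the terminator.

static TX_QUEUE *shared_queue;           /* Filled in by the lookup below. */

static UINT my_queue_find(VOID)
{

    /* Resolve the queue named "shared queue" into a usable pointer. */
    return(txm_module_object_pointer_get_extended(TXM_QUEUE_OBJECT, "shared queue",
                                                  sizeof("shared queue") - 1,
                                                  (VOID **) &shared_queue));
}
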
diff --git a/common_modules/module_lib/src/txm_module_thread_system_suspend.c b/common_modules/module_lib/src/txm_module_thread_system_suspend.c
index 8121ca4a..287f4ab9 100644
--- a/common_modules/module_lib/src/txm_module_thread_system_suspend.c
+++ b/common_modules/module_lib/src/txm_module_thread_system_suspend.c
@@ -22,50 +22,53 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_system_suspend PORTABLE C */
-/* 6.1 */
-/* */
+#ifndef TXM_THREAD_SYSTEM_SUSPEND_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_suspend PORTABLE C */
+/* 6.1.10 */
+/* */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function suspends the specified thread and changes the thread */
-/* state to the value specified. Note: delayed suspension processing */
+/* DESCRIPTION */
+/* */
+/* This function suspends the specified thread and changes the thread */
+/* state to the value specified. Note: delayed suspension processing */
/* is handled outside of this routine. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to suspend */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread to suspend */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_thread_priority_change Thread priority change */
-/* _tx_thread_shell_entry Thread shell function */
-/* _tx_thread_sleep Thread sleep */
-/* _tx_thread_suspend Application thread suspend */
-/* _tx_thread_terminate Thread terminate */
-/* Other ThreadX Components */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_priority_change Thread priority change */
+/* _tx_thread_shell_entry Thread shell function */
+/* _tx_thread_sleep Thread sleep */
+/* _tx_thread_suspend Application thread suspend */
+/* _tx_thread_terminate Thread terminate */
+/* Other ThreadX Components */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_thread_system_suspend(TX_THREAD *thread_ptr)
@@ -79,3 +82,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_create.c b/common_modules/module_lib/src/txm_mutex_create.c
index a3a89d69..f27a8ae0 100644
--- a/common_modules/module_lib/src/txm_mutex_create.c
+++ b/common_modules/module_lib/src/txm_mutex_create.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_mutex_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_mutex_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the create mutex function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* name_ptr Pointer to mutex name */
-/* inherit Initial mutex count */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the create mutex function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* name_ptr Pointer to mutex name */
+/* inherit Priority inheritance option */
/* mutex_control_block_size Size of mutex control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_MUTEX_ERROR Invalid mutex pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* TX_INHERIT_ERROR Invalid inherit option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_MUTEX_ERROR Invalid mutex pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* TX_INHERIT_ERROR Invalid inherit option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_create(TX_MUTEX *mutex_ptr, CHAR *name_ptr, UINT inherit, UINT mutex_control_block_size)
@@ -82,3 +85,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
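
A sketch of creating and using a mutex from module code; module_lock is hypothetical, and the control-block-size argument in the prototype above is expected to be supplied by the service mapping rather than by application code.

static TX_MUTEX module_lock;             /* Hypothetical mutex control block. */

static UINT my_lock_setup(VOID)
{

    /* TX_INHERIT enables priority inheritance on this mutex. */
    return(tx_mutex_create(&module_lock, "module lock", TX_INHERIT));
}

static VOID my_locked_update(VOID)
{

    tx_mutex_get(&module_lock, TX_WAIT_FOREVER);
    /* ... work protected by the lock ... */
    tx_mutex_put(&module_lock);
}
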
diff --git a/common_modules/module_lib/src/txm_mutex_delete.c b/common_modules/module_lib/src/txm_mutex_delete.c
index 6b720f3e..9ab9bbd6 100644
--- a/common_modules/module_lib/src/txm_mutex_delete.c
+++ b/common_modules/module_lib/src/txm_mutex_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_mutex_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_mutex_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the mutex delete function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_MUTEX_ERROR Invalid mutex pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the mutex delete function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_MUTEX_ERROR Invalid mutex pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_delete(TX_MUTEX *mutex_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_get.c b/common_modules/module_lib/src/txm_mutex_get.c
index 2d7da6ba..039c8e8a 100644
--- a/common_modules/module_lib/src/txm_mutex_get.c
+++ b/common_modules/module_lib/src/txm_mutex_get.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_mutex_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_mutex_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the mutex get function call. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_MUTEX_ERROR Invalid mutex pointer */
-/* TX_WAIT_ERROR Invalid wait option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the mutex get function call. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_MUTEX_ERROR Invalid mutex pointer */
+/* TX_WAIT_ERROR Invalid wait option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_get(TX_MUTEX *mutex_ptr, ULONG wait_option)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_info_get.c b/common_modules/module_lib/src/txm_mutex_info_get.c
index ea1d1ceb..acc13400 100644
--- a/common_modules/module_lib/src/txm_mutex_info_get.c
+++ b/common_modules/module_lib/src/txm_mutex_info_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_mutex_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_mutex_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the mutex information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* name Destination for the mutex name */
-/* count Destination for the owner count */
-/* owner Destination for the owner's */
-/* thread control block pointer */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on the mutex */
-/* suspended_count Destination for suspended count */
-/* next_mutex Destination for pointer to next */
-/* mutex on the created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_MUTEX_ERROR Invalid mutex pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the mutex information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* name Destination for the mutex name */
+/* count Destination for the owner count */
+/* owner Destination for the owner's */
+/* thread control block pointer */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on the mutex */
+/* suspended_count Destination for suspended count */
+/* next_mutex Destination for pointer to next */
+/* mutex on the created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_MUTEX_ERROR Invalid mutex pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner, TX_THREAD **first_suspended, ULONG *suspended_count, TX_MUTEX **next_mutex)
@@ -89,3 +92,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_performance_info_get.c b/common_modules/module_lib/src/txm_mutex_performance_info_get.c
index f6c68e6b..b02e5f6d 100644
--- a/common_modules/module_lib/src/txm_mutex_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_mutex_performance_info_get.c
@@ -22,55 +22,58 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_mutex_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_mutex_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* mutex. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* mutex. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
/* puts Destination for the number of */
-/* puts on to this mutex */
-/* gets Destination for the number of */
-/* gets on this mutex */
-/* suspensions Destination for the number of */
-/* suspensions on this mutex */
-/* timeouts Destination for number of timeouts*/
-/* on this mutex */
-/* inversions Destination for number of priority*/
-/* inversions on this mutex */
-/* inheritances Destination for number of priority*/
-/* inheritances on this mutex */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* puts on to this mutex */
+/* gets Destination for the number of */
+/* gets on this mutex */
+/* suspensions Destination for the number of */
+/* suspensions on this mutex */
+/* timeouts Destination for number of timeouts*/
+/* on this mutex */
+/* inversions Destination for number of priority*/
+/* inversions on this mutex */
+/* inheritances Destination for number of priority*/
+/* inheritances on this mutex */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_mutex_performance_info_get(TX_MUTEX *mutex_ptr, ULONG *puts, ULONG *gets, ULONG *suspensions, ULONG *timeouts, ULONG *inversions, ULONG *inheritances)
@@ -91,3 +94,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_performance_system_info_get.c b/common_modules/module_lib/src/txm_mutex_performance_system_info_get.c
index 577b35e8..00a3353b 100644
--- a/common_modules/module_lib/src/txm_mutex_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_mutex_performance_system_info_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_mutex_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_mutex_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves system mutex performance information. */
-/* */
-/* INPUT */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves system mutex performance information. */
+/* */
+/* INPUT */
+/* */
/* puts Destination for total number of */
-/* mutex puts */
-/* gets Destination for total number of */
-/* mutex gets */
-/* suspensions Destination for total number of */
-/* mutex suspensions */
-/* timeouts Destination for total number of */
-/* mutex timeouts */
-/* inversions Destination for total number of */
-/* mutex priority inversions */
-/* inheritances Destination for total number of */
-/* mutex priority inheritances */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* mutex puts */
+/* gets Destination for total number of */
+/* mutex gets */
+/* suspensions Destination for total number of */
+/* mutex suspensions */
+/* timeouts Destination for total number of */
+/* mutex timeouts */
+/* inversions Destination for total number of */
+/* mutex priority inversions */
+/* inheritances Destination for total number of */
+/* mutex priority inheritances */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions, ULONG *timeouts, ULONG *inversions, ULONG *inheritances)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_prioritize.c b/common_modules/module_lib/src/txm_mutex_prioritize.c
index a15e8cbb..5a9d6b5c 100644
--- a/common_modules/module_lib/src/txm_mutex_prioritize.c
+++ b/common_modules/module_lib/src/txm_mutex_prioritize.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_mutex_prioritize PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_PRIORITIZE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_mutex_prioritize PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the mutex prioritize call. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the mutex prioritize call. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_prioritize(TX_MUTEX *mutex_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_mutex_put.c b/common_modules/module_lib/src/txm_mutex_put.c
index 738d08c6..d2cf47e2 100644
--- a/common_modules/module_lib/src/txm_mutex_put.c
+++ b/common_modules/module_lib/src/txm_mutex_put.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_mutex_put PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MUTEX_PUT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_mutex_put PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the mutex put function call. */
-/* */
-/* INPUT */
-/* */
-/* mutex_ptr Pointer to mutex control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_MUTEX_ERROR Invalid mutex pointer */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the mutex put function call. */
+/* */
+/* INPUT */
+/* */
+/* mutex_ptr Pointer to mutex control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_MUTEX_ERROR Invalid mutex pointer */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_mutex_put(TX_MUTEX *mutex_ptr)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_create.c b/common_modules/module_lib/src/txm_queue_create.c
index a84e8398..e8aa4444 100644
--- a/common_modules/module_lib/src/txm_queue_create.c
+++ b/common_modules/module_lib/src/txm_queue_create.c
@@ -22,50 +22,53 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue create function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* name_ptr Pointer to queue name */
-/* message_size Size of each queue message */
-/* queue_start Starting address of the queue area*/
-/* queue_size Number of bytes in the queue */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue create function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* name_ptr Pointer to queue name */
+/* message_size Size of each queue message */
+/* queue_start Starting address of the queue area*/
+/* queue_size Number of bytes in the queue */
/* queue_control_block_size Size of queue control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_PTR_ERROR Invalid starting address of queue */
-/* TX_SIZE_ERROR Invalid message queue size */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_PTR_ERROR Invalid starting address of queue */
+/* TX_SIZE_ERROR Invalid message queue size */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size, VOID *queue_start, ULONG queue_size, UINT queue_control_block_size)
@@ -85,3 +88,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_delete.c b/common_modules/module_lib/src/txm_queue_delete.c
index aae1004e..fccd8d0d 100644
--- a/common_modules/module_lib/src/txm_queue_delete.c
+++ b/common_modules/module_lib/src/txm_queue_delete.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue delete function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue delete function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_delete(TX_QUEUE *queue_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_flush.c b/common_modules/module_lib/src/txm_queue_flush.c
index c660e19f..caac5fcf 100644
--- a/common_modules/module_lib/src/txm_queue_flush.c
+++ b/common_modules/module_lib/src/txm_queue_flush.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_flush PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_FLUSH_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_flush PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue flush function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue flush function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_flush(TX_QUEUE *queue_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_front_send.c b/common_modules/module_lib/src/txm_queue_front_send.c
index 47232930..2ba07744 100644
--- a/common_modules/module_lib/src/txm_queue_front_send.c
+++ b/common_modules/module_lib/src/txm_queue_front_send.c
@@ -22,47 +22,50 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_front_send PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_FRONT_SEND_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_front_send PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue send function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* source_ptr Pointer to message source */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_PTR_ERROR Invalid source pointer - NULL */
-/* TX_WAIT_ERROR Invalid wait option - non thread */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue send function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* source_ptr Pointer to message source */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_PTR_ERROR Invalid source pointer - NULL */
+/* TX_WAIT_ERROR Invalid wait option - non thread */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_front_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option)
@@ -76,3 +79,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_info_get.c b/common_modules/module_lib/src/txm_queue_info_get.c
index 045dd26d..3fae1cbb 100644
--- a/common_modules/module_lib/src/txm_queue_info_get.c
+++ b/common_modules/module_lib/src/txm_queue_info_get.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* name Destination for the queue name */
-/* enqueued Destination for enqueued count */
-/* available_storage Destination for available storage */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on this queue */
-/* suspended_count Destination for suspended count */
-/* next_queue Destination for pointer to next */
-/* queue on the created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* name Destination for the queue name */
+/* enqueued Destination for enqueued count */
+/* available_storage Destination for available storage */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on this queue */
+/* suspended_count Destination for suspended count */
+/* next_queue Destination for pointer to next */
+/* queue on the created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_info_get(TX_QUEUE *queue_ptr, CHAR **name, ULONG *enqueued, ULONG *available_storage, TX_THREAD **first_suspended, ULONG *suspended_count, TX_QUEUE **next_queue)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_performance_info_get.c b/common_modules/module_lib/src/txm_queue_performance_info_get.c
index d68998e9..f3840cc8 100644
--- a/common_modules/module_lib/src/txm_queue_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_queue_performance_info_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_queue_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_queue_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* queue. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* messages_sent Destination for messages sent */
-/* messages_received Destination for messages received */
-/* empty_suspensions Destination for number of empty */
-/* queue suspensions */
-/* full_suspensions Destination for number of full */
-/* queue suspensions */
-/* full_errors Destination for queue full errors */
-/* returned - no suspension */
-/* timeouts Destination for number of timeouts*/
-/* on this queue */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* queue. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* messages_sent Destination for messages sent */
+/* messages_received Destination for messages received */
+/* empty_suspensions Destination for number of empty */
+/* queue suspensions */
+/* full_suspensions Destination for number of full */
+/* queue suspensions */
+/* full_errors Destination for queue full errors */
+/* returned - no suspension */
+/* timeouts Destination for number of timeouts*/
+/* on this queue */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_queue_performance_info_get(TX_QUEUE *queue_ptr, ULONG *messages_sent, ULONG *messages_received, ULONG *empty_suspensions, ULONG *full_suspensions, ULONG *full_errors, ULONG *timeouts)
@@ -89,3 +92,4 @@ ALIGN_TYPE extra_parameters[5];
/* Return value to the caller. */
return(return_value);
}
+#endif
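
Because each wrapper can now be compiled out, any call site that uses one of these services has to disappear under the same condition, or the module fails to link against the missing wrapper. A small sketch, assuming a hypothetical helper in module application code; the wrapper name and its seven parameters are taken from the prototype above, and everything else is illustrative.

#define TXM_MODULE
#include "txm_module.h"

/* Hypothetical helper: gather queue statistics only when the wrapper is present. */
static VOID my_queue_stats(TX_QUEUE *queue_ptr)
{
#ifndef TXM_QUEUE_PERFORMANCE_INFO_GET_CALL_NOT_USED
ULONG messages_sent, messages_received, empty_suspensions;
ULONG full_suspensions, full_errors, timeouts;

    /* Call the wrapper defined above directly; same seven parameters. */
    _tx_queue_performance_info_get(queue_ptr, &messages_sent, &messages_received,
                                   &empty_suspensions, &full_suspensions,
                                   &full_errors, &timeouts);
#else
    (void) queue_ptr;    /* Wrapper excluded from this build; nothing to do. */
#endif
}
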
diff --git a/common_modules/module_lib/src/txm_queue_performance_system_info_get.c b/common_modules/module_lib/src/txm_queue_performance_system_info_get.c
index 8ef7459f..18b33e39 100644
--- a/common_modules/module_lib/src/txm_queue_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_queue_performance_system_info_get.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_queue_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_queue_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves queue system performance information. */
-/* */
-/* INPUT */
-/* */
-/* messages_sent Destination for total messages */
-/* sent */
-/* messages_received Destination for total messages */
-/* received */
-/* empty_suspensions Destination for total empty */
-/* queue suspensions */
-/* full_suspensions Destination for total full */
-/* queue suspensions */
-/* full_errors Destination for total queue full */
-/* errors returned - no suspension */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves queue system performance information. */
+/* */
+/* INPUT */
+/* */
+/* messages_sent Destination for total messages */
+/* sent */
+/* messages_received Destination for total messages */
+/* received */
+/* empty_suspensions Destination for total empty */
+/* queue suspensions */
+/* full_suspensions Destination for total full */
+/* queue suspensions */
+/* full_errors Destination for total queue full */
+/* errors returned - no suspension */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_queue_performance_system_info_get(ULONG *messages_sent, ULONG *messages_received, ULONG *empty_suspensions, ULONG *full_suspensions, ULONG *full_errors, ULONG *timeouts)
@@ -88,3 +91,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_prioritize.c b/common_modules/module_lib/src/txm_queue_prioritize.c
index bd2c1d1d..3c08d3e7 100644
--- a/common_modules/module_lib/src/txm_queue_prioritize.c
+++ b/common_modules/module_lib/src/txm_queue_prioritize.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_prioritize PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_PRIORITIZE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_prioritize PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue prioritize call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue prioritize call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_prioritize(TX_QUEUE *queue_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_receive.c b/common_modules/module_lib/src/txm_queue_receive.c
index eaa4fa77..a18b34db 100644
--- a/common_modules/module_lib/src/txm_queue_receive.c
+++ b/common_modules/module_lib/src/txm_queue_receive.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_receive PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_RECEIVE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_receive PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue receive function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* destination_ptr Pointer to message destination */
-/* **** MUST BE LARGE ENOUGH TO */
-/* HOLD MESSAGE **** */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_PTR_ERROR Invalid destination pointer (NULL)*/
-/* TX_WAIT_ERROR Invalid wait option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue receive function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* destination_ptr Pointer to message destination */
+/* **** MUST BE LARGE ENOUGH TO */
+/* HOLD MESSAGE **** */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_PTR_ERROR Invalid destination pointer (NULL)*/
+/* TX_WAIT_ERROR Invalid wait option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_receive(TX_QUEUE *queue_ptr, VOID *destination_ptr, ULONG wait_option)
@@ -78,3 +81,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_send.c b/common_modules/module_lib/src/txm_queue_send.c
index 23f97fc2..9f26f078 100644
--- a/common_modules/module_lib/src/txm_queue_send.c
+++ b/common_modules/module_lib/src/txm_queue_send.c
@@ -22,47 +22,50 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_send PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_SEND_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_send PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue send function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block */
-/* source_ptr Pointer to message source */
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_QUEUE_ERROR Invalid queue pointer */
-/* TX_PTR_ERROR Invalid source pointer - NULL */
-/* TX_WAIT_ERROR Invalid wait option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue send function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block */
+/* source_ptr Pointer to message source */
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_QUEUE_ERROR Invalid queue pointer */
+/* TX_PTR_ERROR Invalid source pointer - NULL */
+/* TX_WAIT_ERROR Invalid wait option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option)
@@ -76,3 +79,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_queue_send_notify.c b/common_modules/module_lib/src/txm_queue_send_notify.c
index fece5a5a..fb8091a1 100644
--- a/common_modules/module_lib/src/txm_queue_send_notify.c
+++ b/common_modules/module_lib/src/txm_queue_send_notify.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_queue_send_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_QUEUE_SEND_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_queue_send_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the queue send notify */
-/* callback function call. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Pointer to queue control block*/
-/* queue_send_notify Application callback function */
-/* (TX_NULL disables notify) */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the queue send notify */
+/* callback function call. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Pointer to queue control block*/
+/* queue_send_notify Application callback function */
+/* (TX_NULL disables notify) */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_queue_send_notify(TX_QUEUE *queue_ptr, VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr))
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_ceiling_put.c b/common_modules/module_lib/src/txm_semaphore_ceiling_put.c
index 0b325d72..a596e68d 100644
--- a/common_modules/module_lib/src/txm_semaphore_ceiling_put.c
+++ b/common_modules/module_lib/src/txm_semaphore_ceiling_put.c
@@ -22,46 +22,49 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_ceiling_put PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_CEILING_PUT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_ceiling_put PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore ceiling put */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore */
-/* ceiling Maximum value of semaphore */
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* TX_INVALID_CEILING Invalid semaphore ceiling */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore ceiling put */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore */
+/* ceiling Maximum value of semaphore */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* TX_INVALID_CEILING Invalid semaphore ceiling */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_ceiling_put(TX_SEMAPHORE *semaphore_ptr, ULONG ceiling)
@@ -75,3 +78,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_create.c b/common_modules/module_lib/src/txm_semaphore_create.c
index c20ac76d..43267e23 100644
--- a/common_modules/module_lib/src/txm_semaphore_create.c
+++ b/common_modules/module_lib/src/txm_semaphore_create.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the create semaphore function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* name_ptr Pointer to semaphore name */
-/* initial_count Initial semaphore count */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the create semaphore function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* name_ptr Pointer to semaphore name */
+/* initial_count Initial semaphore count */
/* semaphore_control_block_size Size of semaphore control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_create(TX_SEMAPHORE *semaphore_ptr, CHAR *name_ptr, ULONG initial_count, UINT semaphore_control_block_size)
@@ -81,3 +84,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_delete.c b/common_modules/module_lib/src/txm_semaphore_delete.c
index 1fa5bf5f..1be358a7 100644
--- a/common_modules/module_lib/src/txm_semaphore_delete.c
+++ b/common_modules/module_lib/src/txm_semaphore_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore delete function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore delete function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_delete(TX_SEMAPHORE *semaphore_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_get.c b/common_modules/module_lib/src/txm_semaphore_get.c
index f7ecd3e5..9b1d9d2a 100644
--- a/common_modules/module_lib/src/txm_semaphore_get.c
+++ b/common_modules/module_lib/src/txm_semaphore_get.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore get function call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* wait_option Suspension option */
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* TX_WAIT_ERROR Invalid wait option */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore get function call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* wait_option Suspension option */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* TX_WAIT_ERROR Invalid wait option */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_info_get.c b/common_modules/module_lib/src/txm_semaphore_info_get.c
index cd2cac80..5a91bb7f 100644
--- a/common_modules/module_lib/src/txm_semaphore_info_get.c
+++ b/common_modules/module_lib/src/txm_semaphore_info_get.c
@@ -22,52 +22,55 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* name Destination for the semaphore name*/
-/* current_value Destination for current value of */
-/* the semaphore */
-/* first_suspended Destination for pointer of first */
-/* thread suspended on semaphore */
-/* suspended_count Destination for suspended count */
-/* next_semaphore Destination for pointer to next */
-/* semaphore on the created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* name Destination for the semaphore name*/
+/* current_value Destination for current value of */
+/* the semaphore */
+/* first_suspended Destination for pointer of first */
+/* thread suspended on semaphore */
+/* suspended_count Destination for suspended count */
+/* next_semaphore Destination for pointer to next */
+/* semaphore on the created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value, TX_THREAD **first_suspended, ULONG *suspended_count, TX_SEMAPHORE **next_semaphore)
@@ -87,3 +90,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_performance_info_get.c b/common_modules/module_lib/src/txm_semaphore_performance_info_get.c
index f227f84a..d294eaf7 100644
--- a/common_modules/module_lib/src/txm_semaphore_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_semaphore_performance_info_get.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_semaphore_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_semaphore_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* semaphore. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* semaphore. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
/* puts Destination for the number of */
-/* puts on to this semaphore */
-/* gets Destination for the number of */
-/* gets on this semaphore */
-/* suspensions Destination for the number of */
-/* suspensions on this semaphore */
-/* timeouts Destination for number of timeouts*/
-/* on this semaphore */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* puts on to this semaphore */
+/* gets Destination for the number of */
+/* gets on this semaphore */
+/* suspensions Destination for the number of */
+/* suspensions on this semaphore */
+/* timeouts Destination for number of timeouts*/
+/* on this semaphore */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_semaphore_performance_info_get(TX_SEMAPHORE *semaphore_ptr, ULONG *puts, ULONG *gets, ULONG *suspensions, ULONG *timeouts)
@@ -85,3 +88,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_performance_system_info_get.c b/common_modules/module_lib/src/txm_semaphore_performance_system_info_get.c
index 372bab2c..faa2f877 100644
--- a/common_modules/module_lib/src/txm_semaphore_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_semaphore_performance_system_info_get.c
@@ -22,49 +22,52 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_semaphore_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_semaphore_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves system semaphore performance information. */
-/* */
-/* INPUT */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves system semaphore performance information. */
+/* */
+/* INPUT */
+/* */
/* puts Destination for total number of */
-/* semaphore puts */
-/* gets Destination for total number of */
-/* semaphore gets */
-/* suspensions Destination for total number of */
-/* semaphore suspensions */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* semaphore puts */
+/* gets Destination for total number of */
+/* semaphore gets */
+/* suspensions Destination for total number of */
+/* semaphore suspensions */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_semaphore_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions, ULONG *timeouts)
@@ -82,3 +85,4 @@ ALIGN_TYPE extra_parameters[2];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_prioritize.c b/common_modules/module_lib/src/txm_semaphore_prioritize.c
index 142696aa..c8e40279 100644
--- a/common_modules/module_lib/src/txm_semaphore_prioritize.c
+++ b/common_modules/module_lib/src/txm_semaphore_prioritize.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_semaphore_prioritize PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_PRIORITIZE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_semaphore_prioritize PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore prioritize call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore prioritize call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_prioritize(TX_SEMAPHORE *semaphore_ptr)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_semaphore_put.c b/common_modules/module_lib/src/txm_semaphore_put.c
index e92c459b..a59712a8 100644
--- a/common_modules/module_lib/src/txm_semaphore_put.c
+++ b/common_modules/module_lib/src/txm_semaphore_put.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_put PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_PUT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_put PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore put function call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore control block*/
-/* */
-/* OUTPUT */
-/* */
-/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore put function call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore control block*/
+/* */
+/* OUTPUT */
+/* */
+/* TX_SEMAPHORE_ERROR Invalid semaphore pointer */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_put(TX_SEMAPHORE *semaphore_ptr)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
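
For reference, a minimal module-side use of this service, assuming txm_module.h maps the standard tx_semaphore_put name onto the wrapper above, as in a typical module build; the thread and semaphore names are illustrative.

    #include "txm_module.h"

    extern TX_SEMAPHORE item_ready;              /* created elsewhere */

    VOID producer_entry(ULONG input)
    {
        (void)input;                             /* entry input unused here */

        for (;;)
        {
            /* ... produce one item ... */

            /* Signal the consumer; an invalid pointer is reported as
               TX_SEMAPHORE_ERROR by the checks described above. */
            if (tx_semaphore_put(&item_ready) != TX_SUCCESS)
            {
                break;
            }

            tx_thread_sleep(10);
        }
    }
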
diff --git a/common_modules/module_lib/src/txm_semaphore_put_notify.c b/common_modules/module_lib/src/txm_semaphore_put_notify.c
index a58991e0..5b79df25 100644
--- a/common_modules/module_lib/src/txm_semaphore_put_notify.c
+++ b/common_modules/module_lib/src/txm_semaphore_put_notify.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_semaphore_put_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_SEMAPHORE_PUT_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_semaphore_put_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the semaphore put notify */
-/* callback function call. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Pointer to semaphore */
-/* semaphore_put_notify Application callback function */
-/* (TX_NULL disables notify) */
-/* */
-/* OUTPUT */
-/* */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the semaphore put notify */
+/* callback function call. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Pointer to semaphore */
+/* semaphore_put_notify Application callback function */
+/* (TX_NULL disables notify) */
+/* */
+/* OUTPUT */
+/* */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_semaphore_put_notify(TX_SEMAPHORE *semaphore_ptr, VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr))
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
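
A short registration sketch under the same mapping assumption (tx_semaphore_put_notify resolving to this wrapper); the function names are made up.

    #include "txm_module.h"

    static VOID put_notify(TX_SEMAPHORE *notify_semaphore_ptr)
    {
        (void)notify_semaphore_ptr;
        /* Invoked after each successful put on the registered semaphore. */
    }

    UINT enable_put_notification(TX_SEMAPHORE *semaphore_ptr)
    {
        /* Passing TX_NULL instead of put_notify disables the notification.
           Kernels built with TX_DISABLE_NOTIFY_CALLBACKS return
           TX_FEATURE_NOT_ENABLED here. */
        return tx_semaphore_put_notify(semaphore_ptr, put_notify);
    }
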
diff --git a/common_modules/module_lib/src/txm_thread_create.c b/common_modules/module_lib/src/txm_thread_create.c
index bd910e6b..e8cb124a 100644
--- a/common_modules/module_lib/src/txm_thread_create.c
+++ b/common_modules/module_lib/src/txm_thread_create.c
@@ -22,58 +22,61 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread create function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Thread control block pointer */
-/* name Pointer to thread name string */
-/* entry_function Entry function of the thread */
-/* entry_input 32-bit input value to thread */
-/* stack_start Pointer to start of stack */
-/* stack_size Stack size in bytes */
-/* priority Priority of thread (0-31) */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread create function call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Thread control block pointer */
+/* name Pointer to thread name string */
+/* entry_function Entry function of the thread */
+/* entry_input 32-bit input value to thread */
+/* stack_start Pointer to start of stack */
+/* stack_size Stack size in bytes */
+/* priority Priority of thread (0-31) */
/* preempt_threshold Preemption threshold */
-/* time_slice Thread time-slice value */
-/* auto_start Automatic start selection */
+/* time_slice Thread time-slice value */
+/* auto_start Automatic start selection */
/* thread_control_block_size Size of thread control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_PTR_ERROR Invalid entry point or stack */
-/* address */
-/* TX_SIZE_ERROR Invalid stack size -too small */
-/* TX_PRIORITY_ERROR Invalid thread priority */
-/* TX_THRESH_ERROR Invalid preemption threshold */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_PTR_ERROR Invalid entry point or stack */
+/* address */
+/*    TX_SIZE_ERROR                         Invalid stack size, too small */
+/* TX_PRIORITY_ERROR Invalid thread priority */
+/* TX_THRESH_ERROR Invalid preemption threshold */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr, VOID (*entry_function)(ULONG entry_input), ULONG entry_input, VOID *stack_start, ULONG stack_size, UINT priority, UINT preempt_threshold, ULONG time_slice, UINT auto_start, UINT thread_control_block_size)
@@ -98,3 +101,4 @@ ALIGN_TYPE extra_parameters[9];
/* Return value to the caller. */
return(return_value);
}
+#endif
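
A module-side creation sketch. It assumes the usual mapping in which the tx_thread_create service name expands to this wrapper with sizeof(TX_THREAD) supplied as the extra thread_control_block_size argument; the static stack and all numeric values are illustrative (module applications often carve stacks out of a byte pool instead).

    #include "txm_module.h"

    #define DEMO_STACK_SIZE     1024

    static TX_THREAD    demo_thread;
    static UCHAR        demo_stack[DEMO_STACK_SIZE];

    static VOID demo_entry(ULONG input)
    {
        (void)input;
        for (;;)
        {
            tx_thread_sleep(100);                /* placeholder work loop */
        }
    }

    UINT create_demo_thread(VOID)
    {
        /* Priority 16 with preemption-threshold 16 (threshold disabled),
           no time-slicing, started immediately.  TX_SIZE_ERROR,
           TX_PRIORITY_ERROR, etc. come back from the checks listed above. */
        return tx_thread_create(&demo_thread, "demo thread", demo_entry, 0,
                                demo_stack, DEMO_STACK_SIZE,
                                16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);
    }
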
diff --git a/common_modules/module_lib/src/txm_thread_delete.c b/common_modules/module_lib/src/txm_thread_delete.c
index 6e1e6b76..8548d243 100644
--- a/common_modules/module_lib/src/txm_thread_delete.c
+++ b/common_modules/module_lib/src/txm_thread_delete.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread delete function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to suspend */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread delete function call. */
+/* */
+/* INPUT */
+/* */
+/*    thread_ptr                            Pointer to thread to delete   */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_delete(TX_THREAD *thread_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_entry_exit_notify.c b/common_modules/module_lib/src/txm_thread_entry_exit_notify.c
index e8fa8b08..ab652457 100644
--- a/common_modules/module_lib/src/txm_thread_entry_exit_notify.c
+++ b/common_modules/module_lib/src/txm_thread_entry_exit_notify.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_entry_exit_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_ENTRY_EXIT_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_entry_exit_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread entry/exit notify */
-/* callback function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread */
-/* thread_entry_exit_notify Pointer to notify callback */
-/* function, TX_NULL to disable*/
-/* */
-/* OUTPUT */
-/* */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread entry/exit notify */
+/* callback function call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* thread_entry_exit_notify Pointer to notify callback */
+/* function, TX_NULL to disable*/
+/* */
+/* OUTPUT */
+/* */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_entry_exit_notify(TX_THREAD *thread_ptr, VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type))
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
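
A registration sketch for the entry/exit notification, with the standard tx_thread_entry_exit_notify mapping assumed; TX_THREAD_ENTRY and TX_THREAD_EXIT are the type values ThreadX passes to the callback.

    #include "txm_module.h"

    static VOID entry_exit_notify(TX_THREAD *notify_thread_ptr, UINT type)
    {
        (void)notify_thread_ptr;

        if (type == TX_THREAD_ENTRY)
        {
            /* The thread is about to run its entry function. */
        }
        else if (type == TX_THREAD_EXIT)
        {
            /* The thread's entry function has returned. */
        }
    }

    UINT watch_thread(TX_THREAD *thread_ptr)
    {
        /* TX_NULL in place of entry_exit_notify disables the notification. */
        return tx_thread_entry_exit_notify(thread_ptr, entry_exit_notify);
    }
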
diff --git a/common_modules/module_lib/src/txm_thread_identify.c b/common_modules/module_lib/src/txm_thread_identify.c
index 7dc4297d..99b14a54 100644
--- a/common_modules/module_lib/src/txm_thread_identify.c
+++ b/common_modules/module_lib/src/txm_thread_identify.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_identify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_IDENTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_identify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function returns the control block pointer of the currently */
-/* executing thread. If the return value is NULL, no thread is */
-/* executing. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD * Pointer to control block of */
-/* currently executing thread */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function returns the control block pointer of the currently */
+/* executing thread. If the return value is NULL, no thread is */
+/* executing. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD * Pointer to control block of */
+/* currently executing thread */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
TX_THREAD *_tx_thread_identify(VOID)
@@ -74,3 +77,4 @@ TX_THREAD *return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
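
A small illustrative helper built on this service (usual tx_thread_identify mapping assumed); it compares the currently executing thread against a known control block.

    #include "txm_module.h"

    /* TX_TRUE if the caller is thread_ptr, TX_FALSE otherwise, including
       the case where no thread is executing and TX_NULL is returned. */
    UINT running_in(TX_THREAD *thread_ptr)
    {
        return (tx_thread_identify() == thread_ptr) ? TX_TRUE : TX_FALSE;
    }
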
diff --git a/common_modules/module_lib/src/txm_thread_info_get.c b/common_modules/module_lib/src/txm_thread_info_get.c
index ca9f4684..4513dca3 100644
--- a/common_modules/module_lib/src/txm_thread_info_get.c
+++ b/common_modules/module_lib/src/txm_thread_info_get.c
@@ -22,55 +22,58 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread control block */
-/* name Destination for the thread name */
-/* state Destination for thread state */
-/* run_count Destination for thread run count */
-/* priority Destination for thread priority */
-/* preemption_threshold Destination for thread preemption-*/
-/* threshold */
-/* time_slice Destination for thread time-slice */
-/* next_thread Destination for next created */
-/* thread */
-/* next_suspended_thread Destination for next suspended */
-/* thread */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control block */
+/* name Destination for the thread name */
+/* state Destination for thread state */
+/* run_count Destination for thread run count */
+/* priority Destination for thread priority */
+/* preemption_threshold Destination for thread preemption-*/
+/* threshold */
+/* time_slice Destination for thread time-slice */
+/* next_thread Destination for next created */
+/* thread */
+/* next_suspended_thread Destination for next suspended */
+/* thread */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count, UINT *priority, UINT *preemption_threshold, ULONG *time_slice, TX_THREAD **next_thread, TX_THREAD **next_suspended_thread)
@@ -93,3 +96,4 @@ ALIGN_TYPE extra_parameters[7];
/* Return value to the caller. */
return(return_value);
}
+#endif
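
A retrieval sketch, again assuming the standard tx_thread_info_get mapping; destinations that are not needed are passed as TX_NULL, which the service accepts.

    #include "txm_module.h"

    UINT snapshot_thread(TX_THREAD *thread_ptr, CHAR **name, UINT *state,
                         UINT *priority)
    {
        ULONG   run_count;
        UINT    preemption_threshold;

        /* Only the fields of interest are collected; the time-slice and
           thread-list destinations are skipped with TX_NULL. */
        return tx_thread_info_get(thread_ptr, name, state, &run_count,
                                  priority, &preemption_threshold,
                                  TX_NULL, TX_NULL, TX_NULL);
    }
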
diff --git a/common_modules/module_lib/src/txm_thread_interrupt_control.c b/common_modules/module_lib/src/txm_thread_interrupt_control.c
index 628672d3..1352fccc 100644
--- a/common_modules/module_lib/src/txm_thread_interrupt_control.c
+++ b/common_modules/module_lib/src/txm_thread_interrupt_control.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_control PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_INTERRUPT_CONTROL_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function is responsible for changing the interrupt lockout */
-/* posture of the system. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* status | old_posture Return status if feature not */
-/* enabled, old interrupt lockout */
-/* posture if feature enabled. */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* status | old_posture Return status if feature not */
+/* enabled, old interrupt lockout */
+/* posture if feature enabled. */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_thread_interrupt_control(UINT new_posture)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
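
A save/restore sketch around a short critical section, assuming tx_interrupt_control maps onto this wrapper for modules and that the kernel honors the request (per the OUTPUT note above, a status rather than the old posture is returned when the feature is not enabled). TX_INT_DISABLE is the portable lockout posture defined by each port.

    #include "txm_module.h"

    VOID increment_shared_counter(volatile ULONG *counter)
    {
        UINT old_posture;

        /* Lock out interrupts, touch the shared data, then restore the
           posture that was in effect before. */
        old_posture = tx_interrupt_control(TX_INT_DISABLE);

        (*counter)++;

        tx_interrupt_control(old_posture);
    }
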
diff --git a/common_modules/module_lib/src/txm_thread_performance_info_get.c b/common_modules/module_lib/src/txm_thread_performance_info_get.c
index c1cac33c..2b2fa4a1 100644
--- a/common_modules/module_lib/src/txm_thread_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_thread_performance_info_get.c
@@ -22,68 +22,71 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* thread. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread control block */
-/* resumptions Destination for number of times */
-/* thread was resumed */
-/* suspensions Destination for number of times */
-/* thread was suspended */
-/* solicited_preemptions Destination for number of times */
-/* thread called another service */
-/* that resulted in preemption */
-/* interrupt_preemptions Destination for number of times */
-/* thread was preempted by another */
-/* thread made ready in Interrupt */
-/* Service Routine (ISR) */
-/* priority_inversions Destination for number of times */
-/* a priority inversion was */
-/* detected for this thread */
-/* time_slices Destination for number of times */
-/* thread was time-sliced */
-/* relinquishes Destination for number of thread */
-/* relinquishes */
-/* timeouts Destination for number of timeouts*/
-/* for thread */
-/* wait_aborts Destination for number of wait */
-/* aborts for thread */
-/* last_preempted_by Destination for pointer of the */
-/* thread that last preempted this */
-/* thread */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* thread. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control block */
+/* resumptions Destination for number of times */
+/* thread was resumed */
+/* suspensions Destination for number of times */
+/* thread was suspended */
+/* solicited_preemptions Destination for number of times */
+/* thread called another service */
+/* that resulted in preemption */
+/* interrupt_preemptions Destination for number of times */
+/* thread was preempted by another */
+/* thread made ready in Interrupt */
+/* Service Routine (ISR) */
+/* priority_inversions Destination for number of times */
+/* a priority inversion was */
+/* detected for this thread */
+/* time_slices Destination for number of times */
+/* thread was time-sliced */
+/* relinquishes Destination for number of thread */
+/* relinquishes */
+/* timeouts Destination for number of timeouts*/
+/* for thread */
+/* wait_aborts Destination for number of wait */
+/* aborts for thread */
+/* last_preempted_by Destination for pointer of the */
+/* thread that last preempted this */
+/* thread */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions, ULONG *solicited_preemptions, ULONG *interrupt_preemptions, ULONG *priority_inversions, ULONG *time_slices, ULONG *relinquishes, ULONG *timeouts, ULONG *wait_aborts, TX_THREAD **last_preempted_by)
@@ -108,3 +111,4 @@ ALIGN_TYPE extra_parameters[9];
/* Return value to the caller. */
return(return_value);
}
+#endif
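
A hedged retrieval sketch: it assumes the usual tx_thread_performance_info_get mapping and a kernel built with TX_THREAD_ENABLE_PERFORMANCE_INFO (otherwise TX_FEATURE_NOT_ENABLED comes back); unneeded destinations are passed as TX_NULL.

    #include "txm_module.h"

    UINT get_suspend_resume_counts(TX_THREAD *thread_ptr,
                                   ULONG *resumptions, ULONG *suspensions)
    {
        /* Only the first two counters are of interest; the remaining eight
           destinations are skipped with TX_NULL. */
        return tx_thread_performance_info_get(thread_ptr,
                                              resumptions, suspensions,
                                              TX_NULL, TX_NULL, TX_NULL,
                                              TX_NULL, TX_NULL, TX_NULL,
                                              TX_NULL, TX_NULL);
    }
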
diff --git a/common_modules/module_lib/src/txm_thread_performance_system_info_get.c b/common_modules/module_lib/src/txm_thread_performance_system_info_get.c
index 5e67e96e..7093a564 100644
--- a/common_modules/module_lib/src/txm_thread_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_thread_performance_system_info_get.c
@@ -22,68 +22,71 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves thread system performance information. */
-/* */
-/* INPUT */
-/* */
-/* resumptions Destination for total number of */
-/* thread resumptions */
-/* suspensions Destination for total number of */
-/* thread suspensions */
-/* solicited_preemptions Destination for total number of */
-/* thread preemption from thread */
-/* API calls */
-/* interrupt_preemptions Destination for total number of */
-/* thread preemptions as a result */
-/* of threads made ready inside of */
-/* Interrupt Service Routines */
-/* priority_inversions Destination for total number of */
-/* priority inversions */
-/* time_slices Destination for total number of */
-/* time-slices */
-/* relinquishes Destination for total number of */
-/* relinquishes */
-/* timeouts Destination for total number of */
-/* timeouts */
-/* wait_aborts Destination for total number of */
-/* wait aborts */
-/* non_idle_returns Destination for total number of */
-/* times threads return when */
-/* another thread is ready */
-/* idle_returns Destination for total number of */
-/* times threads return when no */
-/* other thread is ready */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves thread system performance information. */
+/* */
+/* INPUT */
+/* */
+/* resumptions Destination for total number of */
+/* thread resumptions */
+/* suspensions Destination for total number of */
+/* thread suspensions */
+/* solicited_preemptions Destination for total number of */
+/* thread preemption from thread */
+/* API calls */
+/* interrupt_preemptions Destination for total number of */
+/* thread preemptions as a result */
+/* of threads made ready inside of */
+/* Interrupt Service Routines */
+/* priority_inversions Destination for total number of */
+/* priority inversions */
+/* time_slices Destination for total number of */
+/* time-slices */
+/* relinquishes Destination for total number of */
+/* relinquishes */
+/* timeouts Destination for total number of */
+/* timeouts */
+/* wait_aborts Destination for total number of */
+/* wait aborts */
+/* non_idle_returns Destination for total number of */
+/* times threads return when */
+/* another thread is ready */
+/* idle_returns Destination for total number of */
+/* times threads return when no */
+/* other thread is ready */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_thread_performance_system_info_get(ULONG *resumptions, ULONG *suspensions, ULONG *solicited_preemptions, ULONG *interrupt_preemptions, ULONG *priority_inversions, ULONG *time_slices, ULONG *relinquishes, ULONG *timeouts, ULONG *wait_aborts, ULONG *non_idle_returns, ULONG *idle_returns)
@@ -108,3 +111,4 @@ ALIGN_TYPE extra_parameters[9];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_preemption_change.c b/common_modules/module_lib/src/txm_thread_preemption_change.c
index 9111c15f..5b6a7845 100644
--- a/common_modules/module_lib/src/txm_thread_preemption_change.c
+++ b/common_modules/module_lib/src/txm_thread_preemption_change.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_preemption_change PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_PREEMPTION_CHANGE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_preemption_change PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the preemption threshold change */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread */
-/* new_threshold New preemption threshold */
-/* old_threshold Old preemption threshold */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_PTR_ERROR Invalid old threshold pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the preemption threshold change */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* new_threshold New preemption threshold */
+/* old_threshold Old preemption threshold */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_PTR_ERROR Invalid old threshold pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_preemption_change(TX_THREAD *thread_ptr, UINT new_threshold, UINT *old_threshold)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_priority_change.c b/common_modules/module_lib/src/txm_thread_priority_change.c
index 4ee47893..51d2b5f6 100644
--- a/common_modules/module_lib/src/txm_thread_priority_change.c
+++ b/common_modules/module_lib/src/txm_thread_priority_change.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_priority_change PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_PRIORITY_CHANGE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_priority_change PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the change priority function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to suspend */
-/* new_priority New thread priority */
-/* old_priority Old thread priority */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_PTR_ERROR Invalid old priority pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the change priority function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/*    thread_ptr                            Pointer to thread to modify   */
+/* new_priority New thread priority */
+/* old_priority Old thread priority */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_PTR_ERROR Invalid old priority pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_priority_change(TX_THREAD *thread_ptr, UINT new_priority, UINT *old_priority)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
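
A boost/restore pair built on this service (standard tx_thread_priority_change mapping assumed; the priority value 4 is arbitrary).

    #include "txm_module.h"

    UINT boost_priority(TX_THREAD *thread_ptr, UINT *saved_priority)
    {
        /* Raise the thread to priority 4; its previous priority is returned
           through saved_priority so it can be restored later. */
        return tx_thread_priority_change(thread_ptr, 4, saved_priority);
    }

    UINT restore_priority(TX_THREAD *thread_ptr, UINT saved_priority)
    {
        UINT discarded;

        return tx_thread_priority_change(thread_ptr, saved_priority,
                                         &discarded);
    }
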
diff --git a/common_modules/module_lib/src/txm_thread_relinquish.c b/common_modules/module_lib/src/txm_thread_relinquish.c
index 8e34e235..83d4313d 100644
--- a/common_modules/module_lib/src/txm_thread_relinquish.c
+++ b/common_modules/module_lib/src/txm_thread_relinquish.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_relinquish PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_RELINQUISH_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_relinquish PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks to make sure a thread is executing before the */
-/* relinquish is executed. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks to make sure a thread is executing before the */
+/* relinquish is executed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _txe_thread_relinquish(VOID)
@@ -68,3 +71,4 @@ VOID _txe_thread_relinquish(VOID)
/* Call module manager dispatcher. */
(_txm_module_kernel_call_dispatcher)(TXM_THREAD_RELINQUISH_CALL, 0, 0, 0);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_reset.c b/common_modules/module_lib/src/txm_thread_reset.c
index 54ea25f7..6cae0bfe 100644
--- a/common_modules/module_lib/src/txm_thread_reset.c
+++ b/common_modules/module_lib/src/txm_thread_reset.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_reset PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_RESET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_reset PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread reset function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to reset */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread reset function call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread to reset */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_reset(TX_THREAD *thread_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_resume.c b/common_modules/module_lib/src/txm_thread_resume.c
index 0c92b154..9ed41d11 100644
--- a/common_modules/module_lib/src/txm_thread_resume.c
+++ b/common_modules/module_lib/src/txm_thread_resume.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_resume PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_RESUME_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_resume PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the resume thread function call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to resume */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the resume thread function call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread to resume */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_resume(TX_THREAD *thread_ptr)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_sleep.c b/common_modules/module_lib/src/txm_thread_sleep.c
index 2da82b7d..0e33cd43 100644
--- a/common_modules/module_lib/src/txm_thread_sleep.c
+++ b/common_modules/module_lib/src/txm_thread_sleep.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_sleep PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_SLEEP_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_sleep PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function handles application thread sleep requests. If the */
-/* sleep request was called from a non-thread, an error is returned. */
-/* */
-/* INPUT */
-/* */
-/* timer_ticks Number of timer ticks to sleep*/
-/* */
-/* OUTPUT */
-/* */
-/* status Return completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function handles application thread sleep requests. If the */
+/* sleep request was called from a non-thread, an error is returned. */
+/* */
+/* INPUT */
+/* */
+/* timer_ticks Number of timer ticks to sleep*/
+/* */
+/* OUTPUT */
+/* */
+/* status Return completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_thread_sleep(ULONG timer_ticks)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
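
A sketch of the common sleep-for-about-a-second idiom, assuming the standard tx_thread_sleep mapping and the default 100 Hz tick reported by TX_TIMER_TICKS_PER_SECOND.

    #include "txm_module.h"

    UINT pause_one_second(VOID)
    {
        /* TX_CALLER_ERROR is returned if this runs from initialization or
           a timer rather than from a thread. */
        return tx_thread_sleep(TX_TIMER_TICKS_PER_SECOND);
    }
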
diff --git a/common_modules/module_lib/src/txm_thread_stack_error_notify.c b/common_modules/module_lib/src/txm_thread_stack_error_notify.c
index fbad1da7..6946b234 100644
--- a/common_modules/module_lib/src/txm_thread_stack_error_notify.c
+++ b/common_modules/module_lib/src/txm_thread_stack_error_notify.c
@@ -22,47 +22,50 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_stack_error_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_STACK_ERROR_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_error_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function registers an application stack error handler. If */
-/* ThreadX detects a stack error, this application handler is called. */
-/* */
-/* Note: stack checking must be enabled for this routine to serve any */
-/* purpose via the TX_ENABLE_STACK_CHECKING define. */
-/* */
-/* INPUT */
-/* */
-/* stack_error_handler Pointer to stack error */
-/* handler, TX_NULL to disable */
-/* */
-/* OUTPUT */
-/* */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function registers an application stack error handler. If */
+/* ThreadX detects a stack error, this application handler is called. */
+/* */
+/* Note: stack checking must be enabled for this routine to serve any */
+/* purpose via the TX_ENABLE_STACK_CHECKING define. */
+/* */
+/* INPUT */
+/* */
+/* stack_error_handler Pointer to stack error */
+/* handler, TX_NULL to disable */
+/* */
+/* OUTPUT */
+/* */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_thread_stack_error_notify(VOID (*stack_error_handler)(TX_THREAD *thread_ptr))
@@ -76,3 +79,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
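
A registration sketch with a made-up handler name, standard tx_thread_stack_error_notify mapping assumed; as the header notes, the kernel must be built with TX_ENABLE_STACK_CHECKING for the handler ever to run.

    #include "txm_module.h"

    static VOID stack_error_handler(TX_THREAD *thread_ptr)
    {
        (void)thread_ptr;
        /* thread_ptr identifies the thread whose stack was found corrupted. */
    }

    UINT install_stack_error_handler(VOID)
    {
        /* Kernels built without stack checking return
           TX_FEATURE_NOT_ENABLED. */
        return tx_thread_stack_error_notify(stack_error_handler);
    }
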
diff --git a/common_modules/module_lib/src/txm_thread_suspend.c b/common_modules/module_lib/src/txm_thread_suspend.c
index a8a25ff1..a88f866c 100644
--- a/common_modules/module_lib/src/txm_thread_suspend.c
+++ b/common_modules/module_lib/src/txm_thread_suspend.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_suspend PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_SUSPEND_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_suspend PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread suspend function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to suspend */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread suspend function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread to suspend */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_suspend(TX_THREAD *thread_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
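
Since this wrapper pairs naturally with _txe_thread_resume above, a combined sketch (standard service-name mappings assumed, names illustrative).

    #include "txm_module.h"

    UINT pause_worker(TX_THREAD *worker)
    {
        /* TX_THREAD_ERROR is returned for an invalid pointer. */
        return tx_thread_suspend(worker);
    }

    UINT release_worker(TX_THREAD *worker)
    {
        /* Resumes a thread previously suspended with tx_thread_suspend(). */
        return tx_thread_resume(worker);
    }
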
diff --git a/common_modules/module_lib/src/txm_thread_terminate.c b/common_modules/module_lib/src/txm_thread_terminate.c
index 50acf59f..9947228c 100644
--- a/common_modules/module_lib/src/txm_thread_terminate.c
+++ b/common_modules/module_lib/src/txm_thread_terminate.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_terminate PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_TERMINATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_terminate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread terminate function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to suspend */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread terminate function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/*    thread_ptr                          Pointer to thread to terminate  */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_terminate(TX_THREAD *thread_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_time_slice_change.c b/common_modules/module_lib/src/txm_thread_time_slice_change.c
index 7183b4c8..29760f28 100644
--- a/common_modules/module_lib/src/txm_thread_time_slice_change.c
+++ b/common_modules/module_lib/src/txm_thread_time_slice_change.c
@@ -22,47 +22,50 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_time_slice_change PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_TIME_SLICE_CHANGE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_time_slice_change PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the time slice change function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread */
-/* new_time_slice New time slice */
-/* old_time_slice Old time slice */
-/* */
-/* OUTPUT */
-/* */
-/* TX_THREAD_ERROR Invalid thread pointer */
-/* TX_CALLER_ERROR Invalid caller of function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the time slice change function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* new_time_slice New time slice */
+/* old_time_slice Old time slice */
+/* */
+/* OUTPUT */
+/* */
+/* TX_THREAD_ERROR Invalid thread pointer */
+/* TX_CALLER_ERROR Invalid caller of function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_time_slice_change(TX_THREAD *thread_ptr, ULONG new_time_slice, ULONG *old_time_slice)
@@ -76,3 +79,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_thread_wait_abort.c b/common_modules/module_lib/src/txm_thread_wait_abort.c
index 3177165a..973c7983 100644
--- a/common_modules/module_lib/src/txm_thread_wait_abort.c
+++ b/common_modules/module_lib/src/txm_thread_wait_abort.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_thread_wait_abort PORTABLE C */
-/* 6.1 */
+#ifndef TXM_THREAD_WAIT_ABORT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_thread_wait_abort PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the thread wait abort function */
-/* call. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Thread to abort the wait on */
-/* */
-/* OUTPUT */
-/* */
-/* status Return completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the thread wait abort function */
+/* call. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Thread to abort the wait on */
+/* */
+/* OUTPUT */
+/* */
+/* status Return completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_thread_wait_abort(TX_THREAD *thread_ptr)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_time_get.c b/common_modules/module_lib/src/txm_time_get.c
index 0d4751c5..571565e2 100644
--- a/common_modules/module_lib/src/txm_time_get.c
+++ b/common_modules/module_lib/src/txm_time_get.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_time_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIME_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_time_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves the internal, free-running, system clock */
-/* and returns it to the caller. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* _tx_timer_system_clock Returns the system clock value */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves the internal, free-running, system clock */
+/* and returns it to the caller. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* _tx_timer_system_clock Returns the system clock value */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
ULONG _tx_time_get(VOID)
@@ -72,3 +75,4 @@ ULONG return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_time_set.c b/common_modules/module_lib/src/txm_time_set.c
index bb80fcb5..d93786ad 100644
--- a/common_modules/module_lib/src/txm_time_set.c
+++ b/common_modules/module_lib/src/txm_time_set.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_time_set PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIME_SET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_time_set PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function modifies the internal, free-running, system clock */
-/* as specified by the caller. */
-/* */
-/* INPUT */
-/* */
-/* new_time New time value */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function modifies the internal, free-running, system clock */
+/* as specified by the caller. */
+/* */
+/* INPUT */
+/* */
+/* new_time New time value */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_time_set(ULONG new_time)
@@ -68,3 +71,4 @@ VOID _tx_time_set(ULONG new_time)
/* Call module manager dispatcher. */
(_txm_module_kernel_call_dispatcher)(TXM_TIME_SET_CALL, (ALIGN_TYPE) new_time, 0, 0);
}
+#endif
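
For reference, a minimal sketch of these two services as seen from module application code, assuming the usual tx_time_get/tx_time_set service names map onto the wrappers above; if TXM_TIME_GET_CALL_NOT_USED or TXM_TIME_SET_CALL_NOT_USED is defined for the module library build, the corresponding call would no longer link:

    #define TXM_MODULE
    #include "txm_module.h"

    static VOID demo_clock_use(VOID)
    {
    ULONG start_ticks;
    ULONG elapsed_ticks;

        /* Capture the current value of the free-running tick counter.  */
        start_ticks = tx_time_get();

        /* ... do some work ...  */

        /* Measure the elapsed time in timer ticks.  */
        elapsed_ticks = tx_time_get() - start_ticks;
        (void) elapsed_ticks;

        /* Rebase the clock, e.g. after synchronizing with a host clock.  */
        tx_time_set(0);
    }
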
diff --git a/common_modules/module_lib/src/txm_timer_activate.c b/common_modules/module_lib/src/txm_timer_activate.c
index 54d09673..22f716fd 100644
--- a/common_modules/module_lib/src/txm_timer_activate.c
+++ b/common_modules/module_lib/src/txm_timer_activate.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_activate PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_ACTIVATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_activate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the activate application timer */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid application timer */
-/* TX_ACTIVATE_ERROR Application timer already active */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the activate application timer */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid application timer */
+/* TX_ACTIVATE_ERROR Application timer already active */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_activate(TX_TIMER *timer_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_timer_change.c b/common_modules/module_lib/src/txm_timer_change.c
index f3bf5c1a..a09162b9 100644
--- a/common_modules/module_lib/src/txm_timer_change.c
+++ b/common_modules/module_lib/src/txm_timer_change.c
@@ -22,48 +22,51 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_change PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_CHANGE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_change PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the application timer change */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* initial_ticks Initial expiration ticks */
-/* reschedule_ticks Reschedule ticks */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid application timer pointer */
-/* TX_TICK_ERROR Invalid initial tick value of 0 */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the application timer change */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* initial_ticks Initial expiration ticks */
+/* reschedule_ticks Reschedule ticks */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid application timer pointer */
+/* TX_TICK_ERROR Invalid initial tick value of 0 */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_change(TX_TIMER *timer_ptr, ULONG initial_ticks, ULONG reschedule_ticks)
@@ -77,3 +80,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_timer_create.c b/common_modules/module_lib/src/txm_timer_create.c
index 7e9958fd..d2a1f18f 100644
--- a/common_modules/module_lib/src/txm_timer_create.c
+++ b/common_modules/module_lib/src/txm_timer_create.c
@@ -22,53 +22,56 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_create PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_CREATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_create PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the create application timer */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* name_ptr Pointer to timer name */
-/* expiration_function Application expiration function */
-/* initial_ticks Initial expiration ticks */
-/* reschedule_ticks Reschedule ticks */
-/* auto_activate Automatic activation flag */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the create application timer */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* name_ptr Pointer to timer name */
+/*    expiration_function               Application expiration function   */
+/*    expiration_input                  Expiration function input         */
+/* initial_ticks Initial expiration ticks */
+/* reschedule_ticks Reschedule ticks */
+/* auto_activate Automatic activation flag */
/* timer_control_block_size Size of timer control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid timer control block */
-/* TX_TICK_ERROR Invalid initial expiration count */
-/* TX_ACTIVATE_ERROR Invalid timer activation option */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid timer control block */
+/* TX_TICK_ERROR Invalid initial expiration count */
+/* TX_ACTIVATE_ERROR Invalid timer activation option */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr, VOID (*expiration_function)(ULONG), ULONG expiration_input, ULONG initial_ticks, ULONG reschedule_ticks, UINT auto_activate, UINT timer_control_block_size)
@@ -90,3 +93,4 @@ ALIGN_TYPE extra_parameters[6];
/* Return value to the caller. */
return(return_value);
}
+#endif
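
A sketch of this service from the module side, with assumed names and tick values. The public tx_timer_create call supplies everything listed above except timer_control_block_size, which the module's tx_timer_create mapping is expected to fill in with sizeof(TX_TIMER):

    static TX_TIMER demo_timer;

    /* Runs in timer context on every expiration; the input value is the   */
    /* expiration_input passed to tx_timer_create below.                    */
    static VOID demo_timer_expiration(ULONG input)
    {
        (void) input;
    }

    static UINT demo_timer_setup(VOID)
    {
        /* 100-tick initial delay, then every 20 ticks, started immediately. */
        return(tx_timer_create(&demo_timer, "demo timer",
                               demo_timer_expiration, 0x1234,
                               100, 20, TX_AUTO_ACTIVATE));
    }
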
diff --git a/common_modules/module_lib/src/txm_timer_deactivate.c b/common_modules/module_lib/src/txm_timer_deactivate.c
index 7ba926a9..e7344b30 100644
--- a/common_modules/module_lib/src/txm_timer_deactivate.c
+++ b/common_modules/module_lib/src/txm_timer_deactivate.c
@@ -22,44 +22,47 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_deactivate PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_DEACTIVATE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_deactivate PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the deactivate application timer */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid application timer pointer */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the deactivate application timer */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid application timer pointer */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_deactivate(TX_TIMER *timer_ptr)
@@ -73,3 +76,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_timer_delete.c b/common_modules/module_lib/src/txm_timer_delete.c
index 2127304f..f27174eb 100644
--- a/common_modules/module_lib/src/txm_timer_delete.c
+++ b/common_modules/module_lib/src/txm_timer_delete.c
@@ -22,45 +22,48 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_delete PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_DELETE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_delete PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the delete application timer */
-/* function call. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid application timer pointer */
-/* TX_CALLER_ERROR Invalid caller of this function */
-/* status Actual completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the delete application timer */
+/* function call. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid application timer pointer */
+/* TX_CALLER_ERROR Invalid caller of this function */
+/* status Actual completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_delete(TX_TIMER *timer_ptr)
@@ -74,3 +77,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
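
Continuing the hypothetical demo_timer from the sketch above, the activate/change/deactivate/delete wrappers cover the rest of a timer's lifetime; ThreadX requires a timer to be deactivated before tx_timer_change is called:

    static VOID demo_timer_reconfigure(VOID)
    {
        /* Stop the timer, switch it to a 50-tick period, and restart it.  */
        tx_timer_deactivate(&demo_timer);
        tx_timer_change(&demo_timer, 50, 50);
        tx_timer_activate(&demo_timer);
    }

    static VOID demo_timer_teardown(VOID)
    {
        /* Deactivating first is a common precaution before deletion.  */
        tx_timer_deactivate(&demo_timer);
        tx_timer_delete(&demo_timer);
    }
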
diff --git a/common_modules/module_lib/src/txm_timer_info_get.c b/common_modules/module_lib/src/txm_timer_info_get.c
index f9f2c92d..50279f49 100644
--- a/common_modules/module_lib/src/txm_timer_info_get.c
+++ b/common_modules/module_lib/src/txm_timer_info_get.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txe_timer_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txe_timer_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks for errors in the timer information get */
-/* service. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* name Destination for the timer name */
-/* active Destination for active flag */
-/* remaining_ticks Destination for remaining ticks */
-/* before expiration */
-/* reschedule_ticks Destination for reschedule ticks */
-/* next_timer Destination for next timer on the */
-/* created list */
-/* */
-/* OUTPUT */
-/* */
-/* TX_TIMER_ERROR Invalid timer pointer */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks for errors in the timer information get */
+/* service. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* name Destination for the timer name */
+/* active Destination for active flag */
+/* remaining_ticks Destination for remaining ticks */
+/* before expiration */
+/* reschedule_ticks Destination for reschedule ticks */
+/* next_timer Destination for next timer on the */
+/* created list */
+/* */
+/* OUTPUT */
+/* */
+/* TX_TIMER_ERROR Invalid timer pointer */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txe_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks, ULONG *reschedule_ticks, TX_TIMER **next_timer)
@@ -86,3 +89,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
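
A sketch of querying a timer through this wrapper; ThreadX allows TX_NULL for any destination the caller does not need (demo_timer is the hypothetical timer from the earlier sketch):

    static VOID demo_timer_query(VOID)
    {
    CHAR     *name;
    UINT      active;
    ULONG     remaining_ticks;

        /* Only the fields of interest are requested; the rest are TX_NULL.  */
        if (tx_timer_info_get(&demo_timer, &name, &active,
                              &remaining_ticks, TX_NULL, TX_NULL) == TX_SUCCESS)
        {
            /* remaining_ticks holds the ticks left before the next expiry.  */
        }
    }
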
diff --git a/common_modules/module_lib/src/txm_timer_performance_info_get.c b/common_modules/module_lib/src/txm_timer_performance_info_get.c
index b1900704..d2e243b3 100644
--- a/common_modules/module_lib/src/txm_timer_performance_info_get.c
+++ b/common_modules/module_lib/src/txm_timer_performance_info_get.c
@@ -22,54 +22,57 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_timer_performance_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_PERFORMANCE_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_performance_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves performance information from the specified */
-/* timer. */
-/* */
-/* INPUT */
-/* */
-/* timer_ptr Pointer to timer control block */
-/* activates Destination for the number of */
-/* activations of this timer */
-/* reactivates Destination for the number of */
-/* reactivations of this timer */
-/* deactivates Destination for the number of */
-/* deactivations of this timer */
-/* expirations Destination for the number of */
-/* expirations of this timer */
-/* expiration_adjusts Destination for the number of */
-/* expiration adjustments of this */
-/* timer */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves performance information from the specified */
+/* timer. */
+/* */
+/* INPUT */
+/* */
+/* timer_ptr Pointer to timer control block */
+/* activates Destination for the number of */
+/* activations of this timer */
+/* reactivates Destination for the number of */
+/* reactivations of this timer */
+/* deactivates Destination for the number of */
+/* deactivations of this timer */
+/* expirations Destination for the number of */
+/* expirations of this timer */
+/* expiration_adjusts Destination for the number of */
+/* expiration adjustments of this */
+/* timer */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_timer_performance_info_get(TX_TIMER *timer_ptr, ULONG *activates, ULONG *reactivates, ULONG *deactivates, ULONG *expirations, ULONG *expiration_adjusts)
@@ -89,3 +92,4 @@ ALIGN_TYPE extra_parameters[4];
/* Return value to the caller. */
return(return_value);
}
+#endif
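
A sketch of reading per-timer statistics through this wrapper. It assumes the resident kernel was built with TX_TIMER_ENABLE_PERFORMANCE_INFO; without that option the service is expected to return TX_FEATURE_NOT_ENABLED:

    static VOID demo_timer_stats(VOID)
    {
    ULONG activates;
    ULONG reactivates;
    ULONG deactivates;
    ULONG expirations;
    ULONG adjusts;

        if (tx_timer_performance_info_get(&demo_timer,
                                          &activates, &reactivates,
                                          &deactivates, &expirations,
                                          &adjusts) == TX_SUCCESS)
        {
            /* expirations counts how many times the callback has run.  */
        }
    }
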
diff --git a/common_modules/module_lib/src/txm_timer_performance_system_info_get.c b/common_modules/module_lib/src/txm_timer_performance_system_info_get.c
index 1a507602..678966fa 100644
--- a/common_modules/module_lib/src/txm_timer_performance_system_info_get.c
+++ b/common_modules/module_lib/src/txm_timer_performance_system_info_get.c
@@ -22,51 +22,54 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_timer_performance_system_info_get PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TIMER_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_performance_system_info_get PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function retrieves timer performance information. */
-/* */
-/* INPUT */
-/* */
-/* activates Destination for total number of */
-/* activations */
-/* reactivates Destination for total number of */
-/* reactivations */
-/* deactivates Destination for total number of */
-/* deactivations */
-/* expirations Destination for total number of */
-/* expirations */
-/* expiration_adjusts Destination for total number of */
-/* expiration adjustments */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function retrieves timer performance information. */
+/* */
+/* INPUT */
+/* */
+/* activates Destination for total number of */
+/* activations */
+/* reactivates Destination for total number of */
+/* reactivations */
+/* deactivates Destination for total number of */
+/* deactivations */
+/* expirations Destination for total number of */
+/* expirations */
+/* expiration_adjusts Destination for total number of */
+/* expiration adjustments */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_timer_performance_system_info_get(ULONG *activates, ULONG *reactivates, ULONG *deactivates, ULONG *expirations, ULONG *expiration_adjusts)
@@ -85,3 +88,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_trace_buffer_full_notify.c b/common_modules/module_lib/src/txm_trace_buffer_full_notify.c
index f6641544..56658179 100644
--- a/common_modules/module_lib/src/txm_trace_buffer_full_notify.c
+++ b/common_modules/module_lib/src/txm_trace_buffer_full_notify.c
@@ -22,46 +22,49 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_buffer_full_notify PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_BUFFER_FULL_NOTIFY_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_buffer_full_notify PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function sets up the application callback function that is */
-/* called whenever the trace buffer becomes full. The application */
-/* can then swap to a new trace buffer in order not to lose any */
-/* events. */
-/* */
-/* INPUT */
-/* */
-/* full_buffer_callback Full trace buffer processing */
-/* function */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function sets up the application callback function that is */
+/* called whenever the trace buffer becomes full. The application */
+/* can then swap to a new trace buffer in order not to lose any */
+/* events. */
+/* */
+/* INPUT */
+/* */
+/* full_buffer_callback Full trace buffer processing */
+/* function */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_buffer_full_notify(VOID (*full_buffer_callback)(VOID *buffer))
@@ -75,3 +78,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
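
A sketch of registering a buffer-full callback through this wrapper; the callback name and the idea of swapping to a spare buffer are illustrative assumptions, not part of this patch:

    /* Called by the kernel when the active trace buffer fills up.  */
    static VOID demo_trace_buffer_full(VOID *buffer)
    {
        /* Archive the full buffer or switch tracing to a spare buffer.  */
        (void) buffer;
    }

    static VOID demo_trace_notify_setup(VOID)
    {
        tx_trace_buffer_full_notify(demo_trace_buffer_full);
    }
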
diff --git a/common_modules/module_lib/src/txm_trace_disable.c b/common_modules/module_lib/src/txm_trace_disable.c
index b958db9e..d13cfd5a 100644
--- a/common_modules/module_lib/src/txm_trace_disable.c
+++ b/common_modules/module_lib/src/txm_trace_disable.c
@@ -22,42 +22,45 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_disable PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_DISABLE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_disable PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function disables trace inside of ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function disables trace inside of ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_disable(VOID)
@@ -71,3 +74,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_trace_enable.c b/common_modules/module_lib/src/txm_trace_enable.c
index 1960e0b4..09d96d26 100644
--- a/common_modules/module_lib/src/txm_trace_enable.c
+++ b/common_modules/module_lib/src/txm_trace_enable.c
@@ -22,46 +22,49 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_enable PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_ENABLE_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_enable PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function initializes the ThreadX trace buffer and the */
-/* associated control variables, enabling it for operation. */
-/* */
-/* INPUT */
-/* */
-/* trace_buffer_start Start of trace buffer */
-/* trace_buffer_size Size (bytes) of trace buffer */
-/* registry_entries Number of object registry */
-/* entries. */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function initializes the ThreadX trace buffer and the */
+/* associated control variables, enabling it for operation. */
+/* */
+/* INPUT */
+/* */
+/* trace_buffer_start Start of trace buffer */
+/* trace_buffer_size Size (bytes) of trace buffer */
+/* registry_entries Number of object registry */
+/* entries. */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_enable(VOID *trace_buffer_start, ULONG trace_buffer_size, ULONG registry_entries)
@@ -75,3 +78,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
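
A sketch of starting and stopping tracing from module code with a module-local buffer; the sizes are arbitrary example values, and whether a buffer inside the module is an appropriate home for the trace depends on the port's memory-protection setup:

    #define DEMO_TRACE_BUFFER_SIZE   4096
    #define DEMO_TRACE_REGISTRY      16

    /* ULONG storage keeps the buffer naturally aligned.  */
    static ULONG demo_trace_buffer[DEMO_TRACE_BUFFER_SIZE / sizeof(ULONG)];

    static VOID demo_trace_capture(VOID)
    {
        /* Begin logging events into the buffer.  */
        tx_trace_enable((VOID *) demo_trace_buffer,
                        DEMO_TRACE_BUFFER_SIZE, DEMO_TRACE_REGISTRY);

        /* ... run the scenario of interest ...  */

        /* Stop logging so the buffer contents stay stable for readout.  */
        tx_trace_disable();
    }
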
diff --git a/common_modules/module_lib/src/txm_trace_event_filter.c b/common_modules/module_lib/src/txm_trace_event_filter.c
index 8a8b4dc9..b1df8557 100644
--- a/common_modules/module_lib/src/txm_trace_event_filter.c
+++ b/common_modules/module_lib/src/txm_trace_event_filter.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_event_filter PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_EVENT_FILTER_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_event_filter PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function sets up the event filter, which allows the */
-/* application to filter various trace events during run-time. */
-/* */
-/* INPUT */
-/* */
-/* event_filter_bits Trace filter event bit(s) */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function sets up the event filter, which allows the */
+/* application to filter various trace events during run-time. */
+/* */
+/* INPUT */
+/* */
+/* event_filter_bits Trace filter event bit(s) */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_event_filter(ULONG event_filter_bits)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_trace_event_unfilter.c b/common_modules/module_lib/src/txm_trace_event_unfilter.c
index 5afa77a4..e579e5b5 100644
--- a/common_modules/module_lib/src/txm_trace_event_unfilter.c
+++ b/common_modules/module_lib/src/txm_trace_event_unfilter.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_event_unfilter PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_EVENT_UNFILTER_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_event_unfilter PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function removes the event filter, which allows the */
-/* application to un-filter various trace events during run-time. */
-/* */
-/* INPUT */
-/* */
-/* event_unfilter_bits Trace un-filter event bit(s) */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function removes the event filter, which allows the */
+/* application to un-filter various trace events during run-time. */
+/* */
+/* INPUT */
+/* */
+/* event_unfilter_bits Trace un-filter event bit(s) */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_event_unfilter(ULONG event_unfilter_bits)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
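
A sketch of the filter/unfilter pair in use; the TX_TRACE_* event-class constants are the standard ones from tx_api.h, and the choice of classes here is only an example:

    static VOID demo_trace_filtering(VOID)
    {
        /* Drop timer and time-service events while measuring.  */
        tx_trace_event_filter(TX_TRACE_TIMER_EVENTS | TX_TRACE_TIME_EVENTS);

        /* ... capture the events of interest ...  */

        /* Restore the event classes filtered out above.  */
        tx_trace_event_unfilter(TX_TRACE_TIMER_EVENTS | TX_TRACE_TIME_EVENTS);
    }
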
diff --git a/common_modules/module_lib/src/txm_trace_interrupt_control.c b/common_modules/module_lib/src/txm_trace_interrupt_control.c
index 6533ae28..d9bb5de4 100644
--- a/common_modules/module_lib/src/txm_trace_interrupt_control.c
+++ b/common_modules/module_lib/src/txm_trace_interrupt_control.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_interrupt_control PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_INTERRUPT_CONTROL_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_interrupt_control PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function provides a shell for the tx_interrupt_control */
-/* function so that a trace event can be logged for its use. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt posture */
-/* */
-/* OUTPUT */
-/* */
-/* Previous Interrupt Posture */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function provides a shell for the tx_interrupt_control */
+/* function so that a trace event can be logged for its use. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt posture */
+/* */
+/* OUTPUT */
+/* */
+/* Previous Interrupt Posture */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_interrupt_control(UINT new_posture)
@@ -72,3 +75,4 @@ UINT return_value;
/* Return value to the caller. */
return(return_value);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_trace_isr_enter_insert.c b/common_modules/module_lib/src/txm_trace_isr_enter_insert.c
index 730c8c9b..a57a2397 100644
--- a/common_modules/module_lib/src/txm_trace_isr_enter_insert.c
+++ b/common_modules/module_lib/src/txm_trace_isr_enter_insert.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_isr_enter_insert PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_ISR_ENTER_INSERT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_isr_enter_insert PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function provides inserts an ISR entry event into the trace */
-/* buffer. */
-/* */
-/* INPUT */
-/* */
-/* isr_id User defined ISR ID */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/*    This function inserts an ISR entry event into the trace             */
+/* buffer. */
+/* */
+/* INPUT */
+/* */
+/* isr_id User defined ISR ID */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_trace_isr_enter_insert(ULONG isr_id)
@@ -68,3 +71,4 @@ VOID _tx_trace_isr_enter_insert(ULONG isr_id)
/* Call module manager dispatcher. */
(_txm_module_kernel_call_dispatcher)(TXM_TRACE_ISR_ENTER_INSERT_CALL, (ALIGN_TYPE) isr_id, 0, 0);
}
+#endif
diff --git a/common_modules/module_lib/src/txm_trace_isr_exit_insert.c b/common_modules/module_lib/src/txm_trace_isr_exit_insert.c
index 078831de..48d0340a 100644
--- a/common_modules/module_lib/src/txm_trace_isr_exit_insert.c
+++ b/common_modules/module_lib/src/txm_trace_isr_exit_insert.c
@@ -22,43 +22,46 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_isr_exit_insert PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_ISR_EXIT_INSERT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_isr_exit_insert PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function provides inserts an ISR exit event into the trace */
-/* buffer. */
-/* */
-/* INPUT */
-/* */
-/* isr_id User defined ISR ID */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/*    This function inserts an ISR exit event into the trace              */
+/* buffer. */
+/* */
+/* INPUT */
+/* */
+/* isr_id User defined ISR ID */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_trace_isr_exit_insert(ULONG isr_id)
@@ -68,3 +71,4 @@ VOID _tx_trace_isr_exit_insert(ULONG isr_id)
/* Call module manager dispatcher. */
(_txm_module_kernel_call_dispatcher)(TXM_TRACE_ISR_EXIT_INSERT_CALL, (ALIGN_TYPE) isr_id, 0, 0);
}
+#endif
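
For completeness, a sketch of the enter/exit pair bracketing an interrupt handler so the handler shows up in the trace; the handler name and ISR ID are made up, and in most module configurations these calls would live in resident (kernel-side) ISR code rather than inside a module:

    #define DEMO_UART_ISR_ID   7

    VOID demo_uart_isr(VOID)
    {
        tx_trace_isr_enter_insert(DEMO_UART_ISR_ID);

        /* ... service the interrupt ...  */

        tx_trace_isr_exit_insert(DEMO_UART_ISR_ID);
    }
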
diff --git a/common_modules/module_lib/src/txm_trace_user_event_insert.c b/common_modules/module_lib/src/txm_trace_user_event_insert.c
index 56cc5554..592dab72 100644
--- a/common_modules/module_lib/src/txm_trace_user_event_insert.c
+++ b/common_modules/module_lib/src/txm_trace_user_event_insert.c
@@ -22,46 +22,49 @@
#define TXM_MODULE
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_trace_user_event_insert PORTABLE C */
-/* 6.1 */
+#ifndef TXM_TRACE_USER_EVENT_INSERT_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_trace_user_event_insert PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function inserts a user-defined event into the trace buffer. */
-/* */
-/* INPUT */
-/* */
-/* event_id User Event ID */
-/* info_field_1 First information field */
-/* info_field_2 First information field */
-/* info_field_3 First information field */
-/* info_field_4 First information field */
-/* */
-/* OUTPUT */
-/* */
-/* Completion Status */
-/* */
-/* CALLS */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts a user-defined event into the trace buffer. */
+/* */
+/* INPUT */
+/* */
+/* event_id User Event ID */
+/* info_field_1 First information field */
+/*    info_field_2                      Second information field          */
+/*    info_field_3                      Third information field           */
+/*    info_field_4                      Fourth information field          */
+/* */
+/* OUTPUT */
+/* */
+/* Completion Status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_kernel_call_dispatcher */
-/* */
-/* CALLED BY */
-/* */
-/* Module application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Module application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _tx_trace_user_event_insert(ULONG event_id, ULONG info_field_1, ULONG info_field_2, ULONG info_field_3, ULONG info_field_4)
@@ -80,3 +83,4 @@ ALIGN_TYPE extra_parameters[3];
/* Return value to the caller. */
return(return_value);
}
+#endif
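
A sketch of logging an application-defined event through this wrapper; TX_TRACE_USER_EVENT_START is the documented base for user event IDs, while the offset and field meanings below are illustrative. The txm_module_manager_dispatch.h changes that follow apply the same per-service CALL_NOT_USED guards on the kernel side, so defining a given macro for both builds removes a service end to end.

    #define DEMO_EVENT_SENSOR_READ   (TX_TRACE_USER_EVENT_START + 1)

    static VOID demo_log_sensor_read(ULONG channel, ULONG raw_value)
    {
        /* Two of the four information fields are used; the rest are zero.  */
        tx_trace_user_event_insert(DEMO_EVENT_SENSOR_READ,
                                   channel, raw_value, 0, 0);
    }
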
diff --git a/common_modules/module_manager/inc/txm_module_manager_dispatch.h b/common_modules/module_manager/inc/txm_module_manager_dispatch.h
index 644cc1c9..8cbd2f44 100644
--- a/common_modules/module_manager/inc/txm_module_manager_dispatch.h
+++ b/common_modules/module_manager/inc/txm_module_manager_dispatch.h
@@ -20,6 +20,7 @@
/**************************************************************************/
/**************************************************************************/
+#ifndef TXM_BLOCK_ALLOCATE_CALL_NOT_USED
/* UINT _txe_block_allocate(
TX_BLOCK_POOL *pool_ptr, -> param_0
VOID **block_ptr, -> param_1
@@ -46,7 +47,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_CREATE_CALL_NOT_USED
/* UINT _txe_block_pool_create(
TX_BLOCK_POOL *pool_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -85,7 +88,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_DELETE_CALL_NOT_USED
/* UINT _txe_block_pool_delete(
TX_BLOCK_POOL *pool_ptr -> param_0
); */
@@ -111,7 +116,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_INFO_GET_CALL_NOT_USED
/* UINT _txe_block_pool_info_get(
TX_BLOCK_POOL *pool_ptr, -> param_0
CHAR **name, -> param_1
@@ -164,7 +171,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_block_pool_performance_info_get(
TX_BLOCK_POOL *pool_ptr, -> param_0
ULONG *allocates, -> param_1
@@ -207,7 +216,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_block_pool_performance_system_info_get(
ULONG *allocates, -> param_0
ULONG *releases, -> param_1
@@ -245,7 +256,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_POOL_PRIORITIZE_CALL_NOT_USED
/* UINT _txe_block_pool_prioritize(
TX_BLOCK_POOL *pool_ptr -> param_0
); */
@@ -265,7 +278,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BLOCK_RELEASE_CALL_NOT_USED
/* UINT _txe_block_release(
VOID *block_ptr -> param_0
); */
@@ -304,7 +319,9 @@ ALIGN_TYPE block_header_start;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_ALLOCATE_CALL_NOT_USED
/* UINT _txe_byte_allocate(
TX_BYTE_POOL *pool_ptr, -> param_0
VOID **memory_ptr, -> param_1
@@ -336,7 +353,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_CREATE_CALL_NOT_USED
/* UINT _txe_byte_pool_create(
TX_BYTE_POOL *pool_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -373,7 +392,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_DELETE_CALL_NOT_USED
/* UINT _txe_byte_pool_delete(
TX_BYTE_POOL *pool_ptr -> param_0
); */
@@ -399,7 +420,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_INFO_GET_CALL_NOT_USED
/* UINT _txe_byte_pool_info_get(
TX_BYTE_POOL *pool_ptr, -> param_0
CHAR **name, -> param_1
@@ -452,7 +475,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_byte_pool_performance_info_get(
TX_BYTE_POOL *pool_ptr, -> param_0
ULONG *allocates, -> param_1
@@ -510,7 +535,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_byte_pool_performance_system_info_get(
ULONG *allocates, -> param_0
ULONG *releases, -> param_1
@@ -563,7 +590,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_POOL_PRIORITIZE_CALL_NOT_USED
/* UINT _txe_byte_pool_prioritize(
TX_BYTE_POOL *pool_ptr -> param_0
); */
@@ -583,7 +612,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_BYTE_RELEASE_CALL_NOT_USED
/* UINT _txe_byte_release(
VOID *memory_ptr -> param_0
); */
@@ -622,7 +653,9 @@ ALIGN_TYPE block_header_start;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_CREATE_CALL_NOT_USED
/* UINT _txe_event_flags_create(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -649,7 +682,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_DELETE_CALL_NOT_USED
/* UINT _txe_event_flags_delete(
TX_EVENT_FLAGS_GROUP *group_ptr -> param_0
); */
@@ -675,7 +710,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_GET_CALL_NOT_USED
/* UINT _txe_event_flags_get(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
ULONG requested_flags, -> param_1
@@ -709,7 +746,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_INFO_GET_CALL_NOT_USED
/* UINT _txe_event_flags_info_get(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
CHAR **name, -> param_1
@@ -757,7 +796,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_event_flags_performance_info_get(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
ULONG *sets, -> param_1
@@ -800,7 +841,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_event_flags_performance_system_info_get(
ULONG *sets, -> param_0
ULONG *gets, -> param_1
@@ -838,7 +881,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_SET_CALL_NOT_USED
/* UINT _txe_event_flags_set(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
ULONG flags_to_set, -> param_1
@@ -862,7 +907,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_EVENT_FLAGS_SET_NOTIFY_CALL_NOT_USED
/* UINT _txe_event_flags_set_notify(
TX_EVENT_FLAGS_GROUP *group_ptr, -> param_0
VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *) -> param_1
@@ -906,7 +953,9 @@ VOID (*events_set_notify)(TX_EVENT_FLAGS_GROUP *);
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_CREATE_CALL_NOT_USED
/* UINT _txe_mutex_create(
TX_MUTEX *mutex_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -938,7 +987,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_DELETE_CALL_NOT_USED
/* UINT _txe_mutex_delete(
TX_MUTEX *mutex_ptr -> param_0
); */
@@ -964,7 +1015,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_GET_CALL_NOT_USED
/* UINT _txe_mutex_get(
TX_MUTEX *mutex_ptr, -> param_0
ULONG wait_option -> param_1
@@ -986,7 +1039,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_INFO_GET_CALL_NOT_USED
/* UINT _txe_mutex_info_get(
TX_MUTEX *mutex_ptr, -> param_0
CHAR **name, -> param_1
@@ -1039,7 +1094,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_mutex_performance_info_get(
TX_MUTEX *mutex_ptr, -> param_0
ULONG *puts, -> param_1
@@ -1092,7 +1149,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_mutex_performance_system_info_get(
ULONG *puts, -> param_0
ULONG *gets, -> param_1
@@ -1140,7 +1199,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_PRIORITIZE_CALL_NOT_USED
/* UINT _txe_mutex_prioritize(
TX_MUTEX *mutex_ptr -> param_0
); */
@@ -1160,7 +1221,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MUTEX_PUT_CALL_NOT_USED
/* UINT _txe_mutex_put(
TX_MUTEX *mutex_ptr -> param_0
); */
@@ -1180,7 +1243,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_CREATE_CALL_NOT_USED
/* UINT _txe_queue_create(
TX_QUEUE *queue_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -1219,7 +1284,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_DELETE_CALL_NOT_USED
/* UINT _txe_queue_delete(
TX_QUEUE *queue_ptr -> param_0
); */
@@ -1245,7 +1312,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_FLUSH_CALL_NOT_USED
/* UINT _txe_queue_flush(
TX_QUEUE *queue_ptr -> param_0
); */
@@ -1265,7 +1334,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_FRONT_SEND_CALL_NOT_USED
/* UINT _txe_queue_front_send(
TX_QUEUE *queue_ptr, -> param_0
VOID *source_ptr, -> param_1
@@ -1283,7 +1354,7 @@ TX_QUEUE *queue_ptr;
return(TXM_MODULE_INVALID_MEMORY);
/* We need to get the size of the message from the queue. */
- queue_ptr = (TX_QUEUE *) param_0;
+ queue_ptr = (TX_QUEUE *) param_0;
if (!TXM_MODULE_MANAGER_PARAM_CHECK_BUFFER_READ(module_instance, param_1, queue_ptr -> tx_queue_message_size))
return(TXM_MODULE_INVALID_MEMORY);
}
@@ -1295,7 +1366,9 @@ TX_QUEUE *queue_ptr;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_INFO_GET_CALL_NOT_USED
/* UINT _txe_queue_info_get(
TX_QUEUE *queue_ptr, -> param_0
CHAR **name, -> param_1
@@ -1348,7 +1421,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_queue_performance_info_get(
TX_QUEUE *queue_ptr, -> param_0
ULONG *messages_sent, -> param_1
@@ -1401,7 +1476,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_queue_performance_system_info_get(
ULONG *messages_sent, -> param_0
ULONG *messages_received, -> param_1
@@ -1449,7 +1526,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_PRIORITIZE_CALL_NOT_USED
/* UINT _txe_queue_prioritize(
TX_QUEUE *queue_ptr -> param_0
); */
@@ -1469,7 +1548,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_RECEIVE_CALL_NOT_USED
/* UINT _txe_queue_receive(
TX_QUEUE *queue_ptr, -> param_0
VOID *destination_ptr, -> param_1
@@ -1487,7 +1568,7 @@ TX_QUEUE *queue_ptr;
return(TXM_MODULE_INVALID_MEMORY);
/* We need to get the max size of the buffer from the queue. */
- queue_ptr = (TX_QUEUE *) param_0;
+ queue_ptr = (TX_QUEUE *) param_0;
if (!TXM_MODULE_MANAGER_PARAM_CHECK_BUFFER_WRITE(module_instance, param_1, sizeof(ULONG)*queue_ptr -> tx_queue_message_size))
return(TXM_MODULE_INVALID_MEMORY);
}
@@ -1499,7 +1580,9 @@ TX_QUEUE *queue_ptr;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_SEND_CALL_NOT_USED
/* UINT _txe_queue_send(
TX_QUEUE *queue_ptr, -> param_0
VOID *source_ptr, -> param_1
@@ -1517,7 +1600,7 @@ TX_QUEUE *queue_ptr;
return(TXM_MODULE_INVALID_MEMORY);
/* We need to get the size of the message from the queue. */
- queue_ptr = (TX_QUEUE *) param_0;
+ queue_ptr = (TX_QUEUE *) param_0;
if (!TXM_MODULE_MANAGER_PARAM_CHECK_BUFFER_READ(module_instance, param_1, sizeof(ULONG)*queue_ptr -> tx_queue_message_size))
return(TXM_MODULE_INVALID_MEMORY);
}
@@ -1529,7 +1612,9 @@ TX_QUEUE *queue_ptr;
);
return(return_value);
}
+#endif
+#ifndef TXM_QUEUE_SEND_NOTIFY_CALL_NOT_USED
/* UINT _txe_queue_send_notify(
TX_QUEUE *queue_ptr, -> param_0
VOID (*queue_send_notify)(TX_QUEUE *notify_queue_ptr) -> param_1
@@ -1573,7 +1658,9 @@ VOID (*queue_send_notify)(TX_QUEUE *);
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_CEILING_PUT_CALL_NOT_USED
/* UINT _txe_semaphore_ceiling_put(
TX_SEMAPHORE *semaphore_ptr, -> param_0
ULONG ceiling -> param_1
@@ -1595,7 +1682,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_CREATE_CALL_NOT_USED
/* UINT _txe_semaphore_create(
TX_SEMAPHORE *semaphore_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -1627,7 +1716,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_DELETE_CALL_NOT_USED
/* UINT _txe_semaphore_delete(
TX_SEMAPHORE *semaphore_ptr -> param_0
); */
@@ -1653,7 +1744,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_GET_CALL_NOT_USED
/* UINT _txe_semaphore_get(
TX_SEMAPHORE *semaphore_ptr, -> param_0
ULONG wait_option -> param_1
@@ -1675,7 +1768,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_INFO_GET_CALL_NOT_USED
/* UINT _txe_semaphore_info_get(
TX_SEMAPHORE *semaphore_ptr, -> param_0
CHAR **name, -> param_1
@@ -1723,7 +1818,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_semaphore_performance_info_get(
TX_SEMAPHORE *semaphore_ptr, -> param_0
ULONG *puts, -> param_1
@@ -1766,7 +1863,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_semaphore_performance_system_info_get(
ULONG *puts, -> param_0
ULONG *gets, -> param_1
@@ -1804,7 +1903,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_PRIORITIZE_CALL_NOT_USED
/* UINT _txe_semaphore_prioritize(
TX_SEMAPHORE *semaphore_ptr -> param_0
); */
@@ -1824,7 +1925,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_PUT_CALL_NOT_USED
/* UINT _txe_semaphore_put(
TX_SEMAPHORE *semaphore_ptr -> param_0
); */
@@ -1844,7 +1947,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_SEMAPHORE_PUT_NOTIFY_CALL_NOT_USED
/* UINT _txe_semaphore_put_notify(
TX_SEMAPHORE *semaphore_ptr, -> param_0
VOID (*semaphore_put_notify)(TX_SEMAPHORE *notify_semaphore_ptr) -> param_1
@@ -1888,7 +1993,9 @@ VOID (*semaphore_put_notify)(TX_SEMAPHORE *);
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_CREATE_CALL_NOT_USED
/* UINT _txe_thread_create(
TX_THREAD *thread_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -1945,7 +2052,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_DELETE_CALL_NOT_USED
/* UINT _txe_thread_delete(
TX_THREAD *thread_ptr -> param_0
); */
@@ -1971,7 +2080,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_ENTRY_EXIT_NOTIFY_CALL_NOT_USED
/* UINT _txe_thread_entry_exit_notify(
TX_THREAD *thread_ptr, -> param_0
VOID (*thread_entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type) -> param_1
@@ -2020,7 +2131,9 @@ VOID (*thread_entry_exit_notify)(TX_THREAD *, UINT);
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_IDENTIFY_CALL_NOT_USED
/* TX_THREAD *_tx_thread_identify(); */
static ALIGN_TYPE _txm_module_manager_tx_thread_identify_dispatch(TXM_MODULE_INSTANCE *module_instance, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE *extra_parameters)
{
@@ -2030,7 +2143,9 @@ ALIGN_TYPE return_value;
return_value = (ALIGN_TYPE) _tx_thread_identify();
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_INFO_GET_CALL_NOT_USED
/* UINT _txe_thread_info_get(
TX_THREAD *thread_ptr, -> param_0
CHAR **name, -> param_1
@@ -2093,7 +2208,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_INTERRUPT_CONTROL_CALL_NOT_USED
/* UINT _tx_thread_interrupt_control(
UINT new_posture -> param_0
); */
@@ -2110,7 +2227,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_thread_performance_info_get(
TX_THREAD *thread_ptr, -> param_0
ULONG *resumptions, -> param_1
@@ -2183,7 +2302,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_thread_performance_system_info_get(
ULONG *resumptions, -> param_0
ULONG *suspensions, -> param_1
@@ -2256,7 +2377,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_PREEMPTION_CHANGE_CALL_NOT_USED
/* UINT _txe_thread_preemption_change(
TX_THREAD *thread_ptr, -> param_0
UINT new_threshold, -> param_1
@@ -2283,7 +2406,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_PRIORITY_CHANGE_CALL_NOT_USED
/* UINT _txe_thread_priority_change(
TX_THREAD *thread_ptr, -> param_0
UINT new_priority, -> param_1
@@ -2310,7 +2435,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_RELINQUISH_CALL_NOT_USED
/* VOID _txe_thread_relinquish(); */
static ALIGN_TYPE _txm_module_manager_tx_thread_relinquish_dispatch(TXM_MODULE_INSTANCE *module_instance, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE *extra_parameters)
{
@@ -2318,7 +2445,9 @@ static ALIGN_TYPE _txm_module_manager_tx_thread_relinquish_dispatch(TXM_MODULE_I
_txe_thread_relinquish();
return(TX_SUCCESS);
}
+#endif
+#ifndef TXM_THREAD_RESET_CALL_NOT_USED
/* UINT _txe_thread_reset(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2338,7 +2467,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_RESUME_CALL_NOT_USED
/* UINT _txe_thread_resume(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2358,7 +2489,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_SLEEP_CALL_NOT_USED
/* UINT _tx_thread_sleep(
ULONG timer_ticks -> param_0
); */
@@ -2372,7 +2505,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_STACK_ERROR_NOTIFY_CALL_NOT_USED
/* UINT _tx_thread_stack_error_notify(
VOID (*stack_error_handler)(TX_THREAD *thread_ptr) -> param_0
); */
@@ -2389,7 +2524,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_SUSPEND_CALL_NOT_USED
/* UINT _txe_thread_suspend(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2409,7 +2546,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_SYSTEM_SUSPEND_CALL_NOT_USED
/* VOID _tx_thread_system_suspend(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2464,7 +2603,9 @@ TX_THREAD *thread_ptr;
);
return(TX_SUCCESS);
}
+#endif
+#ifndef TXM_THREAD_TERMINATE_CALL_NOT_USED
/* UINT _txe_thread_terminate(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2484,7 +2625,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_TIME_SLICE_CHANGE_CALL_NOT_USED
/* UINT _txe_thread_time_slice_change(
TX_THREAD *thread_ptr, -> param_0
ULONG new_time_slice, -> param_1
@@ -2511,7 +2654,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_THREAD_WAIT_ABORT_CALL_NOT_USED
/* UINT _txe_thread_wait_abort(
TX_THREAD *thread_ptr -> param_0
); */
@@ -2531,7 +2676,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIME_GET_CALL_NOT_USED
/* ULONG _tx_time_get(); */
static ALIGN_TYPE _txm_module_manager_tx_time_get_dispatch(TXM_MODULE_INSTANCE *module_instance, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE *extra_parameters)
{
@@ -2541,7 +2688,9 @@ ALIGN_TYPE return_value;
return_value = (ALIGN_TYPE) _tx_time_get();
return(return_value);
}
+#endif
+#ifndef TXM_TIME_SET_CALL_NOT_USED
/* VOID _tx_time_set(
ULONG new_time -> param_0
); */
@@ -2553,7 +2702,9 @@ static ALIGN_TYPE _txm_module_manager_tx_time_set_dispatch(TXM_MODULE_INSTANCE *
);
return(TX_SUCCESS);
}
+#endif
+#ifndef TXM_TIMER_ACTIVATE_CALL_NOT_USED
/* UINT _txe_timer_activate(
TX_TIMER *timer_ptr -> param_0
); */
@@ -2573,7 +2724,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_CHANGE_CALL_NOT_USED
/* UINT _txe_timer_change(
TX_TIMER *timer_ptr, -> param_0
ULONG initial_ticks, -> param_1
@@ -2597,7 +2750,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_CREATE_CALL_NOT_USED
/* UINT _txe_timer_create(
TX_TIMER *timer_ptr, -> param_0
CHAR *name_ptr, -> param_1
@@ -2668,7 +2823,9 @@ VOID (*expiration_function)(ULONG);
}
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_DEACTIVATE_CALL_NOT_USED
/* UINT _txe_timer_deactivate(
TX_TIMER *timer_ptr -> param_0
); */
@@ -2688,7 +2845,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_DELETE_CALL_NOT_USED
/* UINT _txe_timer_delete(
TX_TIMER *timer_ptr -> param_0
); */
@@ -2714,7 +2873,9 @@ ALIGN_TYPE return_value;
}
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_INFO_GET_CALL_NOT_USED
/* UINT _txe_timer_info_get(
TX_TIMER *timer_ptr, -> param_0
CHAR **name, -> param_1
@@ -2762,7 +2923,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_PERFORMANCE_INFO_GET_CALL_NOT_USED
/* UINT _tx_timer_performance_info_get(
TX_TIMER *timer_ptr, -> param_0
ULONG *activates, -> param_1
@@ -2810,7 +2973,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TIMER_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
/* UINT _tx_timer_performance_system_info_get(
ULONG *activates, -> param_0
ULONG *reactivates, -> param_1
@@ -2853,7 +3018,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_BUFFER_FULL_NOTIFY_CALL_NOT_USED
/* UINT _tx_trace_buffer_full_notify(
VOID (*full_buffer_callback)(VOID *buffer) -> param_0
); */
@@ -2867,7 +3034,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_DISABLE_CALL_NOT_USED
/* UINT _tx_trace_disable(); */
static ALIGN_TYPE _txm_module_manager_tx_trace_disable_dispatch(TXM_MODULE_INSTANCE *module_instance, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE *extra_parameters)
{
@@ -2880,7 +3049,9 @@ ALIGN_TYPE return_value;
return_value = (ALIGN_TYPE) _tx_trace_disable();
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_ENABLE_CALL_NOT_USED
/* UINT _tx_trace_enable(
VOID *trace_buffer_start, -> param_0
ULONG trace_buffer_size, -> param_1
@@ -2901,7 +3072,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_EVENT_FILTER_CALL_NOT_USED
/* UINT _tx_trace_event_filter(
ULONG event_filter_bits -> param_0
); */
@@ -2915,7 +3088,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_EVENT_UNFILTER_CALL_NOT_USED
/* UINT _tx_trace_event_unfilter(
ULONG event_unfilter_bits -> param_0
); */
@@ -2929,7 +3104,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_INTERRUPT_CONTROL_CALL_NOT_USED
/* UINT _tx_trace_interrupt_control(
UINT new_posture -> param_0
); */
@@ -2946,7 +3123,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_TRACE_ISR_ENTER_INSERT_CALL_NOT_USED
/* VOID _tx_trace_isr_enter_insert(
ULONG isr_id -> param_0
); */
@@ -2961,7 +3140,9 @@ static ALIGN_TYPE _txm_module_manager_tx_trace_isr_enter_insert_dispatch(TXM_MOD
);
return(TX_SUCCESS);
}
+#endif
+#ifndef TXM_TRACE_ISR_EXIT_INSERT_CALL_NOT_USED
/* VOID _tx_trace_isr_exit_insert(
ULONG isr_id -> param_0
); */
@@ -2976,7 +3157,9 @@ static ALIGN_TYPE _txm_module_manager_tx_trace_isr_exit_insert_dispatch(TXM_MODU
);
return(TX_SUCCESS);
}
+#endif
+#ifndef TXM_TRACE_USER_EVENT_INSERT_CALL_NOT_USED
/* UINT _tx_trace_user_event_insert(
ULONG event_id, -> param_0
ULONG info_field_1, -> param_1
@@ -2994,7 +3177,7 @@ ALIGN_TYPE return_value;
if (!TXM_MODULE_MANAGER_ENSURE_INSIDE_MODULE_DATA(module_instance, (ALIGN_TYPE)extra_parameters, sizeof(ALIGN_TYPE[3])))
return(TXM_MODULE_INVALID_MEMORY);
}
-
+
return_value = (ALIGN_TYPE) _tx_trace_user_event_insert(
(ULONG) param_0,
(ULONG) param_1,
@@ -3004,7 +3187,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MODULE_OBJECT_ALLOCATE_CALL_NOT_USED
/* UINT _txm_module_object_allocate(
VOID **object_ptr, -> param_0
ULONG object_size -> param_1
@@ -3027,7 +3212,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MODULE_OBJECT_DEALLOCATE_CALL_NOT_USED
/* UINT _txm_module_object_deallocate(
VOID *object_ptr -> param_0
); */
@@ -3076,7 +3263,9 @@ ALIGN_TYPE object_pool_end;
);
return(return_value);
}
+#endif
+#ifndef TXM_MODULE_OBJECT_POINTER_GET_CALL_NOT_USED
/* UINT _txm_module_object_pointer_get(
UINT object_type, -> param_0
CHAR *name, -> param_1
@@ -3103,7 +3292,9 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
+#ifndef TXM_MODULE_OBJECT_POINTER_GET_EXTENDED_CALL_NOT_USED
/* UINT _txm_module_object_pointer_get_extended(
UINT object_type, -> param_0
CHAR *name, -> param_1
@@ -3135,3 +3326,4 @@ ALIGN_TYPE return_value;
);
return(return_value);
}
+#endif
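
Note on the guards introduced above: each TXM_*_CALL_NOT_USED macro removes one dispatch handler, so a build whose modules never request a given kernel service can drop that code from the module manager. A minimal configuration sketch, assuming the macros are supplied through the preprocessor (a port-level header is shown; compiler -D options work equally well):

/* Build-configuration sketch: exclude dispatch handlers for services the
   resident modules never call. The macro names come from the guards added
   above; where they are defined is an assumption of this example. */
#define TXM_TRACE_ENABLE_CALL_NOT_USED
#define TXM_TRACE_DISABLE_CALL_NOT_USED
#define TXM_THREAD_PERFORMANCE_INFO_GET_CALL_NOT_USED
#define TXM_THREAD_PERFORMANCE_SYSTEM_INFO_GET_CALL_NOT_USED
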
diff --git a/common_modules/module_manager/src/txm_module_manager_application_request.c b/common_modules/module_manager/src/txm_module_manager_application_request.c
index 7e3491dd..53926e7a 100644
--- a/common_modules/module_manager/src/txm_module_manager_application_request.c
+++ b/common_modules/module_manager/src/txm_module_manager_application_request.c
@@ -10,70 +10,71 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
/* Include necessary system files. */
#include "tx_api.h"
#include "txm_module.h"
-
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_application_request PORTABLE C */
-/* 6.1 */
+#ifndef TXM_MODULE_APPLICATION_REQUEST_CALL_NOT_USED
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_application_request PORTABLE C */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the application-specific module request. */
-/* The entire contents of the request structure is application */
-/* specific and thus the processing in this file is left to the */
-/* application to define. */
-/* */
-/* INPUT */
-/* */
-/* request_id Module request ID */
-/* param_1 First parameter */
-/* param_2 Second parameter */
-/* param_3 Third parameter */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the application-specific module request. */
+/* The entire contents of the request structure is application */
+/* specific and thus the processing in this file is left to the */
+/* application to define. */
+/* */
+/* INPUT */
+/* */
+/* request_id Module request ID */
+/* param_1 First parameter */
+/* param_2 Second parameter */
+/* param_3 Third parameter */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
UINT _txm_module_manager_application_request(ULONG request_id, ALIGN_TYPE param_1, ALIGN_TYPE param_2, ALIGN_TYPE param_3)
{
-
/* By default, simply return the status of not available. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
-
+#endif
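
Because the default handler above only returns TX_NOT_AVAILABLE, an application that wants module-originated, application-specific requests supplies its own version of this function. A minimal sketch, assuming request ID 0 and the meaning of param_1 are application inventions and that param_1 has already been validated as an address the resident side may write:

#include "tx_api.h"
#include "txm_module.h"

/* Illustrative replacement for the default handler; request 0 and its
   parameter usage are hypothetical, application-defined choices. */
UINT _txm_module_manager_application_request(ULONG request_id, ALIGN_TYPE param_1, ALIGN_TYPE param_2, ALIGN_TYPE param_3)
{

    (VOID) param_2;
    (VOID) param_3;

    if (request_id == 0)
    {

        /* Example request: report a firmware revision to the module. A real
           handler should validate param_1 against the module's data area
           before writing through it.  */
        *((ULONG *) param_1) =  0x00010002;
        return(TX_SUCCESS);
    }

    /* Unrecognized request: keep the default behavior.  */
    return(TX_NOT_AVAILABLE);
}
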
diff --git a/common_modules/module_manager/src/txm_module_manager_callback_request.c b/common_modules/module_manager/src/txm_module_manager_callback_request.c
index efb6f8b5..622f83a4 100644
--- a/common_modules/module_manager/src/txm_module_manager_callback_request.c
+++ b/common_modules/module_manager/src/txm_module_manager_callback_request.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -26,45 +26,45 @@
#include "tx_queue.h"
#include "tx_thread.h"
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_callback_request PORTABLE C */
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_callback_request PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function sends a notification callback function request to */
-/* the associated module. */
-/* */
-/* INPUT */
-/* */
-/* module_callback_queue Module callback request queue */
-/* callback_request Callback request */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* tx_queue_send Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function sends a notification callback function request to */
+/* the associated module. */
+/* */
+/* INPUT */
+/* */
+/* module_callback_queue Module callback request queue */
+/* callback_request Callback request */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* tx_queue_send Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_callback_request(TX_QUEUE *module_callback_queue, TXM_MODULE_CALLBACK_MESSAGE *callback_message)
@@ -80,26 +80,26 @@ UINT status;
/* Lockout interrupts. */
TX_DISABLE
-
+
/* Determine if the queue is valid. */
if ((module_callback_queue) && (module_callback_queue -> tx_queue_id == TX_QUEUE_ID))
{
/* Yes, the queue is valid. */
-
+
/* Pickup the current callback request in the queue. */
queued_message = (TXM_MODULE_CALLBACK_MESSAGE *) module_callback_queue -> tx_queue_read;
-
+
/* Pickup the number of items enqueued. */
enqueued = module_callback_queue -> tx_queue_enqueued;
-
+
/* Set the found flag to false. */
found = TX_FALSE;
/* Loop to look for duplicates in the queue. */
while (enqueued != 0)
{
-
+
/* Does this entry match the new callback message? */
if ((queued_message -> txm_module_callback_message_application_function == callback_message -> txm_module_callback_message_application_function) &&
(queued_message -> txm_module_callback_message_param_1 == callback_message -> txm_module_callback_message_param_1) &&
@@ -113,62 +113,62 @@ UINT status;
(queued_message -> txm_module_callback_message_reserved1 == callback_message -> txm_module_callback_message_reserved1) &&
(queued_message -> txm_module_callback_message_reserved2 == callback_message -> txm_module_callback_message_reserved2))
{
-
+
/* Update the activation count in the queued request. */
queued_message -> txm_module_callback_message_activation_count++;
-
+
/* Set the found flag to true. */
found = TX_TRUE;
-
+
/* Get out of the loop. */
break;
}
-
+
/* Decrease the number of messages to examine. */
enqueued--;
-
+
/* Move the callback message to the next message. */
queued_message++;
-
+
/* Check for wrap? */
if (((ULONG *) queued_message) >= module_callback_queue -> tx_queue_end)
{
-
+
/* Yes, set the queued message to the beginning of the queue. */
queued_message = (TXM_MODULE_CALLBACK_MESSAGE *) module_callback_queue -> tx_queue_start;
- }
+ }
}
/* Restore interrupts. */
TX_RESTORE
-
+
/* Determine if we need to send the new callback request. */
if (found == TX_FALSE)
{
-
+
/* Yes, send the message. */
status = _tx_queue_send(module_callback_queue, (VOID *) callback_message, TX_NO_WAIT);
-
+
/* Determine if an error was detected. */
if (status != TX_SUCCESS)
{
-
+
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
}
}
-
+
/* Increment the total number of callbacks. */
_txm_module_manager_callback_total_count++;
}
else
{
-
+
/* Module instance is not valid. */
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
-
+
/* Restore interrupts. */
TX_RESTORE
}
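
The duplicate scan above coalesces identical requests by bumping txm_module_callback_message_activation_count instead of enqueueing another copy, so a fast notifier cannot overflow the module's callback queue. The module-side callback thread is then expected to replay the callback that many times; a rough, illustrative sketch of that idea (the real module-side thread is generated by the module library, and the type and field names simply mirror those used above):

#include "tx_api.h"
#include "txm_module.h"

static VOID demo_callback_drain(TX_QUEUE *callback_queue)
{
TXM_MODULE_CALLBACK_MESSAGE message;
ULONG i;

    /* Drain one possibly coalesced request and honor its activation count.  */
    if (tx_queue_receive(callback_queue, (VOID *) &message, TX_WAIT_FOREVER) == TX_SUCCESS)
    {
        for (i = 0; i < message.txm_module_callback_message_activation_count; i++)
        {
            /* Invoke message.txm_module_callback_message_application_function with
               the parameters carried in the message; the cast depends on the
               callback type recorded in the message.  */
        }
    }
}
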
diff --git a/common_modules/module_manager/src/txm_module_manager_event_flags_notify_trampoline.c b/common_modules/module_manager/src/txm_module_manager_event_flags_notify_trampoline.c
index b86c9e32..7e86e7f7 100644
--- a/common_modules/module_manager/src/txm_module_manager_event_flags_notify_trampoline.c
+++ b/common_modules/module_manager/src/txm_module_manager_event_flags_notify_trampoline.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -27,45 +27,45 @@
#include "tx_event_flags.h"
#include "tx_thread.h"
#include "txm_module.h"
-
+
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_event_flags_notify_trampoline PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_event_flags_notify_trampoline PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the event flags set notification call from */
-/* ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* group_ptr Event flags group pointer */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_callback_request Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the event flags set notification call from */
+/* ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* group_ptr Event flags group pointer */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_callback_request Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_event_flags_notify_trampoline(TX_EVENT_FLAGS_GROUP *group_ptr)
@@ -77,9 +77,9 @@ TXM_MODULE_INSTANCE *module_instance;
TXM_MODULE_CALLBACK_MESSAGE callback_message;
TX_QUEUE *module_callback_queue;
-
+
/* We now know the callback is for a module. */
-
+
/* Disable interrupts. */
TX_DISABLE
@@ -90,9 +90,9 @@ TX_QUEUE *module_callback_queue;
if ((module_instance) && (module_instance -> txm_module_instance_id == TXM_MODULE_ID) &&
(module_instance -> txm_module_instance_state == TXM_MODULE_STARTED))
{
-
- /* Yes, the module is still valid. */
-
+
+ /* Yes, the module is still valid. */
+
/* Pickup the module's callback message queue. */
module_callback_queue = &(module_instance -> txm_module_instance_callback_request_queue);
@@ -113,21 +113,21 @@ TX_QUEUE *module_callback_queue;
/* Restore interrupts. */
TX_RESTORE
-
- /* Call the general processing that will place the callback on the
+
+ /* Call the general processing that will place the callback on the
module's callback request queue. */
_txm_module_manager_callback_request(module_callback_queue, &callback_message);
}
else
{
-
+
/* Module no longer valid. */
-
+
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
/* Restore interrupts. */
TX_RESTORE
}
-}
+}
#endif
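
The trampoline above only runs once a module has registered a set-notification on a group. A module-side registration sketch, assuming a group the module creates itself; the group and handler names are illustrative, and the registration call is the standard tx_event_flags_set_notify service:

#include "tx_api.h"

TX_EVENT_FLAGS_GROUP demo_group;        /* illustrative group owned by the module */

static VOID demo_events_set(TX_EVENT_FLAGS_GROUP *group_ptr)
{

    /* Runs in the module's callback thread after the trampoline above queues
       the request to the module.  */
    (VOID) group_ptr;
}

static VOID demo_register_notify(VOID)
{

    tx_event_flags_create(&demo_group, "demo events");
    tx_event_flags_set_notify(&demo_group, demo_events_set);
}
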
diff --git a/common_modules/module_manager/src/txm_module_manager_file_load.c b/common_modules/module_manager/src/txm_module_manager_file_load.c
index 34053393..b44a533b 100644
--- a/common_modules/module_manager/src/txm_module_manager_file_load.c
+++ b/common_modules/module_manager/src/txm_module_manager_file_load.c
@@ -80,7 +80,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_file_load(TXM_MODULE_INSTANCE *module_instance, CHAR *module_name, FX_MEDIA *media_ptr, CHAR *file_name)
diff --git a/common_modules/module_manager/src/txm_module_manager_in_place_load.c b/common_modules/module_manager/src/txm_module_manager_in_place_load.c
index ae482ec1..5826536f 100644
--- a/common_modules/module_manager/src/txm_module_manager_in_place_load.c
+++ b/common_modules/module_manager/src/txm_module_manager_in_place_load.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -31,46 +31,46 @@
#include "txm_module_manager_util.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_in_place_load PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_in_place_load PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
+/* DESCRIPTION */
+/* */
/* This function ensures the code-related parts of the module preamble */
/* are valid and calls _txm_module_manager_internal_load to load the */
/* data and prepare the module for execution. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance pointer */
-/* module_name Module name pointer */
-/* module_location Module code location */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance pointer */
+/* module_name Module name pointer */
+/* module_location Module code location */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
/* _txm_module_manager_internal_load Load data and prepare module for */
/* execution */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_in_place_load(TXM_MODULE_INSTANCE *module_instance, CHAR *module_name, VOID *module_location)
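
Typical use, assuming a module image linked for execution in place and already resident at a known flash address; the instance name, module name, and address below are illustrative, and the application-level names follow the usual txm_module_manager_* mapping:

#include "tx_api.h"
#include "txm_module.h"

TXM_MODULE_INSTANCE demo_module;                 /* illustrative module instance */

UINT demo_module_load_and_start(VOID)
{
UINT status;

    /* Code executes in place at 0x08060000 (illustrative); only the module's
       data area is allocated from the module manager's byte pool.  */
    status = txm_module_manager_in_place_load(&demo_module, "demo module", (VOID *) 0x08060000);
    if (status == TX_SUCCESS)
    {
        status = txm_module_manager_start(&demo_module);
    }
    return(status);
}
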
diff --git a/common_modules/module_manager/src/txm_module_manager_initialize.c b/common_modules/module_manager/src/txm_module_manager_initialize.c
index a9190454..8ea3e693 100644
--- a/common_modules/module_manager/src/txm_module_manager_initialize.c
+++ b/common_modules/module_manager/src/txm_module_manager_initialize.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
#define TX_MODULE_MANAGER_INIT
@@ -80,7 +80,7 @@ ULONG _txm_module_manger_loaded_count;
/* Define the ready flag, which is checked by other module manager APIs
to make sure the manager has been initialized. */
-
+
UINT _txm_module_manager_ready;
@@ -97,44 +97,44 @@ ULONG _txm_module_manager_callback_error_count;
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_initialize PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_initialize PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function initializes the module manager. */
-/* */
-/* INPUT */
-/* */
-/* module_memory_start Start of module area */
-/* module_memory_size Size in bytes of module area */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _tx_byte_pool_create Create module memory byte pool */
-/* _tx_mutex_create Create module manager */
-/* protection mutex */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function initializes the module manager. */
+/* */
+/* INPUT */
+/* */
+/* module_memory_start Start of module area */
+/* module_memory_size Size in bytes of module area */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _tx_byte_pool_create Create module memory byte pool */
+/* _tx_mutex_create Create module manager */
+/* protection mutex */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_initialize(VOID *module_memory_start, ULONG module_memory_size)
@@ -143,11 +143,11 @@ UINT _txm_module_manager_initialize(VOID *module_memory_start, ULONG module_mem
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != 0)
{
-
+
/* Now, make sure the call is from an interrupt and not initialization. */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
-
+
/* Invalid caller of this function, return appropriate error code. */
return(TX_CALLER_ERROR);
}
@@ -171,7 +171,7 @@ UINT _txm_module_manager_initialize(VOID *module_memory_start, ULONG module_mem
/* Create the module manager protection mutex. */
_tx_mutex_create(&_txm_module_manager_mutex, "Module Manager Protection Mutex", TX_NO_INHERIT);
- /* Create a byte pool for allocating RAM areas for modules. */
+ /* Create a byte pool for allocating RAM areas for modules. */
_tx_byte_pool_create(&_txm_module_manager_byte_pool, "Module Manager Byte Pool", module_memory_start, module_memory_size);
/* Indicate the module manager object pool has not been created. */
@@ -179,7 +179,7 @@ UINT _txm_module_manager_initialize(VOID *module_memory_start, ULONG module_mem
/* Mark the module manager as ready! */
_txm_module_manager_ready = TX_TRUE;
-
+
/* Return success. */
return(TX_SUCCESS);
}
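
Typical use from application initialization, assuming a dedicated RAM region is reserved for module data areas and stacks; the size and placement below are illustrative:

#include "tx_api.h"
#include "txm_module.h"

static UCHAR demo_module_memory[65536];          /* illustrative module RAM area */

VOID demo_module_manager_setup(VOID)
{

    /* Must complete before any other module manager service is used; the
       region becomes the byte pool that module data areas are carved from.  */
    txm_module_manager_initialize((VOID *) demo_module_memory, sizeof(demo_module_memory));
}
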
diff --git a/common_modules/module_manager/src/txm_module_manager_internal_load.c b/common_modules/module_manager/src/txm_module_manager_internal_load.c
index 6b5ae54f..367b557e 100644
--- a/common_modules/module_manager/src/txm_module_manager_internal_load.c
+++ b/common_modules/module_manager/src/txm_module_manager_internal_load.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -31,49 +31,49 @@
#include "txm_module_manager_util.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_internal_load PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_internal_load PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function allocates data memory for module and prepares the */
-/* module for execution from the supplied code location. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance pointer */
-/* module_name Module name pointer */
-/* module_location Module code location */
-/* code_size Module code size */
-/* code_allocation_ptr Allocated code location */
-/* code_allocation_size Allocated code size */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _tx_byte_allocate Allocate data area */
-/* _tx_mutex_get Get protection mutex */
-/* _tx_mutex_put Release protection mutex */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function allocates data memory for module and prepares the */
+/* module for execution from the supplied code location. */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance pointer */
+/* module_name Module name pointer */
+/* module_location Module code location */
+/* code_size Module code size */
+/* code_allocation_ptr Allocated code location */
+/* code_allocation_size Allocated code size */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _tx_byte_allocate Allocate data area */
+/* _tx_mutex_get Get protection mutex */
+/* _tx_mutex_put Release protection mutex */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_internal_load(TXM_MODULE_INSTANCE *module_instance, CHAR *module_name, VOID *module_location,
@@ -81,7 +81,7 @@ UINT _txm_module_manager_internal_load(TXM_MODULE_INSTANCE *module_instance, CH
{
TX_INTERRUPT_SAVE_AREA
-
+
TXM_MODULE_PREAMBLE *module_preamble;
TXM_MODULE_INSTANCE *next_module, *previous_module;
ULONG shell_function_adjust;
@@ -104,11 +104,11 @@ UINT status;
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != 0)
{
-
+
/* Now, make sure the call is from an interrupt and not initialization. */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
-
+
/* Invalid caller of this function, return appropriate error code. */
return(TX_CALLER_ERROR);
}
@@ -117,15 +117,15 @@ UINT status;
/* Determine if the module manager has not been initialized yet. */
if (_txm_module_manager_ready != TX_TRUE)
{
-
+
/* Module manager has not been initialized. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
/* Determine if the module is valid. */
if (module_instance == TX_NULL)
{
-
+
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
@@ -150,7 +150,7 @@ UINT status;
/* Check to make sure there is a valid module to load. */
if (module_preamble -> txm_module_preamble_id != TXM_MODULE_ID)
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -175,12 +175,12 @@ UINT status;
/* Invalid properties. Return error. */
return(TXM_MODULE_INVALID_PROPERTIES);
}
-
+
/* Check for valid module entry offsets. */
if ((module_preamble -> txm_module_preamble_shell_entry_function == 0) ||
(module_preamble -> txm_module_preamble_start_function == 0))
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -194,7 +194,7 @@ UINT status;
(module_preamble -> txm_module_preamble_start_stop_stack_size == 0) ||
(module_preamble -> txm_module_preamble_callback_stack_size == 0))
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -227,11 +227,11 @@ UINT status;
/* Update the data size to account for the default thread stacks. */
TXM_MODULE_MANAGER_UTIL_MATH_ADD_ULONG(data_size, start_stop_stack_size, data_size);
TXM_MODULE_MANAGER_UTIL_MATH_ADD_ULONG(data_size, callback_stack_size, data_size);
-
+
/* Setup the default code and data alignments. */
data_alignment = (ULONG) TXM_MODULE_DATA_ALIGNMENT;
- /* Get the port-specific alignment for the data size. Note we only want data
+ /* Get the port-specific alignment for the data size. Note we only want data
so we pass values of 1 for code (to avoid any possible div by 0 errors). */
code_size_ignored = 1;
code_alignment_ignored = 1;
@@ -239,16 +239,16 @@ UINT status;
/* Calculate the module's total RAM memory requirement. This entire area is allocated from the module
manager's byte pool. The general layout is defined as follows:
-
+
Lowest Address: Start of start/stop thread stack
... [note: thread entry info is embedded near end of stack areas]
- End of start/stop thread stack
-
+ End of start/stop thread stack
+
Start of callback thread stack
... [note: thread entry info is embedded near end of stack areas]
- End of callback thread stack
-
- Module's Data Area
+ End of callback thread stack
+
+ Module's Data Area
...
End of Module's Data Area
Highest Address: */
@@ -258,11 +258,11 @@ UINT status;
/* Allocate memory for the module. */
status = _tx_byte_allocate(&_txm_module_manager_byte_pool, (VOID **) &memory_ptr, data_allocation_size, TX_NO_WAIT);
-
+
/* Determine if the module memory allocation was successful. */
if (status)
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -272,10 +272,10 @@ UINT status;
/* Clear the allocated memory. */
TX_MEMSET(memory_ptr, ((UCHAR) 0), data_allocation_size);
-
+
/* Disable interrupts. */
TX_DISABLE
-
+
/* Setup the module instance structure. */
module_instance -> txm_module_instance_id = TXM_MODULE_ID;
@@ -285,7 +285,7 @@ UINT status;
/* Save the module properties. */
module_instance -> txm_module_instance_property_flags = module_preamble -> txm_module_preamble_property_flags;
- /* Set the module data memory allocation. This is the address released
+ /* Set the module data memory allocation. This is the address released
when the module is unloaded. */
module_instance -> txm_module_instance_data_allocation_ptr = (VOID *) memory_ptr;
@@ -297,14 +297,14 @@ UINT status;
data_start = (data_start + (((ALIGN_TYPE)data_alignment) - 1)) & ~(((ALIGN_TYPE)data_alignment) - 1);
memory_ptr = (CHAR *) data_start;
module_instance -> txm_module_instance_data_start = (VOID *) memory_ptr;
-
+
/* Compute the end of the data memory allocation. */
module_instance -> txm_module_instance_data_end = (VOID *) (memory_ptr + (data_size - 1));
/* Save the size of the data area. */
module_instance -> txm_module_instance_data_size = data_size;
- /* Set the module code memory allocation. This is the address released
+ /* Set the module code memory allocation. This is the address released
when the module is unloaded. */
module_instance -> txm_module_instance_code_allocation_ptr = (VOID *) code_allocation_ptr;
@@ -329,7 +329,7 @@ UINT status;
/* Save the module application ID in the module instance. */
module_instance -> txm_module_instance_application_module_id = module_preamble -> txm_module_preamble_application_module_id;
-
+
/* Setup the module's start/stop thread stack area. */
module_instance -> txm_module_instance_start_stop_stack_start_address = (VOID *) (memory_ptr);
module_instance -> txm_module_instance_start_stop_stack_size = start_stop_stack_size;
@@ -358,36 +358,36 @@ UINT status;
/* Calculate the function adjustments based on the specific implementation of the module manager/module. */
TXM_MODULE_MANAGER_CALCULATE_ADJUSTMENTS(module_preamble -> txm_module_preamble_property_flags, shell_function_adjust, start_function_adjust, stop_function_adjust, callback_function_adjust)
- /* Build actual addresses based on load... Setup all the function pointers. Any adjustments needed to shell entry, start function, and callback function are defined in the
+ /* Build actual addresses based on load... Setup all the function pointers. Any adjustments needed to shell entry, start function, and callback function are defined in the
module preamble. */
- module_instance -> txm_module_instance_shell_entry_function = (VOID (*)(TX_THREAD *, TXM_MODULE_INSTANCE *)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
- (module_preamble -> txm_module_preamble_shell_entry_function) +
+ module_instance -> txm_module_instance_shell_entry_function = (VOID (*)(TX_THREAD *, TXM_MODULE_INSTANCE *)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
+ (module_preamble -> txm_module_preamble_shell_entry_function) +
(shell_function_adjust));
- module_instance -> txm_module_instance_start_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
- (module_preamble -> txm_module_preamble_start_function) +
+ module_instance -> txm_module_instance_start_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
+ (module_preamble -> txm_module_preamble_start_function) +
(start_function_adjust));
- module_instance -> txm_module_instance_callback_request_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
+ module_instance -> txm_module_instance_callback_request_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
(module_preamble -> txm_module_preamble_callback_function) +
- (callback_function_adjust));
+ (callback_function_adjust));
/* Determine if there is a stop function for this module. */
if (module_preamble -> txm_module_preamble_stop_function)
{
-
+
/* Yes, there is a stop function, build the address. */
- module_instance -> txm_module_instance_stop_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
- (module_preamble -> txm_module_preamble_stop_function) +
+ module_instance -> txm_module_instance_stop_thread_entry = (VOID (*)(ULONG)) (((CHAR *) module_instance -> txm_module_instance_code_start) +
+ (module_preamble -> txm_module_preamble_stop_function) +
(stop_function_adjust));
}
else
{
-
+
/* No, there is no stop function. Just set the pointer to NULL. */
module_instance -> txm_module_instance_stop_thread_entry = TX_NULL;
}
-
+
/* Load the module control block with port-specific information. */
TXM_MODULE_MANAGER_MODULE_SETUP(module_instance);
-
+
/* Now add the module to the linked list of created modules. */
if (_txm_module_manger_loaded_count++ == 0)
{
@@ -410,7 +410,7 @@ UINT status;
/* Setup this module's created links. */
module_instance -> txm_module_instance_loaded_previous = previous_module;
- module_instance -> txm_module_instance_loaded_next = next_module;
+ module_instance -> txm_module_instance_loaded_next = next_module;
}
/* Restore interrupts. */
diff --git a/common_modules/module_manager/src/txm_module_manager_kernel_dispatch.c b/common_modules/module_manager/src/txm_module_manager_kernel_dispatch.c
index a0929621..6839194a 100644
--- a/common_modules/module_manager/src/txm_module_manager_kernel_dispatch.c
+++ b/common_modules/module_manager/src/txm_module_manager_kernel_dispatch.c
@@ -29,7 +29,7 @@
#include "tx_queue.h"
#include "tx_mutex.h"
#include "tx_semaphore.h"
-#include "tx_thread.h"
+#include "tx_thread.h"
#include "tx_timer.h"
#include "tx_trace.h"
#include "txm_module.h"
@@ -41,7 +41,7 @@
/* FUNCTION RELEASE */
/* */
/* _txm_module_manager_kernel_dispatch PORTABLE C */
-/* 6.1.6 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -89,6 +89,9 @@
/* added optional defines to */
/* remove unneeded functions, */
/* resulting in version 6.1.6 */
+/* 01-31-2022 Scott Larson Modified comments and added */
+/* CALL_NOT_USED option, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
ALIGN_TYPE _txm_module_manager_kernel_dispatch(ULONG kernel_request, ALIGN_TYPE param_0, ALIGN_TYPE param_1, ALIGN_TYPE param_2)
@@ -437,7 +440,7 @@ TXM_MODULE_INSTANCE *module_instance;
break;
}
#endif
-
+
#ifndef TXM_QUEUE_SEND_CALL_NOT_USED
case TXM_QUEUE_SEND_CALL:
{
@@ -888,14 +891,16 @@ TXM_MODULE_INSTANCE *module_instance;
return_value = (ALIGN_TYPE) _txm_module_manager_port_dispatch(module_instance, kernel_request, param_0, param_1, param_2);
}
#endif
-
- /* Determine if an application request is present. */
+
+ #ifndef TXM_MODULE_APPLICATION_REQUEST_CALL_NOT_USED
+ /* Determine if an application request is present. */
if (kernel_request >= TXM_APPLICATION_REQUEST_ID_BASE)
{
/* Yes, call the module manager function that the application defines in order to
support application-specific requests. */
return_value = (ALIGN_TYPE) _txm_module_manager_application_request(kernel_request-TXM_APPLICATION_REQUEST_ID_BASE, param_0, param_1, param_2);
}
+ #endif
#ifdef TXM_MODULE_ENABLE_NETX
/* Determine if there is a NetX request. */
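
Requests at or above TXM_APPLICATION_REQUEST_ID_BASE are rebased and forwarded to _txm_module_manager_application_request, so kernel and application ID spaces cannot collide. A module-side sketch, assuming the module library's txm_module_application_request wrapper (and that the wrapper applies TXM_APPLICATION_REQUEST_ID_BASE itself); request 0 and its parameter are illustrative:

#include "tx_api.h"
#include "txm_module.h"

static UINT demo_query_revision(ULONG *revision_ptr)
{

    /* 0 is an application-chosen request ID; after rebasing, the resident
       handler in txm_module_manager_application_request.c sees it as 0.  */
    return(txm_module_application_request(0, (ALIGN_TYPE) revision_ptr, 0, 0));
}
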
diff --git a/common_modules/module_manager/src/txm_module_manager_maximum_module_priority_set.c b/common_modules/module_manager/src/txm_module_manager_maximum_module_priority_set.c
index d725faf2..94367983 100644
--- a/common_modules/module_manager/src/txm_module_manager_maximum_module_priority_set.c
+++ b/common_modules/module_manager/src/txm_module_manager_maximum_module_priority_set.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -27,43 +27,43 @@
#include "txm_module.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_maximum_module_priority_set PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_maximum_module_priority_set PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function sets the maximum thread priority allowed in a module. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance pointer */
-/* priority Maximum thread priority */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _tx_mutex_get Get protection mutex */
-/* _tx_mutex_put Release protection mutex */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function sets the maximum thread priority allowed in a module. */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance pointer */
+/* priority Maximum thread priority */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _tx_mutex_get Get protection mutex */
+/* _tx_mutex_put Release protection mutex */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_maximum_module_priority_set(TXM_MODULE_INSTANCE *module_instance, UINT priority)
@@ -72,45 +72,45 @@ UINT _txm_module_manager_maximum_module_priority_set(TXM_MODULE_INSTANCE *module
if (_txm_module_manager_ready != TX_TRUE)
{
/* Module manager has not been initialized. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
-
+
/* Determine if the module is valid. */
if (module_instance == TX_NULL)
{
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
-
+
/* Get module manager protection mutex. */
_tx_mutex_get(&_txm_module_manager_mutex, TX_WAIT_FOREVER);
-
+
/* Determine if the module instance is valid. */
if (module_instance -> txm_module_instance_id != TXM_MODULE_ID)
{
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
-
+
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
-
+
/* Determine if the module instance is in the loaded state. */
if ((module_instance -> txm_module_instance_state != TXM_MODULE_LOADED) && (module_instance -> txm_module_instance_state != TXM_MODULE_STOPPED))
{
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
-
+
/* Return error if the module is not ready. */
return(TX_START_ERROR);
}
-
-
+
+
/* Set module's maximum priority. */
module_instance->txm_module_instance_maximum_priority = priority;
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
-
+
return(TX_SUCCESS);
}
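
A short usage sketch, assuming the module instance has already been loaded but not yet started (per the state check above) and that the application-level txm_module_manager_maximum_module_priority_set mapping is available; the priority value is illustrative:

#include "tx_api.h"
#include "txm_module.h"

static UINT demo_restrict_module(TXM_MODULE_INSTANCE *module_ptr)
{

    /* Keep every thread this module creates at priority 8 or numerically
       higher (less urgent); only valid while the module is loaded or stopped.  */
    return(txm_module_manager_maximum_module_priority_set(module_ptr, 8));
}
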
diff --git a/common_modules/module_manager/src/txm_module_manager_memory_load.c b/common_modules/module_manager/src/txm_module_manager_memory_load.c
index e35f01b9..8d35c554 100644
--- a/common_modules/module_manager/src/txm_module_manager_memory_load.c
+++ b/common_modules/module_manager/src/txm_module_manager_memory_load.c
@@ -71,7 +71,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_memory_load(TXM_MODULE_INSTANCE *module_instance, CHAR *module_name, VOID *module_location)
diff --git a/common_modules/module_manager/src/txm_module_manager_object_allocate.c b/common_modules/module_manager/src/txm_module_manager_object_allocate.c
index 829dbb1f..0482fbcf 100644
--- a/common_modules/module_manager/src/txm_module_manager_object_allocate.c
+++ b/common_modules/module_manager/src/txm_module_manager_object_allocate.c
@@ -28,7 +28,7 @@
/* */
/* FUNCTION RELEASE */
/* */
-/* _txm_module_manager_object_allocate PORTABLE C */
+/* _txm_module_manager_object_allocate PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
@@ -36,16 +36,16 @@
/* */
/* DESCRIPTION */
/* */
-/* This function allocates memory for an object from the memory pool */
-/* supplied to txm_module_manager_initialize. */
+/* This function allocates memory for an object from the memory pool */
+/* supplied to txm_module_manager_initialize. */
/* */
/* INPUT */
/* */
-/* object_ptr Destination of object pointer on */
-/* successful allocation */
-/* object_size Size in bytes of the object to be */
-/* allocated */
-/* module_instance The module instance that the */
+/* object_ptr Destination of object pointer on */
+/* successful allocation */
+/* object_size Size in bytes of the object to be */
+/* allocated */
+/* module_instance The module instance that the */
/* object belongs to */
/* */
/* OUTPUT */
@@ -66,7 +66,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_allocate(VOID **object_ptr_ptr, ULONG object_size, TXM_MODULE_INSTANCE *module_instance)
@@ -98,7 +98,7 @@ UINT return_value;
/* Allocate the object requested by the module - adding an extra ULONG in order to
store the module instance pointer. */
- return_value = (ULONG) _txe_byte_allocate(&_txm_module_manager_object_pool, (VOID **) &object_ptr,
+ return_value = (ULONG) _txe_byte_allocate(&_txm_module_manager_object_pool, (VOID **) &object_ptr,
(ULONG) (object_size + sizeof(TXM_MODULE_ALLOCATED_OBJECT)), TX_NO_WAIT);
/* Determine if the request was successful. */
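
The extra sizeof(TXM_MODULE_ALLOCATED_OBJECT) bytes keep the owning module recorded in front of every object so deallocation and unload can find it. A module-side sketch, assuming the resident code created an object pool beforehand and that the module-visible wrappers are txm_module_object_allocate and txm_module_object_deallocate; the queue usage is illustrative:

#include "tx_api.h"
#include "txm_module.h"

static TX_QUEUE *demo_queue_control_block_get(VOID)
{
TX_QUEUE *queue_ptr;

    /* Request a TX_QUEUE-sized object from the resident object pool; the
       manager places its TXM_MODULE_ALLOCATED_OBJECT header ahead of the
       address returned here.  */
    if (txm_module_object_allocate((VOID **) &queue_ptr, sizeof(TX_QUEUE)) != TX_SUCCESS)
    {
        return(TX_NULL);
    }

    /* ... tx_queue_create(queue_ptr, ...) here, and txm_module_object_deallocate(queue_ptr)
       once the queue has been deleted.  */
    return(queue_ptr);
}
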
diff --git a/common_modules/module_manager/src/txm_module_manager_object_deallocate.c b/common_modules/module_manager/src/txm_module_manager_object_deallocate.c
index 5b90b995..fd9fefdb 100644
--- a/common_modules/module_manager/src/txm_module_manager_object_deallocate.c
+++ b/common_modules/module_manager/src/txm_module_manager_object_deallocate.c
@@ -23,7 +23,7 @@
#define TX_SOURCE_CODE
#include "tx_api.h"
-#include "tx_thread.h"
+#include "tx_thread.h"
#include "txm_module.h"
/**************************************************************************/
@@ -38,11 +38,11 @@
/* */
/* DESCRIPTION */
/* */
-/* This function deallocates a previously allocated object. */
+/* This function deallocates a previously allocated object. */
/* */
/* INPUT */
/* */
-/* object_ptr Object pointer to deallocate */
+/* object_ptr Object pointer to deallocate */
/* */
/* OUTPUT */
/* */
@@ -63,7 +63,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_deallocate(VOID *object_ptr)
@@ -78,18 +78,18 @@ UINT return_value;
/* Determine if an object pool was created. */
if (_txm_module_manager_object_pool_created)
{
-
+
TXM_MODULE_ALLOCATED_OBJECT *next_object, *previous_object;
-
+
/* Pickup module instance pointer. */
module_instance = _tx_thread_current_ptr -> tx_thread_module_instance_ptr;
-
+
/* Setup the memory pointer. */
module_allocated_object_ptr = (TXM_MODULE_ALLOCATED_OBJECT *) object_ptr;
-
+
/* Position the object pointer backwards to position back to the module manager information. */
previous_object = module_allocated_object_ptr--;
-
+
/* Make sure the object is valid. */
if ((module_allocated_object_ptr == TX_NULL) || (module_allocated_object_ptr -> txm_module_allocated_object_module_instance != module_instance) || (module_instance -> txm_module_instance_object_list_count == 0))
{
@@ -98,8 +98,8 @@ UINT return_value;
}
else
{
-
- /* Unlink the node. */
+
+ /* Unlink the node. */
if ((--module_instance -> txm_module_instance_object_list_count) == 0)
{
/* Only allocated object, just set the allocated list to NULL. */
@@ -112,16 +112,16 @@ UINT return_value;
previous_object = module_allocated_object_ptr -> txm_module_allocated_object_previous;
next_object -> txm_module_allocated_object_previous = previous_object;
previous_object -> txm_module_allocated_object_next = next_object;
-
+
/* See if we have to update the allocated object list head pointer. */
if (module_instance -> txm_module_instance_object_list_head == module_allocated_object_ptr)
{
/* Yes, move the head pointer to the next link. */
- module_instance -> txm_module_instance_object_list_head = next_object;
+ module_instance -> txm_module_instance_object_list_head = next_object;
}
}
-
- /* Release the object memory. */
+
+ /* Release the object memory. */
return_value = (ULONG) _txe_byte_release((VOID *) module_allocated_object_ptr);
}
}
@@ -130,9 +130,9 @@ UINT return_value;
/* Set return value to not enabled. */
return_value = TX_NOT_AVAILABLE;
}
-
+
/* Release the protection mutex. */
_txe_mutex_put(&_txm_module_manager_mutex);
-
+
return(return_value);
}
diff --git a/common_modules/module_manager/src/txm_module_manager_object_pointer_get.c b/common_modules/module_manager/src/txm_module_manager_object_pointer_get.c
index 6a7927ea..dd58ff82 100644
--- a/common_modules/module_manager/src/txm_module_manager_object_pointer_get.c
+++ b/common_modules/module_manager/src/txm_module_manager_object_pointer_get.c
@@ -76,7 +76,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_pointer_get(UINT object_type, CHAR *name, VOID **object_ptr)
@@ -84,7 +84,7 @@ UINT _txm_module_manager_object_pointer_get(UINT object_type, CHAR *name, VOID
UINT status;
- /* Call the secure version of this function with the maximum length
+ /* Call the secure version of this function with the maximum length
possible since none was passed. */
status = _txm_module_manager_object_pointer_get_extended(object_type, name, TXM_MODULE_MANAGER_UTIL_MAX_VALUE_OF_TYPE_UNSIGNED(UINT), object_ptr);
return(status);
diff --git a/common_modules/module_manager/src/txm_module_manager_object_pointer_get_extended.c b/common_modules/module_manager/src/txm_module_manager_object_pointer_get_extended.c
index b62081ac..5ff38ccf 100644
--- a/common_modules/module_manager/src/txm_module_manager_object_pointer_get_extended.c
+++ b/common_modules/module_manager/src/txm_module_manager_object_pointer_get_extended.c
@@ -93,14 +93,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_pointer_get_extended(UINT object_type, CHAR *search_name, UINT search_name_length, VOID **object_ptr)
{
-
+
TX_INTERRUPT_SAVE_AREA
-
+
TX_THREAD *thread_ptr;
TX_TIMER *timer_ptr;
TX_QUEUE *queue_ptr;
@@ -117,7 +117,7 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if the name or object pointer are NULL. */
if ((search_name == TX_NULL) || (object_ptr == TX_NULL))
{
-
+
/* Return error! */
return(TX_PTR_ERROR);
}
@@ -133,36 +133,36 @@ TXM_MODULE_INSTANCE *module_instance;
/* Temporarily disable preemption. This will keep other threads from creating and deleting threads. */
_tx_thread_preempt_disable++;
-
+
/* Restore interrupts. */
TX_RESTORE
-
+
/* Process relative to the object type. */
switch(object_type)
{
-
+
/* Determine if a thread object is requested. */
case TXM_THREAD_OBJECT:
- {
-
+ {
+
/* Loop to find the first matching thread. */
i = 0;
thread_ptr = _tx_thread_created_ptr;
while (i < _tx_thread_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, thread_ptr -> tx_thread_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) thread_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
@@ -171,32 +171,32 @@ TXM_MODULE_INSTANCE *module_instance;
}
break;
}
-
+
/* Determine if a timer object is requested. */
case TXM_TIMER_OBJECT:
{
-
+
/* Loop to find the first matching timer. */
i = 0;
timer_ptr = _tx_timer_created_ptr;
while (i < _tx_timer_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, timer_ptr -> tx_timer_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) timer_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next timer. */
timer_ptr = timer_ptr -> tx_timer_created_next;
}
@@ -212,22 +212,22 @@ TXM_MODULE_INSTANCE *module_instance;
queue_ptr = _tx_queue_created_ptr;
while (i < _tx_queue_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, queue_ptr -> tx_queue_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) queue_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
/* Increment the counter. */
i++;
-
+
/* Move to next queue. */
queue_ptr = queue_ptr -> tx_queue_created_next;
}
@@ -243,22 +243,22 @@ TXM_MODULE_INSTANCE *module_instance;
events_ptr = _tx_event_flags_created_ptr;
while (i < _tx_event_flags_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, events_ptr -> tx_event_flags_group_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) events_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next event flags group. */
events_ptr = events_ptr -> tx_event_flags_group_created_next;
}
@@ -274,22 +274,22 @@ TXM_MODULE_INSTANCE *module_instance;
semaphore_ptr = _tx_semaphore_created_ptr;
while (i < _tx_semaphore_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, semaphore_ptr -> tx_semaphore_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) semaphore_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next semaphore. */
semaphore_ptr = semaphore_ptr -> tx_semaphore_created_next;
}
@@ -305,22 +305,22 @@ TXM_MODULE_INSTANCE *module_instance;
mutex_ptr = _tx_mutex_created_ptr;
while (i < _tx_mutex_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, mutex_ptr -> tx_mutex_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) mutex_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next mutex. */
mutex_ptr = mutex_ptr -> tx_mutex_created_next;
}
@@ -353,22 +353,22 @@ TXM_MODULE_INSTANCE *module_instance;
block_pool_ptr = _tx_block_pool_created_ptr;
while (i < _tx_block_pool_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, block_pool_ptr -> tx_block_pool_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) block_pool_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next block pool. */
block_pool_ptr = block_pool_ptr -> tx_block_pool_created_next;
}
@@ -401,22 +401,22 @@ TXM_MODULE_INSTANCE *module_instance;
byte_pool_ptr = _tx_byte_pool_created_ptr;
while (i < _tx_byte_pool_created_count)
{
-
+
/* Do we have a match? */
if (_txm_module_manager_object_name_compare(search_name, search_name_length, byte_pool_ptr -> tx_byte_pool_name))
{
-
+
/* Yes, we found it - return the necessary info! */
*object_ptr = (VOID *) byte_pool_ptr;
-
+
/* Set the the status to success! */
- status = TX_SUCCESS;
+ status = TX_SUCCESS;
break;
}
-
+
/* Increment the counter. */
i++;
-
+
/* Move to next byte pool. */
byte_pool_ptr = byte_pool_ptr -> tx_byte_pool_created_next;
}
@@ -435,7 +435,7 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if there is a NetX object get request. */
if ((object_type >= TXM_NETX_OBJECTS_START) && (object_type < TXM_NETX_OBJECTS_END))
{
-
+
/* Call the NetX module object get function. */
status = _txm_module_manager_netx_object_pointer_get(object_type, search_name, search_name_length, object_ptr);
}
@@ -446,7 +446,7 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if there is a NetX Duo object get request. */
if ((object_type >= TXM_NETXDUO_OBJECTS_START) && (object_type < TXM_NETXDUO_OBJECTS_END))
{
-
+
/* Call the NetX Duo module object get function. */
status = _txm_module_manager_netxduo_object_pointer_get(object_type, search_name, search_name_length, object_ptr);
}
@@ -457,7 +457,7 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if there is a FileX object get request. */
if ((object_type >= TXM_FILEX_OBJECTS_START) && (object_type < TXM_FILEX_OBJECTS_END))
{
-
+
/* Call the FileX module object get function. */
status = _txm_module_manager_filex_object_pointer_get(object_type, search_name, search_name_length, object_ptr);
}
@@ -469,7 +469,7 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if there is a GUIX object get request. */
if ((object_type >= TXM_GUIX_OBJECTS_START) && (object_type < TXM_GUIX_OBJECTS_END))
{
-
+
/* Call the GUIX module object get function. */
status = _txm_module_manager_guix_object_pointer_get(object_type, search_name, search_name_length, object_ptr);
}
@@ -480,13 +480,13 @@ TXM_MODULE_INSTANCE *module_instance;
/* Determine if there is a USBX object get request. */
if ((object_type >= TXM_USBX_OBJECTS_START) && (object_type < TXM_USBX_OBJECTS_END))
{
-
+
/* Call the USBX object get function. */
status = _txm_module_manager_usbx_object_pointer_get(object_type, search_name, search_name_length, object_ptr);
}
#endif
- break;
+ break;
}
/* Disable interrupts. */
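The extended get walks the kernel's created-object lists and compares names with a caller-supplied length bound. A module-side sketch of resolving an object created by the resident application, assuming the txm_module_object_pointer_get_extended service and the TXM_QUEUE_OBJECT type constant from txm_module.h (only TXM_THREAD_OBJECT and TXM_TIMER_OBJECT appear in the hunks above, so TXM_QUEUE_OBJECT is an assumption here).

/* Module-side sketch: look up a queue named "shared queue" by name, bounding
   the search-name length as the extended service expects. */
#include "txm_module.h"

TX_QUEUE *shared_queue;

UINT demo_find_shared_queue(VOID)
{
UINT status;

    status = txm_module_object_pointer_get_extended(TXM_QUEUE_OBJECT,
                                                    "shared queue", 12,
                                                    (VOID **) &shared_queue);
    return(status);
}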
diff --git a/common_modules/module_manager/src/txm_module_manager_object_pool_create.c b/common_modules/module_manager/src/txm_module_manager_object_pool_create.c
index 25a91340..9090432a 100644
--- a/common_modules/module_manager/src/txm_module_manager_object_pool_create.c
+++ b/common_modules/module_manager/src/txm_module_manager_object_pool_create.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -26,56 +26,56 @@
#include "txm_module.h"
#include "tx_byte_pool.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* txm_module_manager_object_pool_create PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* txm_module_manager_object_pool_create PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function creates an object pool for the module manager, */
-/* which is used by modules to allocate system resources outside */
-/* the memory area of the module. This is especially useful in */
-/* memory protection. */
-/* */
-/* INPUT */
-/* */
-/* object_memory Object memory address */
-/* object_memory_size Size in bytes of memory area */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _tx_byte_pool_create Create module memory byte pool */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function creates an object pool for the module manager, */
+/* which is used by modules to allocate system resources outside */
+/* the memory area of the module. This is especially useful in */
+/* memory protection. */
+/* */
+/* INPUT */
+/* */
+/* object_memory Object memory address */
+/* object_memory_size Size in bytes of memory area */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _tx_byte_pool_create Create module memory byte pool */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_pool_create(VOID *object_memory, ULONG object_memory_size)
{
- /* Create a byte pool for allocating RAM areas for modules. */
+ /* Create a byte pool for allocating RAM areas for modules. */
_tx_byte_pool_create(&_txm_module_manager_object_pool, "Module Manager Object Pool", object_memory, object_memory_size);
/* Indicate the module manager object pool has been created. */
_txm_module_manager_object_pool_created = TX_TRUE;
-
+
/* Return success. */
return(TX_SUCCESS);
}
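The pool created above is what backs the object allocate/deallocate services shown earlier. A resident-side setup sketch using the public txm_module_manager_object_pool_create name; the buffer size is a placeholder.

/* Resident-side sketch: reserve a RAM area from which modules may allocate
   kernel objects. 16 KB and the static buffer are illustrative only. */
#include "txm_module.h"

static UCHAR demo_object_pool_memory[16384];

VOID demo_object_pool_setup(VOID)
{
    txm_module_manager_object_pool_create(demo_object_pool_memory,
                                          sizeof(demo_object_pool_memory));
}

Since the object pool is a regular byte pool, its size simply bounds how many module-requested control blocks can be outstanding at once.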
diff --git a/common_modules/module_manager/src/txm_module_manager_properties_get.c b/common_modules/module_manager/src/txm_module_manager_properties_get.c
index 3a91812d..d46afa75 100644
--- a/common_modules/module_manager/src/txm_module_manager_properties_get.c
+++ b/common_modules/module_manager/src/txm_module_manager_properties_get.c
@@ -10,57 +10,57 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
#include "txm_module.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_properties_get PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_properties_get PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function returns the properties of the specified module so they*/
-/* may be checked before executing the module. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance pointer */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function returns the properties of the specified module so they*/
+/* may be checked before executing the module. */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance pointer */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_properties_get(TXM_MODULE_INSTANCE *module_instance, ULONG *module_properties_ptr)
@@ -69,15 +69,15 @@ UINT _txm_module_manager_properties_get(TXM_MODULE_INSTANCE *module_instance, U
/* Determine if the module manager has not been initialized yet. */
if (_txm_module_manager_ready != TX_TRUE)
{
-
+
/* Module manager has not been initialized. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
/* Determine if the module is valid. */
if (module_instance == TX_NULL)
{
-
+
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
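A typical use of the properties query is to check a module's preamble flags before deciding how (or whether) to run it. A sketch assuming the public txm_module_manager_properties_get name; TXM_MODULE_MEMORY_PROTECTION is the property bit tested later in this patch, and module_instance is assumed to be already loaded.

/* Resident-side sketch: inspect a loaded module's property flags. */
#include "txm_module.h"

UINT demo_check_module(TXM_MODULE_INSTANCE *module_instance)
{
ULONG properties;
UINT  status;

    status = txm_module_manager_properties_get(module_instance, &properties);
    if (status != TX_SUCCESS)
        return(status);

    if (properties & TXM_MODULE_MEMORY_PROTECTION)
    {
        /* Module was built to run under memory protection. */
    }

    return(TX_SUCCESS);
}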
diff --git a/common_modules/module_manager/src/txm_module_manager_queue_notify_trampoline.c b/common_modules/module_manager/src/txm_module_manager_queue_notify_trampoline.c
index 436bec4e..453e88f0 100644
--- a/common_modules/module_manager/src/txm_module_manager_queue_notify_trampoline.c
+++ b/common_modules/module_manager/src/txm_module_manager_queue_notify_trampoline.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -28,41 +28,41 @@
#include "txm_module.h"
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_queue_notify_trampoline PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_queue_notify_trampoline PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the queue notification call from ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* queue_ptr Queue pointer */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_callback_request Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the queue notification call from ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* queue_ptr Queue pointer */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_callback_request Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_queue_notify_trampoline(TX_QUEUE *queue_ptr)
@@ -74,9 +74,9 @@ TXM_MODULE_INSTANCE *module_instance;
TXM_MODULE_CALLBACK_MESSAGE callback_message;
TX_QUEUE *module_callback_queue;
-
+
/* We now know the callback is for a module. */
-
+
/* Disable interrupts. */
TX_DISABLE
@@ -87,9 +87,9 @@ TX_QUEUE *module_callback_queue;
if ((module_instance) && (module_instance -> txm_module_instance_id == TXM_MODULE_ID) &&
(module_instance -> txm_module_instance_state == TXM_MODULE_STARTED))
{
-
- /* Yes, the module is still valid. */
-
+
+ /* Yes, the module is still valid. */
+
/* Pickup the module's callback message queue. */
module_callback_queue = &(module_instance -> txm_module_instance_callback_request_queue);
@@ -107,26 +107,26 @@ TX_QUEUE *module_callback_queue;
callback_message.txm_module_callback_message_param_8 = 0;
callback_message.txm_module_callback_message_reserved1 = 0;
callback_message.txm_module_callback_message_reserved2 = 0;
-
+
/* Restore interrupts. */
TX_RESTORE
-
- /* Call the general processing that will place the callback on the
+
+ /* Call the general processing that will place the callback on the
module's callback request queue. */
_txm_module_manager_callback_request(module_callback_queue, &callback_message);
}
else
{
-
+
/* Module no longer valid. */
-
+
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
/* Restore interrupts. */
TX_RESTORE
}
-}
+}
#endif
-
+
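The trampoline above runs in the resident kernel's context: when a module has registered a queue send notification, ThreadX invokes this function, which packages a callback message and posts it to the module's callback request queue so the module's own function runs on its callback thread. A module-side registration sketch using the standard tx_queue_send_notify service (the demo_* names are illustrative).

/* Module-side sketch: register a send-notify callback; the module manager
   substitutes the trampoline above on the kernel side. */
#include "txm_module.h"

TX_QUEUE demo_queue;

/* Runs on the module's callback thread after the trampoline forwards the request. */
VOID demo_queue_send_notify(TX_QUEUE *queue_ptr)
{
    /* A message was sent to demo_queue. */
}

VOID demo_register_queue_notify(VOID)
{
    tx_queue_send_notify(&demo_queue, demo_queue_send_notify);
}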
diff --git a/common_modules/module_manager/src/txm_module_manager_semaphore_notify_trampoline.c b/common_modules/module_manager/src/txm_module_manager_semaphore_notify_trampoline.c
index e90fd28c..4992a0b8 100644
--- a/common_modules/module_manager/src/txm_module_manager_semaphore_notify_trampoline.c
+++ b/common_modules/module_manager/src/txm_module_manager_semaphore_notify_trampoline.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -28,42 +28,42 @@
#include "txm_module.h"
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_semaphore_notify_trampoline PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_semaphore_notify_trampoline PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the semaphore put notification call from */
-/* ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* semaphore_ptr Semaphore pointer */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_callback_request Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the semaphore put notification call from */
+/* ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* semaphore_ptr Semaphore pointer */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_callback_request Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_semaphore_notify_trampoline(TX_SEMAPHORE *semaphore_ptr)
@@ -75,9 +75,9 @@ TXM_MODULE_INSTANCE *module_instance;
TXM_MODULE_CALLBACK_MESSAGE callback_message;
TX_QUEUE *module_callback_queue;
-
+
/* We now know the callback is for a module. */
-
+
/* Disable interrupts. */
TX_DISABLE
@@ -88,9 +88,9 @@ TX_QUEUE *module_callback_queue;
if ((module_instance) && (module_instance -> txm_module_instance_id == TXM_MODULE_ID) &&
(module_instance -> txm_module_instance_state == TXM_MODULE_STARTED))
{
-
- /* Yes, the module is still valid. */
-
+
+ /* Yes, the module is still valid. */
+
/* Pickup the module's callback message queue. */
module_callback_queue = &(module_instance -> txm_module_instance_callback_request_queue);
@@ -108,24 +108,24 @@ TX_QUEUE *module_callback_queue;
callback_message.txm_module_callback_message_param_8 = 0;
callback_message.txm_module_callback_message_reserved1 = 0;
callback_message.txm_module_callback_message_reserved2 = 0;
-
+
/* Restore interrupts. */
TX_RESTORE
-
- /* Call the general processing that will place the callback on the
+
+ /* Call the general processing that will place the callback on the
module's callback request queue. */
_txm_module_manager_callback_request(module_callback_queue, &callback_message);
}
else
{
-
+
/* Module no longer valid. */
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
-
+
/* Restore interrupts. */
TX_RESTORE
}
-}
+}
#endif
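The semaphore trampoline follows the same pattern, triggered by the standard tx_semaphore_put_notify registration. A minimal module-side sketch with illustrative demo_* names:

#include "txm_module.h"

TX_SEMAPHORE demo_semaphore;

/* Executed on the module's callback thread once the trampoline forwards the put. */
VOID demo_semaphore_put_notify(TX_SEMAPHORE *semaphore_ptr)
{
    /* demo_semaphore received a put. */
}

VOID demo_register_semaphore_notify(VOID)
{
    tx_semaphore_put_notify(&demo_semaphore, demo_semaphore_put_notify);
}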
diff --git a/common_modules/module_manager/src/txm_module_manager_start.c b/common_modules/module_manager/src/txm_module_manager_start.c
index 365803b5..9d839624 100644
--- a/common_modules/module_manager/src/txm_module_manager_start.c
+++ b/common_modules/module_manager/src/txm_module_manager_start.c
@@ -75,22 +75,22 @@
/**************************************************************************/
UINT _txm_module_manager_start(TXM_MODULE_INSTANCE *module_instance)
{
-
+
UINT status;
/* Determine if the module manager has not been initialized yet. */
if (_txm_module_manager_ready != TX_TRUE)
{
-
+
/* Module manager has not been initialized. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
/* Determine if the module is valid. */
if (module_instance == TX_NULL)
{
-
+
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
@@ -108,11 +108,11 @@ UINT status;
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
-
+
/* Determine if the module instance is in the loaded state. */
if ((module_instance -> txm_module_instance_state != TXM_MODULE_LOADED) && (module_instance -> txm_module_instance_state != TXM_MODULE_STOPPED))
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -124,7 +124,7 @@ UINT status;
if (module_instance -> txm_module_instance_start_stop_priority < module_instance -> txm_module_instance_maximum_priority ||
module_instance -> txm_module_instance_callback_priority < module_instance -> txm_module_instance_maximum_priority)
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -133,13 +133,13 @@ UINT status;
}
/* Create the module's callback request queue. */
- status = _tx_queue_create(&(module_instance -> txm_module_instance_callback_request_queue), "Module Callback Request Queue", (sizeof(TXM_MODULE_CALLBACK_MESSAGE)/sizeof(ULONG)),
+ status = _tx_queue_create(&(module_instance -> txm_module_instance_callback_request_queue), "Module Callback Request Queue", (sizeof(TXM_MODULE_CALLBACK_MESSAGE)/sizeof(ULONG)),
module_instance -> txm_module_instance_callback_request_queue_area, sizeof(module_instance -> txm_module_instance_callback_request_queue_area));
/* Determine if there was an error. */
if (status)
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -147,7 +147,7 @@ UINT status;
return(TX_START_ERROR);
}
- /* Create the module start thread. */
+ /* Create the module start thread. */
status = _txm_module_manager_thread_create(&(module_instance -> txm_module_instance_start_stop_thread),
"Module Start Thread",
module_instance -> txm_module_instance_shell_entry_function,
@@ -158,14 +158,14 @@ UINT status;
(UINT) module_instance -> txm_module_instance_start_stop_priority,
(UINT) module_instance -> txm_module_instance_start_stop_priority,
TXM_MODULE_TIME_SLICE,
- TX_DONT_START,
+ TX_DONT_START,
sizeof(TX_THREAD),
module_instance);
-
+
/* Determine if the thread create was successful. */
if (status != TX_SUCCESS)
{
-
+
/* Delete the callback notification queue. */
_tx_queue_delete(&(module_instance -> txm_module_instance_callback_request_queue));
@@ -173,10 +173,10 @@ UINT status;
_tx_mutex_put(&_txm_module_manager_mutex);
/* Return the error status. */
- return(status);
+ return(status);
}
- /* Create the module callback thread. */
+ /* Create the module callback thread. */
status = _txm_module_manager_thread_create(&(module_instance -> txm_module_instance_callback_request_thread),
"Module Callback Request Thread",
module_instance -> txm_module_instance_shell_entry_function,
@@ -187,20 +187,20 @@ UINT status;
(UINT) module_instance -> txm_module_instance_callback_priority,
(UINT) module_instance -> txm_module_instance_callback_priority,
TX_NO_TIME_SLICE,
- TX_DONT_START,
+ TX_DONT_START,
sizeof(TX_THREAD),
module_instance);
-
+
/* Determine if the thread create was successful. */
if (status != TX_SUCCESS)
{
/* Terminate the start thread. */
_tx_thread_terminate(&(module_instance -> txm_module_instance_start_stop_thread));
-
+
/* Delete the start thread. */
_tx_thread_delete(&(module_instance -> txm_module_instance_start_stop_thread));
-
+
/* Delete the callback notification queue. */
_tx_queue_delete(&(module_instance -> txm_module_instance_callback_request_queue));
@@ -208,7 +208,7 @@ UINT status;
_tx_mutex_put(&_txm_module_manager_mutex);
/* Return the error status. */
- return(status);
+ return(status);
}
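Starting a module therefore creates three kernel resources on its behalf: the callback request queue, the start/stop thread, and the callback request thread, with priority limits enforced against the module's maximum priority. A resident-side sketch of the usual initialize / load / start sequence, assuming the documented txm_module_manager_initialize and txm_module_manager_in_place_load signatures; the 64 KB data area and the 0x08040000 module address are placeholders.

/* Resident-side sketch: bring up the module manager and start one module
   that already resides in memory (for example, in flash). */
#include "txm_module.h"

static UCHAR               demo_module_data_memory[65536];
static TXM_MODULE_INSTANCE demo_module;

VOID demo_module_startup(VOID)
{
    txm_module_manager_initialize(demo_module_data_memory,
                                  sizeof(demo_module_data_memory));

    txm_module_manager_in_place_load(&demo_module, "demo module",
                                     (VOID *) 0x08040000);

    txm_module_manager_start(&demo_module);
}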
diff --git a/common_modules/module_manager/src/txm_module_manager_thread_notify_trampoline.c b/common_modules/module_manager/src/txm_module_manager_thread_notify_trampoline.c
index d70ff718..daff01ce 100644
--- a/common_modules/module_manager/src/txm_module_manager_thread_notify_trampoline.c
+++ b/common_modules/module_manager/src/txm_module_manager_thread_notify_trampoline.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -28,43 +28,43 @@
#include "txm_module.h"
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_thread_notify_trampoline PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_thread_notify_trampoline PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the thread entry/exit notification call */
-/* from ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Thread pointer */
-/* type Entry or exit type */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_callback_request Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the thread entry/exit notification call */
+/* from ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Thread pointer */
+/* type Entry or exit type */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_callback_request Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_thread_notify_trampoline(TX_THREAD *thread_ptr, UINT type)
@@ -77,9 +77,9 @@ TXM_MODULE_CALLBACK_MESSAGE callback_message;
TX_QUEUE *module_callback_queue;
TXM_MODULE_THREAD_ENTRY_INFO *thread_info;
-
+
/* We now know the callback is for a module. */
-
+
/* Disable interrupts. */
TX_DISABLE
@@ -97,9 +97,9 @@ TXM_MODULE_THREAD_ENTRY_INFO *thread_info;
if ((module_instance) && (module_instance -> txm_module_instance_id == TXM_MODULE_ID) &&
(module_instance -> txm_module_instance_state == TXM_MODULE_STARTED))
{
-
- /* Yes, the module is still valid. */
-
+
+ /* Yes, the module is still valid. */
+
/* Pickup the module's callback message queue. */
module_callback_queue = &(module_instance -> txm_module_instance_callback_request_queue);
@@ -117,19 +117,19 @@ TXM_MODULE_THREAD_ENTRY_INFO *thread_info;
callback_message.txm_module_callback_message_param_8 = 0;
callback_message.txm_module_callback_message_reserved1 = 0;
callback_message.txm_module_callback_message_reserved2 = 0;
-
+
/* Restore interrupts. */
TX_RESTORE
-
- /* Call the general processing that will place the callback on the
+
+ /* Call the general processing that will place the callback on the
module's callback request queue. */
_txm_module_manager_callback_request(module_callback_queue, &callback_message);
}
else
{
-
+
/* Module no longer valid. */
-
+
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
@@ -139,15 +139,15 @@ TXM_MODULE_THREAD_ENTRY_INFO *thread_info;
}
else
{
-
+
/* Thread pointer is not valid. */
-
+
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
/* Restore interrupts. */
TX_RESTORE
}
-}
+}
#endif
-
+
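Thread entry/exit notifications reach a module the same way: the trampoline queues a callback message carrying the thread pointer and the entry/exit type. A module-side registration sketch using the standard tx_thread_entry_exit_notify service and the TX_THREAD_ENTRY/TX_THREAD_EXIT type values from tx_api.h (demo_* names are illustrative).

#include "txm_module.h"

TX_THREAD demo_thread;

/* Executed on the module's callback thread; type is TX_THREAD_ENTRY or TX_THREAD_EXIT. */
VOID demo_entry_exit_notify(TX_THREAD *thread_ptr, UINT type)
{
    if (type == TX_THREAD_EXIT)
    {
        /* demo_thread has returned from its entry function. */
    }
}

VOID demo_register_entry_exit_notify(VOID)
{
    tx_thread_entry_exit_notify(&demo_thread, demo_entry_exit_notify);
}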
diff --git a/common_modules/module_manager/src/txm_module_manager_thread_reset.c b/common_modules/module_manager/src/txm_module_manager_thread_reset.c
index 0fd0b2af..1a9d2ac8 100644
--- a/common_modules/module_manager/src/txm_module_manager_thread_reset.c
+++ b/common_modules/module_manager/src/txm_module_manager_thread_reset.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -27,45 +27,45 @@
#include "tx_thread.h"
#include "txm_module.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_thread_reset PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_thread_reset PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function prepares the thread to run again from the entry */
-/* point specified during thread creation. The application must */
-/* call tx_thread_resume after this call completes for the thread */
-/* to actually run. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread to reset */
-/* */
-/* OUTPUT */
-/* */
-/* status Service return status */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_thread_stack_build Build initial thread */
-/* stack */
-/* */
-/* CALLED BY */
-/* */
-/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function prepares the thread to run again from the entry */
+/* point specified during thread creation. The application must */
+/* call tx_thread_resume after this call completes for the thread */
+/* to actually run. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread to reset */
+/* */
+/* OUTPUT */
+/* */
+/* status Service return status */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_thread_stack_build Build initial thread */
+/* stack */
+/* */
+/* CALLED BY */
+/* */
+/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_thread_reset(TX_THREAD *thread_ptr)
@@ -101,7 +101,7 @@ TXM_MODULE_THREAD_ENTRY_INFO *thread_entry_info;
{
/* Check for proper status of this thread to reset. */
- if (thread_ptr -> tx_thread_state != TX_COMPLETED)
+ if (thread_ptr -> tx_thread_state != TX_COMPLETED)
{
/* Now check for terminated state. */
@@ -139,18 +139,18 @@ TXM_MODULE_THREAD_ENTRY_INFO *thread_entry_info;
#endif
/* Setup pointer to the thread entry information structure, which will live at the top of each
- module thread's stack. This will allow the module thread entry function to avoid direct
+ module thread's stack. This will allow the module thread entry function to avoid direct
access to the actual thread control block. */
thread_entry_info = (TXM_MODULE_THREAD_ENTRY_INFO *) (((UCHAR *) thread_ptr -> tx_thread_stack_end) + (2*sizeof(ULONG)) + 1);
thread_entry_info = (TXM_MODULE_THREAD_ENTRY_INFO *) (((ALIGN_TYPE)(thread_entry_info)) & (~0x3));
-
+
/* Place the thread entry information pointer in the thread control block so it can be picked up
in the following stack build function. This is supplied to the module's shell entry function
to avoid direct access to the actual thread control block. Note that this is overwritten
with the actual stack pointer at the end of stack build. */
thread_ptr -> tx_thread_stack_ptr = (VOID *) thread_entry_info;
-
- /* Call the target specific stack frame building routine to build the
+
+ /* Call the target specific stack frame building routine to build the
thread's initial stack and to setup the actual stack pointer in the
control block. */
_txm_module_manager_thread_stack_build(thread_ptr, module_instance -> txm_module_instance_shell_entry_function);
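As the header notes, the reset only rebuilds the thread's initial stack and entry information; the thread must be resumed afterwards to run. A module-side sketch of the usual sequence, using the standard tx_thread_reset and tx_thread_resume services (demo_worker_thread is illustrative and assumed to be completed or terminated).

/* Module-side sketch: restart a worker thread from its original entry point. */
#include "txm_module.h"

TX_THREAD demo_worker_thread;

VOID demo_restart_worker(VOID)
{
UINT status;

    status = tx_thread_reset(&demo_worker_thread);
    if (status == TX_SUCCESS)
    {
        tx_thread_resume(&demo_worker_thread);
    }
}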
diff --git a/common_modules/module_manager/src/txm_module_manager_timer_notify_trampoline.c b/common_modules/module_manager/src/txm_module_manager_timer_notify_trampoline.c
index 9bcdd5d9..a7566c92 100644
--- a/common_modules/module_manager/src/txm_module_manager_timer_notify_trampoline.c
+++ b/common_modules/module_manager/src/txm_module_manager_timer_notify_trampoline.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -27,43 +27,43 @@
#include "tx_thread.h"
#include "tx_timer.h"
#include "txm_module.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_timer_notify_trampoline PORTABLE C */
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_timer_notify_trampoline PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function processes the timer expirations from ThreadX. */
-/* */
-/* INPUT */
-/* */
-/* id Timer ID */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _txm_module_manager_callback_request Send module callback request */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the timer expirations from ThreadX. */
+/* */
+/* INPUT */
+/* */
+/* id Timer ID */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _txm_module_manager_callback_request Send module callback request */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
VOID _txm_module_manager_timer_notify_trampoline(ULONG id)
@@ -77,20 +77,20 @@ TX_QUEUE *module_callback_queue;
TX_TIMER *timer_ptr;
CHAR *internal_ptr;
-
+
/* We now know the callback is for a module. */
-
+
/* Disable interrupts. */
TX_DISABLE
-
- /* Our expired timer pointer points to the internal timer,
+
+ /* Our expired timer pointer points to the internal timer,
* we need to get to the full timer pointer. */
/* Pickup the current internal timer pointer. */
- internal_ptr = (CHAR *) _tx_timer_expired_timer_ptr;
-
+ internal_ptr = (CHAR *) _tx_timer_expired_timer_ptr;
+
/* Get the timer pointer from the internal pointer. */
TX_USER_TIMER_POINTER_GET((TX_TIMER_INTERNAL *) internal_ptr, timer_ptr);
-
+
/* Pickup the module instance pointer. */
module_instance = (TXM_MODULE_INSTANCE *) timer_ptr -> tx_timer_module_instance;
@@ -98,9 +98,9 @@ CHAR *internal_ptr;
if ((module_instance) && (module_instance -> txm_module_instance_id == TXM_MODULE_ID) &&
(module_instance -> txm_module_instance_state == TXM_MODULE_STARTED))
{
-
- /* Yes, the module is still valid. */
-
+
+ /* Yes, the module is still valid. */
+
/* Pickup the module's callback message queue. */
module_callback_queue = &(module_instance -> txm_module_instance_callback_request_queue);
@@ -118,24 +118,24 @@ CHAR *internal_ptr;
callback_message.txm_module_callback_message_param_8 = 0;
callback_message.txm_module_callback_message_reserved1 = 0;
callback_message.txm_module_callback_message_reserved2 = 0;
-
+
/* Restore interrupts. */
TX_RESTORE
-
- /* Call the general processing that will place the callback on the
+
+ /* Call the general processing that will place the callback on the
module's callback request queue. */
_txm_module_manager_callback_request(module_callback_queue, &callback_message);
}
else
{
-
+
/* Module no longer valid. */
/* Error, increment the error counter and return. */
_txm_module_manager_callback_error_count++;
-
+
/* Restore interrupts. */
TX_RESTORE
}
-}
-
+}
+
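For timers, the trampoline first maps the expired internal timer back to the application TX_TIMER (via TX_USER_TIMER_POINTER_GET) before queuing the callback, so the module's expiration function ultimately runs on the module's callback thread. A module-side sketch creating such a timer with the standard tx_timer_create service; the name, id value, and 100-tick periods are illustrative.

#include "txm_module.h"

TX_TIMER demo_timer;

/* Executed on the module's callback thread once the trampoline forwards the expiration. */
VOID demo_timer_expiration(ULONG id)
{
    /* Periodic work keyed by id. */
}

VOID demo_create_timer(VOID)
{
    tx_timer_create(&demo_timer, "demo timer", demo_timer_expiration, 0x1234,
                    100, 100, TX_AUTO_ACTIVATE);
}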
diff --git a/common_modules/module_manager/src/txm_module_manager_unload.c b/common_modules/module_manager/src/txm_module_manager_unload.c
index 4f83f726..fd5f88af 100644
--- a/common_modules/module_manager/src/txm_module_manager_unload.c
+++ b/common_modules/module_manager/src/txm_module_manager_unload.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
#define TX_SOURCE_CODE
@@ -30,50 +30,50 @@
#include "txm_module.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_unload PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_unload PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function unloads a previously loaded module. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance pointer */
-/* */
-/* OUTPUT */
-/* */
-/* status Completion status */
-/* */
-/* CALLS */
-/* */
-/* _tx_byte_release Release data area */
-/* _tx_mutex_get Get protection mutex */
-/* _tx_mutex_put Release protection mutex */
-/* */
-/* CALLED BY */
-/* */
-/* Application code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function unloads a previously loaded module. */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance pointer */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* _tx_byte_release Release data area */
+/* _tx_mutex_get Get protection mutex */
+/* _tx_mutex_put Release protection mutex */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_unload(TXM_MODULE_INSTANCE *module_instance)
{
TX_INTERRUPT_SAVE_AREA
-
+
TXM_MODULE_INSTANCE *next_module, *previous_module;
CHAR *memory_ptr;
@@ -81,11 +81,11 @@ CHAR *memory_ptr;
/* Check for interrupt call. */
if (TX_THREAD_GET_SYSTEM_STATE() != 0)
{
-
+
/* Now, make sure the call is from an interrupt and not initialization. */
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
-
+
/* Invalid caller of this function, return appropriate error code. */
return(TX_CALLER_ERROR);
}
@@ -94,15 +94,15 @@ CHAR *memory_ptr;
/* Determine if the module manager has not been initialized yet. */
if (_txm_module_manager_ready != TX_TRUE)
{
-
+
/* Module manager has not been initialized. */
- return(TX_NOT_AVAILABLE);
+ return(TX_NOT_AVAILABLE);
}
/* Determine if the module is valid. */
if (module_instance == TX_NULL)
{
-
+
/* Invalid module pointer. */
return(TX_PTR_ERROR);
}
@@ -124,7 +124,7 @@ CHAR *memory_ptr;
/* Determine if the module instance is in the state. */
if ((module_instance -> txm_module_instance_state != TXM_MODULE_LOADED) && (module_instance -> txm_module_instance_state != TXM_MODULE_STOPPED))
{
-
+
/* Release the protection mutex. */
_tx_mutex_put(&_txm_module_manager_mutex);
@@ -141,14 +141,14 @@ CHAR *memory_ptr;
/* Determine if there was memory allocated for the code. */
if (module_instance -> txm_module_instance_code_allocation_ptr)
{
-
+
/* Yes, release the module's code memory. */
memory_ptr = module_instance -> txm_module_instance_code_allocation_ptr;
/* Release the module's data memory. */
_tx_byte_release(memory_ptr);
}
-
+
/* Temporarily disable interrupts. */
TX_DISABLE
@@ -158,7 +158,7 @@ CHAR *memory_ptr;
/* Call port-specific unload function. */
TXM_MODULE_MANAGER_MODULE_UNLOAD(module_instance);
-
+
/* Remove the module from the linked list of loaded modules. */
/* See if the module is the only one on the list. */
@@ -180,9 +180,9 @@ CHAR *memory_ptr;
/* See if we have to update the created list head pointer. */
if (_txm_module_manager_loaded_list_ptr == module_instance)
{
-
+
/* Yes, move the head pointer to the next link. */
- _txm_module_manager_loaded_list_ptr = next_module;
+ _txm_module_manager_loaded_list_ptr = next_module;
}
}
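Unload requires the module to be in the loaded or stopped state, so a started module is normally stopped first; the function above then releases any manager-allocated code memory and removes the instance from the loaded-module list. A resident-side teardown sketch assuming the public txm_module_manager_stop and txm_module_manager_unload names; demo_module is assumed to have been loaded and started earlier.

/* Resident-side sketch: stop a running module, then unload it so its memory
   and instance control block can be reused. */
#include "txm_module.h"

VOID demo_module_teardown(TXM_MODULE_INSTANCE *demo_module)
{
    txm_module_manager_stop(demo_module);

    txm_module_manager_unload(demo_module);
}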
diff --git a/common_modules/module_manager/src/txm_module_manager_util.c b/common_modules/module_manager/src/txm_module_manager_util.c
index 8511fce6..bd6c79eb 100644
--- a/common_modules/module_manager/src/txm_module_manager_util.c
+++ b/common_modules/module_manager/src/txm_module_manager_util.c
@@ -10,15 +10,15 @@
/**************************************************************************/
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** Module Manager */
-/** */
-/**************************************************************************/
-/**************************************************************************/
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Module Manager */
+/** */
+/**************************************************************************/
+/**************************************************************************/
/* Include necessary system files. */
@@ -29,115 +29,115 @@
#include "txm_module_manager_util.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_object_memory_check PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_object_memory_check PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks if the object is inside a module's object pool */
-/* or, if it's a privileged module, inside the module's data area. */
-/* */
-/* INPUT */
-/* */
-/* module_instance Module instance that the object */
-/* belongs to */
-/* object_ptr Pointer to object to check */
-/* object_size Size of the object to check */
-/* */
-/* OUTPUT */
-/* */
-/* status Whether the object resides in a */
-/* valid location */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks if the object is inside a module's object pool */
+/* or, if it's a privileged module, inside the module's data area. */
+/* */
+/* INPUT */
+/* */
+/* module_instance Module instance that the object */
+/* belongs to */
+/* object_ptr Pointer to object to check */
+/* object_size Size of the object to check */
+/* */
+/* OUTPUT */
+/* */
+/* status Whether the object resides in a */
+/* valid location */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_memory_check(TXM_MODULE_INSTANCE *module_instance, ALIGN_TYPE object_ptr, ULONG object_size)
{
-
+
/* Is the object pointer from the module manager's object pool? */
- if ((_txm_module_manager_object_pool_created == TX_TRUE) &&
- (object_ptr >= (ALIGN_TYPE) _txm_module_manager_object_pool.tx_byte_pool_start) &&
+ if ((_txm_module_manager_object_pool_created == TX_TRUE) &&
+ (object_ptr >= (ALIGN_TYPE) _txm_module_manager_object_pool.tx_byte_pool_start) &&
((object_ptr+object_size) <= (ALIGN_TYPE) (_txm_module_manager_object_pool.tx_byte_pool_start + _txm_module_manager_object_pool.tx_byte_pool_size)))
{
/* Object is from manager object pool. */
return(TX_SUCCESS);
}
-
+
/* If memory protection is not required, check if object is in module data. */
else if (!(module_instance -> txm_module_instance_property_flags & TXM_MODULE_MEMORY_PROTECTION))
{
- if ((object_ptr >= (ALIGN_TYPE) module_instance -> txm_module_instance_data_start) &&
+ if ((object_ptr >= (ALIGN_TYPE) module_instance -> txm_module_instance_data_start) &&
((object_ptr+object_size) <= (ALIGN_TYPE) module_instance -> txm_module_instance_data_end))
{
/* Object is from the local module memory. */
return(TX_SUCCESS);
}
}
-
+
/* Object is from invalid memory. */
return(TXM_MODULE_INVALID_MEMORY);
-
+
}
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_created_object_check PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_created_object_check PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
+/* DESCRIPTION */
+/* */
/* This functions checks if the specified object was created by the */
-/* specified module */
-/* */
-/* INPUT */
-/* */
-/* module_instance The module instance to check */
-/* object_ptr The object to check */
-/* */
-/* OUTPUT */
-/* */
-/* status Whether the module created the */
-/* object */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* txm_module_manager*_stop Module manager stop functions */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* specified module */
+/* */
+/* INPUT */
+/* */
+/* module_instance The module instance to check */
+/* object_ptr The object to check */
+/* */
+/* OUTPUT */
+/* */
+/* status Whether the module created the */
+/* object */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* txm_module_manager*_stop Module manager stop functions */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UCHAR _txm_module_manager_created_object_check(TXM_MODULE_INSTANCE *module_instance, VOID *object_ptr)
@@ -179,51 +179,51 @@ TXM_MODULE_ALLOCATED_OBJECT *allocated_object_ptr;
}
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_object_size_check PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_object_size_check PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function checks if the specified object's size matches what is */
-/* inside the object pool. */
-/* */
-/* INPUT */
-/* */
-/* object_ptr Pointer to object to check */
-/* object_size Size of the object to check */
-/* */
-/* OUTPUT */
-/* */
-/* status Whether the object's size matches */
-/* what's inside the object pool */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function checks if the specified object's size matches what is */
+/* inside the object pool. */
+/* */
+/* INPUT */
+/* */
+/* object_ptr Pointer to object to check */
+/* object_size Size of the object to check */
+/* */
+/* OUTPUT */
+/* */
+/* status Whether the object's size matches */
+/* what's inside the object pool */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _txm_module_manager_kernel_dispatch Kernel dispatch function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_size_check(ALIGN_TYPE object_ptr, ULONG object_size)
{
TXM_MODULE_ALLOCATED_OBJECT *module_allocated_object_ptr;
UINT return_value;
-
+
/* Pickup the allocated object pointer. */
module_allocated_object_ptr = ((TXM_MODULE_ALLOCATED_OBJECT *) object_ptr) - 1;
@@ -232,50 +232,50 @@ UINT return_value;
return_value = TX_SUCCESS;
else
return_value = TXM_MODULE_INVALID_MEMORY;
-
+
return(return_value);
}
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_name_compare PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_object_name_compare PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function compares the specified object names. */
-/* */
-/* INPUT */
-/* */
-/* search_name String pointer to the object's */
-/* name being searched for */
-/* search_name_length Length of search_name */
-/* object_name String pointer to an object's name*/
-/* to compare the search name to */
-/* */
-/* OUTPUT */
-/* */
-/* status Whether the names are equal */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* *_object_pointer_get Kernel dispatch function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function compares the specified object names. */
+/* */
+/* INPUT */
+/* */
+/* search_name String pointer to the object's */
+/* name being searched for */
+/* search_name_length Length of search_name */
+/* object_name String pointer to an object's name*/
+/* to compare the search name to */
+/* */
+/* OUTPUT */
+/* */
+/* status Whether the names are equal */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* *_object_pointer_get Kernel dispatch function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_object_name_compare(CHAR *search_name, UINT search_name_length, CHAR *object_name)
@@ -337,48 +337,48 @@ CHAR object_name_char;
}
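
For context, the comparison described above is a bounded, character-by-character match between the caller's search name and a kernel object's name. A rough sketch of that kind of bounded comparison follows; example_name_compare is a made-up name and the code does not reproduce the exact termination rules of _txm_module_manager_object_name_compare.

#include "tx_api.h"

/* Illustrative bounded name comparison; not the ThreadX source. */
static UINT example_name_compare(CHAR *search_name, UINT search_name_length, CHAR *object_name)
{
UINT i;

    /* Compare character by character, up to the search name length. */
    for (i = 0; i < search_name_length; i++)
    {
        if (search_name[i] != object_name[i])
            return(TX_FALSE);                    /* Mismatch */

        if (search_name[i] == '\0')
            return(TX_TRUE);                     /* Both names ended together */
    }

    /* Names match only if the object name also terminates here. */
    return((object_name[search_name_length] == '\0') ? TX_TRUE : TX_FALSE);
}
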
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _txm_module_manager_util_code_allocation_size_and_alignment_get */
-/* PORTABLE C */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _txm_module_manager_util_code_allocation_size_and_alignment_get */
+/* PORTABLE C */
/* 6.1 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
-/* DESCRIPTION */
-/* */
-/* This function returns the required alignment and allocation size */
-/* for a module's code area. */
-/* */
-/* INPUT */
-/* */
-/* module_preamble Preamble of module to return code */
-/* values for */
-/* code_alignment_dest Address to return code alignment */
-/* code_allocation_size_desk Address to return code allocation */
-/* size */
-/* */
-/* OUTPUT */
-/* */
-/* status Success if no math overflow */
-/* occurred during calculation */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* txm_module_manager_*_load Module load functions */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* DESCRIPTION */
+/* */
+/* This function returns the required alignment and allocation size */
+/* for a module's code area. */
+/* */
+/* INPUT */
+/* */
+/* module_preamble Preamble of module to return code */
+/* values for */
+/* code_alignment_dest Address to return code alignment */
+/*    code_allocation_size_dest         Address to return code allocation */
+/* size */
+/* */
+/* OUTPUT */
+/* */
+/* status Success if no math overflow */
+/* occurred during calculation */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* txm_module_manager_*_load Module load functions */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 Scott Larson Initial Version 6.1 */
+/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/
UINT _txm_module_manager_util_code_allocation_size_and_alignment_get(TXM_MODULE_PREAMBLE *module_preamble,
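
The allocation-size utility above reports success only when no arithmetic overflow occurs while the module's code size is rounded up to its required alignment. A generic, overflow-aware round-up in that spirit is sketched below; round_up_checked is an illustrative name and the logic is not taken from the ThreadX source.

#include <limits.h>

/* Round size up to a nonzero power-of-two alignment, reporting overflow.
   Returns 0 on success and stores the result; returns 1 if the addition would wrap. */
static int round_up_checked(unsigned long size, unsigned long alignment, unsigned long *rounded_dest)
{
    /* Adding (alignment - 1) would wrap if size is too close to ULONG_MAX. */
    if (size > (ULONG_MAX - (alignment - 1)))
        return 1;                                /* Math overflow */

    *rounded_dest = (size + (alignment - 1)) & ~(alignment - 1);
    return 0;                                    /* Success */
}
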
diff --git a/common_smp/inc/tx_api.h b/common_smp/inc/tx_api.h
index 5910c803..9591aee9 100644
--- a/common_smp/inc/tx_api.h
+++ b/common_smp/inc/tx_api.h
@@ -26,7 +26,7 @@
/* APPLICATION INTERFACE DEFINITION RELEASE */
/* */
/* tx_api.h PORTABLE SMP */
-/* 6.1.9 */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -67,6 +67,10 @@
/* 10-15-2021 Yuxin Zhou Modified comment(s), */
/* update patch number, */
/* resulting in version 6.1.9 */
+/* 01-31-2022 Scott Larson Modified comment(s), */
+/* add unused parameter macro, */
+/* update patch number, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -84,6 +88,10 @@ extern "C" {
#endif
+/* Disable warning of parameter not used. */
+#ifndef TX_PARAMETER_NOT_USED
+#define TX_PARAMETER_NOT_USED(p) ((void)(p))
+#endif /* TX_PARAMETER_NOT_USED */
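
The TX_PARAMETER_NOT_USED macro added here gives ports and applications a portable way to silence compiler warnings about intentionally unused parameters without disabling those warnings globally; because the definition is wrapped in #ifndef, a port or tx_user.h may supply its own expansion first. A minimal usage sketch follows (my_thread_entry is an illustrative name, not a ThreadX API):

#include "tx_api.h"

/* Example thread entry that deliberately ignores its input parameter. */
VOID my_thread_entry(ULONG thread_input)
{
    TX_PARAMETER_NOT_USED(thread_input);         /* Expands to ((void)(thread_input)) */

    while (1)
    {
        tx_thread_sleep(100);                    /* Periodic work would go here */
    }
}
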
/* Include the port-specific data type file. */
@@ -115,7 +123,7 @@ extern "C" {
#define AZURE_RTOS_THREADX
#define THREADX_MAJOR_VERSION 6
#define THREADX_MINOR_VERSION 1
-#define THREADX_PATCH_VERSION 9
+#define THREADX_PATCH_VERSION 10
/* Define the following symbol for backward compatibility */
#define EL_PRODUCT_THREADX
diff --git a/ports/arc_em/metaware/example_build/sample_threadx/tx_initialize_low_level.s b/ports/arc_em/metaware/example_build/sample_threadx/tx_initialize_low_level.s
index 69d42895..a7406360 100644
--- a/ports/arc_em/metaware/example_build/sample_threadx/tx_initialize_low_level.s
+++ b/ports/arc_em/metaware/example_build/sample_threadx/tx_initialize_low_level.s
@@ -60,7 +60,7 @@ _tx_system_stack_base_address:
;/* FUNCTION RELEASE */
;/* */
;/* _tx_initialize_low_level ARCv2_EM/MetaWare */
-;/* 6.1.9 */
+;/* 6.1.10 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -97,10 +97,16 @@ _tx_system_stack_base_address:
;/* 10-15-2021 Andres Mlinar Modified comment(s), optimized*/
;/* system stack usage, */
;/* resulting in version 6.1.9 */
+;/* 01-31-2022      Andres Mlinar            Modified comment(s),          */
+;/* initialize interrupts right */
+;/* before enabling the task */
+;/* scheduler, */
+;/* resulting in version 6.1.10 */
;/* */
;/**************************************************************************/
;VOID _tx_initialize_low_level(VOID)
;{
+ .align 4
.global _tx_initialize_low_level
.type _tx_initialize_low_level, @function
_tx_initialize_low_level:
@@ -131,6 +137,18 @@ _tx_initialize_low_level:
;
st r0, [gp, _tx_initialize_unused_memory@sda]
;
+; /* Done, return to caller. */
+;
+ j_s.d [blink] ; Return to caller
+ nop
+;}
+;
+;VOID _tx_initialize_start_interrupts(VOID)
+;{
+ .align 4
+ .global _tx_initialize_start_interrupts
+ .type _tx_initialize_start_interrupts, @function
+_tx_initialize_start_interrupts:
;
; /* Setup Timer 0 for periodic interrupts at interrupt vector 16. */
;
diff --git a/ports/arc_em/metaware/inc/tx_port.h b/ports/arc_em/metaware/inc/tx_port.h
index fd1ef746..5cc0be6b 100644
--- a/ports/arc_em/metaware/inc/tx_port.h
+++ b/ports/arc_em/metaware/inc/tx_port.h
@@ -48,9 +48,14 @@
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s), updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 01-31-2022      Andres Mlinar            Modified comment(s),            */
+/* initialize interrupts right */
+/* before enabling the task */
+/* scheduler, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -184,6 +189,12 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_INLINE_INITIALIZATION
#endif
+/* Define the ARC-specific initialization code that is expanded in the generic source. */
+
+void _tx_initialize_start_interrupts(void);
+
+#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION _tx_initialize_start_interrupts();
+
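
With this port change, _tx_initialize_low_level no longer enables the timer interrupt itself; instead the generic initialization path expands TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION, which here calls _tx_initialize_start_interrupts, so interrupts come up only immediately before the scheduler takes over. The sketch below shows how such a hook is typically consumed; it is a simplified illustration (example_kernel_enter is a made-up name) and not the actual _tx_initialize_kernel_enter source.

#include "tx_api.h"

extern VOID  _tx_initialize_low_level(VOID);
extern VOID  _tx_thread_schedule(VOID);
extern VOID *_tx_initialize_unused_memory;

/* Simplified illustration of consuming the pre-scheduler hook. */
VOID example_kernel_enter(VOID)
{
    _tx_initialize_low_level();                            /* Stacks, vectors, unused memory pointer */

    tx_application_define(_tx_initialize_unused_memory);   /* Create initial application objects */

#ifdef TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION
    TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION          /* Expands to _tx_initialize_start_interrupts(); */
#endif

    _tx_thread_schedule();                                 /* Hand control to the scheduler */
}
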
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
@@ -311,7 +322,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARCv2_EM/MetaWare Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARCv2_EM/MetaWare Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/arc_em/metaware/test_regression/threadx_regression/.cproject b/ports/arc_em/metaware/test_regression/threadx_regression/.cproject
deleted file mode 100644
index bba84f3c..00000000
--- a/ports/arc_em/metaware/test_regression/threadx_regression/.cproject
+++ /dev/null
@@ -1,157 +0,0 @@
diff --git a/ports/arc_em/metaware/test_regression/threadx_regression/sample_threadx_regression.cmd b/ports/arc_em/metaware/test_regression/threadx_regression/sample_threadx_regression.cmd
deleted file mode 100644
index 78dc1f6e..00000000
--- a/ports/arc_em/metaware/test_regression/threadx_regression/sample_threadx_regression.cmd
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-// This is the linker script example (SRV3-style).
-// (c) Synopsys, 2013
-//
-//
-
-//number of exceptions and interrupts
-NUMBER_OF_EXCEPTIONS = 16;//it is fixed (16)
-NUMBER_OF_INTERRUPTS = 5;//depends on HW configuration
-
-//define Interrupt Vector Table size
-IVT_SIZE_ITEMS = (NUMBER_OF_EXCEPTIONS + NUMBER_OF_INTERRUPTS);//the total IVT size (in "items")
-IVT_SIZE_BYTES = IVT_SIZE_ITEMS * 4;//in bytes
-
-//define ICCM and DCCM locations
-MEMORY {
- ICCM: ORIGIN = 0x00000000, LENGTH = 128K
- DCCM: ORIGIN = 0x80000000, LENGTH = 128K
-}
-
-//define sections and groups
-SECTIONS {
- GROUP: {
- .ivt (TEXT) : # Interrupt table
- {
- ___ivt1 = .;
- * (.ivt)
- ___ivt2 = .;
- // Make the IVT at least IVT_SIZE_BYTES
- . += (___ivt2 - ___ivt1 < IVT_SIZE_BYTES) ? (IVT_SIZE_BYTES - (___ivt2 - ___ivt1)) : 0;
- }
- .ivh (TEXT) : // Interrupt handlers
-
- //TEXT sections
- .text? : { *('.text$crt*') }
- * (TEXT): {}
- //Literals
- * (LIT): {}
- } > ICCM
-
- GROUP: {
- //data sections
- .sdata?: {}
- .sbss?: {}
- *(DATA): {}
- *(BSS): {}
- //stack
- .stack_top: {}
- .stack ALIGN(4) SIZE(DEFINED _STACKSIZE?_STACKSIZE:4096): {}
- .stack_base: {}
- //heap (empty)
- .heap? ALIGN(4) SIZE(DEFINED _HEAPSIZE?_HEAPSIZE:0): {}
- .free_memory: {}
- } > DCCM
- }
diff --git a/ports/arc_em/metaware/test_regression/threadx_regression/tx_initialize_low_level.s b/ports/arc_em/metaware/test_regression/threadx_regression/tx_initialize_low_level.s
deleted file mode 100644
index bea01020..00000000
--- a/ports/arc_em/metaware/test_regression/threadx_regression/tx_initialize_low_level.s
+++ /dev/null
@@ -1,360 +0,0 @@
-;/**************************************************************************/
-;/* */
-;/* Copyright (c) Microsoft Corporation. All rights reserved. */
-;/* */
-;/* This software is licensed under the Microsoft Software License */
-;/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-;/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-;/* and in the root directory of this software. */
-;/* */
-;/**************************************************************************/
-
-;/**************************************************************************/
-;/**************************************************************************/
-;/** */
-;/** ThreadX Component */
-;/** */
-;/** Initialize */
-;/** */
-;/**************************************************************************/
-;/**************************************************************************/
-
- .equ IRQ_SELECT, 0x40B
- .equ KSTACK_TOP, 0x264
- .equ KSTACK_BASE, 0x265
- .equ STATUS32_SC, 0x4000
-
-;
-; /* Define section for placement after all linker allocated RAM memory. This
-; is used to calculate the first free address that is passed to
-; tx_appication_define, soley for the ThreadX application's use. */
-;
- .section ".free_memory","aw"
- .align 4
- .global _tx_first_free_address
-_tx_first_free_address:
- .space 4
-;
-; /* Define section for placement before the main stack area for setting
-; up the STACK_TOP address for hardware stack checking. */
-;
- .section ".stack_top","aw"
- .align 4
- .global _tx_system_stack_top_address
-_tx_system_stack_top_address:
- .space 4
-;
-; /* Define section for placement after the main stack area for setting
-; up the STACK_BASE address for hardware stack checking. */
-;
- .section ".stack_base","aw"
- .align 4
- .global _tx_system_stack_base_address
-_tx_system_stack_base_address:
- .space 4
-;
-;
- .text
-;/**************************************************************************/
-;/* */
-;/* FUNCTION RELEASE */
-;/* */
-;/* _tx_initialize_low_level ARCv2_EM/MetaWare */
-;/* 6.x */
-;/* AUTHOR */
-;/* */
-;/* William E. Lamie, Microsoft Corporation */
-;/* */
-;/* DESCRIPTION */
-;/* */
-;/* This function is responsible for any low-level processor */
-;/* initialization, including setting up interrupt vectors, setting */
-;/* up a periodic timer interrupt source, saving the system stack */
-;/* pointer for use in ISR processing later, and finding the first */
-;/* available RAM memory address for tx_application_define. */
-;/* */
-;/* INPUT */
-;/* */
-;/* None */
-;/* */
-;/* OUTPUT */
-;/* */
-;/* None */
-;/* */
-;/* CALLS */
-;/* */
-;/* None */
-;/* */
-;/* CALLED BY */
-;/* */
-;/* _tx_initialize_kernel_enter ThreadX entry function */
-;/* */
-;/* RELEASE HISTORY */
-;/* */
-;/* DATE NAME DESCRIPTION */
-;/* */
-;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-;/* */
-;/**************************************************************************/
-;VOID _tx_initialize_low_level(VOID)
-;{
- .global _tx_initialize_low_level
- .type _tx_initialize_low_level, @function
-_tx_initialize_low_level:
-
- .ifdef TX_ENABLE_HW_STACK_CHECKING
- mov r0, _tx_system_stack_top_address ; Pickup top of system stack (lowest memory address)
- sr r0, [KSTACK_TOP] ; Setup KSTACK_TOP
- mov r0, _tx_system_stack_base_address ; Pickup base of system stack (highest memory address)
- sr r0, [KSTACK_BASE] ; Setup KSTACK_BASE
- lr r0, [status32] ; Pickup current STATUS32
- or r0, r0, STATUS32_SC ; Or in hardware stack checking enable bit (SC)
- kflag r0 ; Enable hardware stack checking
- .endif
-;
-; /* Save the system stack pointer. */
-; _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-;
- mov r0, _estack ; Pickup the end of stack address
- st r0, [gp, _tx_thread_system_stack_ptr@sda] ; Save system stack pointer
-;
-;
-; /* Pickup the first available memory address. */
-;
- mov r0, _tx_first_free_address ; Pickup first free memory address
-;
-; /* Save the first available memory address. */
-; _tx_initialize_unused_memory = (VOID_PTR) _end;
-;
- st r0, [gp, _tx_initialize_unused_memory@sda]
-;
-;
-; /* Setup Timer 0 for periodic interrupts at interrupt vector 16. */
-;
- mov r0, 0 ; Disable additional ISR reg saving/restoring
- sr r0, [AUX_IRQ_CTRL] ;
-
- mov r0, 16 ; Select timer 0
- sr r0, [IRQ_SELECT] ;
- mov r0, 15 ; Set timer 0 to priority 15
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10000 ; Setup timer period
- sr r0, [LIMIT0] ;
- mov r0, 0 ; Clear timer 0 current count
- sr r0, [COUNT0] ;
- mov r0, 3 ; Enable timer 0
- sr r0, [CONTROL0] ;
-
- .ifdef TX_TIMER_1_SETUP
- mov r0, 17 ; Select timer 1
- sr r0, [IRQ_SELECT] ;
- mov r0, 2 ; Set timer 1 to priority 14
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10020 ; Setup timer period
- sr r0, [LIMIT1] ;
- mov r0, 0 ; Clear timer 0 current count
- sr r0, [COUNT1] ;
- mov r0, 3 ; Enable timer 0
- sr r0, [CONTROL1] ;
- .endif
-;
-; /* Done, return to caller. */
-;
- j_s.d [blink] ; Return to caller
- nop
-;}
-;
-;
-; /* Define default vector table entries. */
-;
- .global _tx_memory_error
-_tx_memory_error:
- flag 1
- nop
- nop
- nop
- b _tx_memory_error
-
- .global _tx_instruction_error
-_tx_instruction_error:
- flag 1
- nop
- nop
- nop
- b _tx_instruction_error
-
- .global _tx_ev_machine_check
-_tx_ev_machine_check:
- flag 1
- nop
- nop
- nop
- b _tx_ev_machine_check
-
- .global _tx_ev_tblmiss_inst
-_tx_ev_tblmiss_inst:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_inst
-
- .global _tx_ev_tblmiss_data
-_tx_ev_tblmiss_data:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_data
-
- .global _tx_ev_protection_viol
-_tx_ev_protection_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_protection_viol
-
- .global _tx_ev_privilege_viol
-_tx_ev_privilege_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_privilege_viol
-
- .global _tx_ev_software_int
-_tx_ev_software_int:
- flag 1
- nop
- nop
- nop
- b _tx_ev_software_int
-
- .global _tx_ev_trap
-_tx_ev_trap:
- flag 1
- nop
- nop
- nop
- b _tx_ev_trap
-
- .global _tx_ev_extension
-_tx_ev_extension:
- flag 1
- nop
- nop
- nop
- b _tx_ev_extension
-
- .global _tx_ev_divide_by_zero
-_tx_ev_divide_by_zero:
- flag 1
- nop
- nop
- nop
- b _tx_ev_divide_by_zero
-
- .global _tx_ev_dc_error
-_tx_ev_dc_error:
- flag 1
- nop
- nop
- nop
- b _tx_ev_dc_error
-
- .global _tx_ev_maligned
-_tx_ev_maligned:
- flag 1
- nop
- nop
- nop
- b _tx_ev_maligned
-
- .global _tx_unsued_0
-_tx_unsued_0:
- flag 1
- nop
- nop
- nop
- b _tx_unsued_0
-
- .global _tx_unused_1
-_tx_unused_1:
- flag 1
- nop
- nop
- nop
- b _tx_unused_1
-
- .global _tx_timer_0
-_tx_timer_0:
-;
-; /* By default, setup Timer 0 as the ThreadX timer interrupt. */
-;
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st r0, [sp, 0] ; Save r0
- st r1, [sp, 4] ; Save r1
- st r2, [sp, 8] ; Save r2
- mov r0, 3
- sr r0, [CONTROL0]
-
- b _tx_timer_interrupt ; Jump to generic ThreadX timer interrupt
- ; handler
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_0
-
- .global _tx_timer_1
-_tx_timer_1:
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st blink, [sp, 16] ; Save blink
- bl _tx_thread_context_save ; Call context save
-;
-; /* ISR processing goes here. If the applications wishes to re-enable
-; interrupts, the SETI instruction can be used here. Also note that
-; register usage in assembly code must be confined to the compiler
-; scratch registers. */
-;
- mov r0, 3
- sr r0, [CONTROL1]
-;
- b _tx_thread_context_restore ; Call context restore
-
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_1
-
- .global _tx_undefined_0
-_tx_undefined_0:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_0
-
- .global _tx_undefined_1
-_tx_undefined_1:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_1
-
- .global _tx_undefined_2
-_tx_undefined_2:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_2
-
- .end
diff --git a/ports/arc_em/metaware/test_regression/threadx_regression/vectors.s b/ports/arc_em/metaware/test_regression/threadx_regression/vectors.s
deleted file mode 100644
index c6cbc893..00000000
--- a/ports/arc_em/metaware/test_regression/threadx_regression/vectors.s
+++ /dev/null
@@ -1,29 +0,0 @@
-
-.file "vectors.s"
-.section .ivt,text
-;; This directive forces this section to stay resident even if stripped out by the -zpurgetext linker option
-.sectflag .ivt,include
-
-;// handler's name type number name offset in IVT (hex/dec)
-.long _start ; exception 0 program entry point offset 0x0 0
-.long _tx_memory_error ; exception 1 memory_error offset 0x4 4
-.long _tx_instruction_error ; exception 2 instruction_error offset 0x8 8
-.long _tx_ev_machine_check ; exception 3 EV_MachineCheck offset 0xC 12
-.long _tx_ev_tblmiss_inst ; exception 4 EV_TLBMissI offset 0x10 16
-.long _tx_ev_tblmiss_data ; exception 5 EV_TLBMissD offset 0x14 20
-.long _tx_ev_protection_viol ; exception 6 EV_ProtV offset 0x18 24
-.long _tx_ev_privilege_viol ; exception 7 EV_PrivilegeV offset 0x1C 28
-.long _tx_ev_software_int ; exception 8 EV_SWI offset 0x20 32
-.long _tx_ev_trap ; exception 9 EV_Trap offset 0x24 36
-.long _tx_ev_extension ; exception 10 EV_Extension offset 0x28 40
-.long _tx_ev_divide_by_zero ; exception 11 EV_DivZero offset 0x2C 44
-.long _tx_ev_dc_error ; exception 12 EV_DCError offset 0x30 48
-.long _tx_ev_maligned ; exception 13 EV_Maligned offset 0x34 52
-.long _tx_unsued_0 ; exception 14 unused offset 0x38 56
-.long _tx_unused_1 ; exception 15 unused offset 0x3C 60
-.long _tx_timer_0 ; IRQ 16 Timer 0 offset 0x40 64
-.long _tx_timer_1 ; IRQ 17 Timer 1 offset 0x44 68
-.long _tx_undefined_0 ; IRQ 18 offset 0x48 72
-.long _tx_undefined_1 ; IRQ 19 offset 0x4C 76
-.long _tx_undefined_2 ; IRQ 20 offset 0x50 80
-
diff --git a/ports/arc_em/metaware/test_regression/tx/.cproject b/ports/arc_em/metaware/test_regression/tx/.cproject
deleted file mode 100644
index b9202ea5..00000000
--- a/ports/arc_em/metaware/test_regression/tx/.cproject
+++ /dev/null
@@ -1,141 +0,0 @@
diff --git a/ports/arc_em/metaware/test_regression/tx/tx_user.h b/ports/arc_em/metaware/test_regression/tx/tx_user.h
deleted file mode 100644
index 1b5bc378..00000000
--- a/ports/arc_em/metaware/test_regression/tx/tx_user.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/**************************************************************************/
-/* */
-/* Copyright (c) Microsoft Corporation. All rights reserved. */
-/* */
-/* This software is licensed under the Microsoft Software License */
-/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-/* and in the root directory of this software. */
-/* */
-/**************************************************************************/
-
-
-/**************************************************************************/
-/**************************************************************************/
-/** */
-/** ThreadX Component */
-/** */
-/** User Specific */
-/** */
-/**************************************************************************/
-/**************************************************************************/
-
-
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_user.h PORTABLE C */
-/* 6.x */
-/* */
-/* AUTHOR */
-/* */
-/* William E. Lamie, Microsoft Corporation */
-/* */
-/* DESCRIPTION */
-/* */
-/* This file contains user defines for configuring ThreadX in specific */
-/* ways. This file will have an effect only if the application and */
-/* ThreadX library are built with TX_INCLUDE_USER_DEFINE_FILE defined. */
-/* Note that all the defines in this file may also be made on the */
-/* command line when building ThreadX library and application objects. */
-/* */
-/* RELEASE HISTORY */
-/* */
-/* DATE NAME DESCRIPTION */
-/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
-/* resulting in version 6.1 */
-/* xx-xx-xxxx Scott Larson Modified comment(s), */
-/* added option to remove */
-/* FileX pointer, */
-/* resulting in version 6.x */
-/* */
-/**************************************************************************/
-
-#ifndef TX_USER_H
-#define TX_USER_H
-
-
-#define TX_REGRESSION_TEST
-#define TEST_STACK_SIZE_PRINTF (4096)
-
-
-/* Define automated coverage test extensions... These are required for the
- ThreadX regression test. */
-
-typedef unsigned int TEST_FLAG;
-extern TEST_FLAG threadx_byte_allocate_loop_test;
-extern TEST_FLAG threadx_byte_release_loop_test;
-extern TEST_FLAG threadx_mutex_suspension_put_test;
-extern TEST_FLAG threadx_mutex_suspension_priority_test;
-#ifndef TX_TIMER_PROCESS_IN_ISR
-extern TEST_FLAG threadx_delete_timer_thread;
-#endif
-
-extern void abort_and_resume_byte_allocating_thread(void);
-extern void abort_all_threads_suspended_on_mutex(void);
-extern void suspend_lowest_priority(void);
-#ifndef TX_TIMER_PROCESS_IN_ISR
-extern void delete_timer_thread(void);
-#endif
-extern TEST_FLAG test_stack_analyze_flag;
-extern TEST_FLAG test_initialize_flag;
-extern TEST_FLAG test_forced_mutex_timeout;
-
-
-#ifdef TX_REGRESSION_TEST
-
-/* Define extension macros for automated coverage tests. */
-
-
-#define TX_BYTE_ALLOCATE_EXTENSION if (threadx_byte_allocate_loop_test == ((TEST_FLAG) 1)) \
- { \
- pool_ptr -> tx_byte_pool_owner = TX_NULL; \
- threadx_byte_allocate_loop_test = ((TEST_FLAG) 0); \
- }
-
-#define TX_BYTE_RELEASE_EXTENSION if (threadx_byte_release_loop_test == ((TEST_FLAG) 1)) \
- { \
- threadx_byte_release_loop_test = ((TEST_FLAG) 0); \
- abort_and_resume_byte_allocating_thread(); \
- }
-
-#define TX_MUTEX_PUT_EXTENSION_1 if (threadx_mutex_suspension_put_test == ((TEST_FLAG) 1)) \
- { \
- threadx_mutex_suspension_put_test = ((TEST_FLAG) 0); \
- abort_all_threads_suspended_on_mutex(); \
- }
-
-
-#define TX_MUTEX_PUT_EXTENSION_2 if (test_forced_mutex_timeout == ((TEST_FLAG) 1)) \
- { \
- test_forced_mutex_timeout = ((TEST_FLAG) 0); \
- _tx_thread_wait_abort(mutex_ptr -> tx_mutex_suspension_list); \
- }
-
-
-#define TX_MUTEX_PRIORITY_CHANGE_EXTENSION if (threadx_mutex_suspension_priority_test == ((TEST_FLAG) 1)) \
- { \
- threadx_mutex_suspension_priority_test = ((TEST_FLAG) 0); \
- suspend_lowest_priority(); \
- }
-
-#endif /* TX_REGRESSION_TEST */
-
-
-/* Define various build options for the ThreadX port. The application should either make changes
- here by commenting or un-commenting the conditional compilation defined OR supply the defines
- though the compiler's equivalent of the -D option.
-
- For maximum speed, the following should be defined:
-
- TX_MAX_PRIORITIES 32
- TX_DISABLE_PREEMPTION_THRESHOLD
- TX_DISABLE_REDUNDANT_CLEARING
- TX_DISABLE_NOTIFY_CALLBACKS
- TX_NOT_INTERRUPTABLE
- TX_TIMER_PROCESS_IN_ISR
- TX_REACTIVATE_INLINE
- TX_DISABLE_STACK_FILLING
- TX_INLINE_THREAD_RESUME_SUSPEND
-
- For minimum size, the following should be defined:
-
- TX_MAX_PRIORITIES 32
- TX_DISABLE_PREEMPTION_THRESHOLD
- TX_DISABLE_REDUNDANT_CLEARING
- TX_DISABLE_NOTIFY_CALLBACKS
- TX_NO_FILEX_POINTER
- TX_NOT_INTERRUPTABLE
- TX_TIMER_PROCESS_IN_ISR
-
- Of course, many of these defines reduce functionality and/or change the behavior of the
- system in ways that may not be worth the trade-off. For example, the TX_TIMER_PROCESS_IN_ISR
- results in faster and smaller code, however, it increases the amount of processing in the ISR.
- In addition, some services that are available in timers are not available from ISRs and will
- therefore return an error if this option is used. This may or may not be desirable for a
- given application. */
-
-
-/* Override various options with default values already assigned in tx_port.h. Please also refer
- to tx_port.h for descriptions on each of these options. */
-
-/*
-#define TX_MAX_PRIORITIES 32
-#define TX_MINIMUM_STACK ????
-#define TX_THREAD_USER_EXTENSION ????
-#define TX_TIMER_THREAD_STACK_SIZE ????
-#define TX_TIMER_THREAD_PRIORITY ????
-*/
-
-/* Determine if there is a FileX pointer in the thread control block.
- By default, the pointer is there for legacy/backwards compatibility.
- The pointer must also be there for applications using FileX.
- Define this to save space in the thread control block.
-*/
-
-/*
-#define TX_NO_FILEX_POINTER
-*/
-
-/* Determine if timer expirations (application timers, timeouts, and tx_thread_sleep calls
- should be processed within the a system timer thread or directly in the timer ISR.
- By default, the timer thread is used. When the following is defined, the timer expiration
- processing is done directly from the timer ISR, thereby eliminating the timer thread control
- block, stack, and context switching to activate it. */
-
-/*
-#define TX_TIMER_PROCESS_IN_ISR
-*/
-
-/* Determine if in-line timer reactivation should be used within the timer expiration processing.
- By default, this is disabled and a function call is used. When the following is defined,
- reactivating is performed in-line resulting in faster timer processing but slightly larger
- code size. */
-
-/*
-#define TX_REACTIVATE_INLINE
-*/
-
-/* Determine is stack filling is enabled. By default, ThreadX stack filling is enabled,
- which places an 0xEF pattern in each byte of each thread's stack. This is used by
- debuggers with ThreadX-awareness and by the ThreadX run-time stack checking feature. */
-
-/*
-#define TX_DISABLE_STACK_FILLING
-*/
-
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
- disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
- checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
- define is negated, thereby forcing the stack fill which is necessary for the stack checking
- logic. */
-
-/*
-#define TX_ENABLE_STACK_CHECKING
-*/
-
-/* Determine if preemption-threshold should be disabled. By default, preemption-threshold is
- enabled. If the application does not use preemption-threshold, it may be disabled to reduce
- code size and improve performance. */
-
-/*
-#define TX_DISABLE_PREEMPTION_THRESHOLD
-*/
-
-/* Determine if global ThreadX variables should be cleared. If the compiler startup code clears
- the .bss section prior to ThreadX running, the define can be used to eliminate unnecessary
- clearing of ThreadX global variables. */
-
-/*
-#define TX_DISABLE_REDUNDANT_CLEARING
-*/
-
-/* Determine if no timer processing is required. This option will help eliminate the timer
- processing when not needed. The user will also have to comment out the call to
- tx_timer_interrupt, which is typically made from assembly language in
- tx_initialize_low_level. Note: if TX_NO_TIMER is used, the define TX_TIMER_PROCESS_IN_ISR
- must also be used and tx_timer_initialize must be removed from ThreadX library. */
-
-/*
-#define TX_NO_TIMER
-#ifndef TX_TIMER_PROCESS_IN_ISR
-#define TX_TIMER_PROCESS_IN_ISR
-#endif
-*/
-
-/* Determine if the notify callback option should be disabled. By default, notify callbacks are
- enabled. If the application does not use notify callbacks, they may be disabled to reduce
- code size and improve performance. */
-
-/*
-#define TX_DISABLE_NOTIFY_CALLBACKS
-*/
-
-
-/* Determine if the tx_thread_resume and tx_thread_suspend services should have their internal
- code in-line. This results in a larger image, but improves the performance of the thread
- resume and suspend services. */
-
-/*
-#define TX_INLINE_THREAD_RESUME_SUSPEND
-*/
-
-
-/* Determine if the internal ThreadX code is non-interruptable. This results in smaller code
- size and less processing overhead, but increases the interrupt lockout time. */
-
-/*
-#define TX_NOT_INTERRUPTABLE
-*/
-
-
-/* Determine if the trace event logging code should be enabled. This causes slight increases in
- code size and overhead, but provides the ability to generate system trace information which
- is available for viewing in TraceX. */
-
-/*
-#define TX_ENABLE_EVENT_TRACE
-*/
-
-
-/* Determine if block pool performance gathering is required by the application. When the following is
- defined, ThreadX gathers various block pool performance information. */
-
-/*
-#define TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if byte pool performance gathering is required by the application. When the following is
- defined, ThreadX gathers various byte pool performance information. */
-
-/*
-#define TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if event flags performance gathering is required by the application. When the following is
- defined, ThreadX gathers various event flags performance information. */
-
-/*
-#define TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if mutex performance gathering is required by the application. When the following is
- defined, ThreadX gathers various mutex performance information. */
-
-/*
-#define TX_MUTEX_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if queue performance gathering is required by the application. When the following is
- defined, ThreadX gathers various queue performance information. */
-
-/*
-#define TX_QUEUE_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if semaphore performance gathering is required by the application. When the following is
- defined, ThreadX gathers various semaphore performance information. */
-
-/*
-#define TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if thread performance gathering is required by the application. When the following is
- defined, ThreadX gathers various thread performance information. */
-
-/*
-#define TX_THREAD_ENABLE_PERFORMANCE_INFO
-*/
-
-/* Determine if timer performance gathering is required by the application. When the following is
- defined, ThreadX gathers various timer performance information. */
-
-/*
-#define TX_TIMER_ENABLE_PERFORMANCE_INFO
-*/
-
-
-#endif /* TX_USER_H */
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/.cproject b/ports/arc_em/metaware/test_sandbox/threadx_sandbox/.cproject
deleted file mode 100644
index 84623631..00000000
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/.cproject
+++ /dev/null
@@ -1,145 +0,0 @@
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.c b/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.c
deleted file mode 100644
index a750646e..00000000
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
- byte pool, and block pool. */
-
-#include "tx_api.h"
-
-#define TX_TEST_STACK_SIZE (1024 * 2)
-#define TX_TEST_QUEUE_SIZE (16)
-#define TX_TEST_BYTE_POOL_SIZE (1024 * 4)
-#define TX_TEST_BLOCK_POOL_SIZE (1024 * 4)
-
-
-/* Define the ThreadX object control blocks... */
-
-TX_THREAD tx_test_thread_0;
-TX_THREAD tx_test_thread_1;
-TX_THREAD tx_test_thread_2;
-TX_QUEUE tx_test_queue_0;
-TX_SEMAPHORE tx_test_semaphore_0;
-TX_MUTEX tx_test_mutex_0;
-TX_EVENT_FLAGS_GROUP tx_test_event_flags_0;
-TX_BYTE_POOL tx_test_byte_pool_0;
-TX_BLOCK_POOL tx_test_block_pool_0;
-
-
-/* Define the counters used in the demo application... */
-
-ULONG tx_test_thread_0_counter;
-ULONG tx_test_thread_1_counter;
-ULONG tx_test_thread_2_counter;
-
-
-/* Define thread prototypes. */
-
-void tx_test_thread_0_entry(ULONG thread_input);
-void tx_test_thread_1_entry(ULONG thread_input);
-void tx_test_thread_2_entry(ULONG thread_input);
-
-
-/* Define the thread stacks. */
-
-ULONG tx_test_thread_0_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_1_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_2_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-
-
-/* Define other buffers used by the test code. */
-
-ULONG tx_test_queue_0_buffer[TX_TEST_QUEUE_SIZE];
-ULONG tx_test_byte_pool_0_buffer[TX_TEST_BYTE_POOL_SIZE / sizeof(ULONG)];
-ULONG tx_test_block_pool_0_buffer[TX_TEST_BLOCK_POOL_SIZE / sizeof(ULONG)];
-
-
-/* Define test function prototypes. */
-
-void tx_test_sleep();
-void tx_test_busy_loop();
-
-int tx_test_sum_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72);
-
-int tx_test_multiply_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72);
-
-
-/**/
-
-void tx_test_error_handler()
-{
- for (;;)
- {
- /* Stay here forever */
- }
-}
-
-/* Define main entry point. */
-
-int main()
-{
-
- /* Enter the ThreadX kernel. */
- tx_kernel_enter();
-
- return(0);
-}
-
-
-/* Define what the initial system looks like. */
-
-void tx_application_define(void *first_unused_memory)
-{
-
-UINT status = TX_SUCCESS;
-
-
- /* Create the ThreadX test thread 0. */
- status = tx_thread_create(
- &tx_test_thread_0, "ThreadX test thread 0",
- tx_test_thread_0_entry, 0,
- tx_test_thread_0_stack,
- TX_TEST_STACK_SIZE,
- 5, 5,
- TX_NO_TIME_SLICE,
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 1. */
- status = tx_thread_create(
- &tx_test_thread_1, "ThreadX test thread 1",
- tx_test_thread_1_entry, 1,
- tx_test_thread_1_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 3, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 2. */
- status = tx_thread_create(
- &tx_test_thread_2, "ThreadX test thread 2",
- tx_test_thread_2_entry, 2,
- tx_test_thread_2_stack,
- TX_TEST_STACK_SIZE,
- 5, 7,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the message queue shared by threads 1 and 2. */
- status = tx_queue_create(&tx_test_queue_0, "ThreadX test queue 0", TX_1_ULONG, tx_test_queue_0_buffer, TX_TEST_QUEUE_SIZE*sizeof(ULONG));
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the semaphore used for testing. */
- status = tx_semaphore_create(&tx_test_semaphore_0, "ThreadX test semaphore 0", 1);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the event flags group used by threads 1 and 5. */
- status = tx_event_flags_create(&tx_test_event_flags_0, "ThreadX test event flags 0");
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the mutex used for testing without priority inheritance. */
- status = tx_mutex_create(&tx_test_mutex_0, "ThreadX test mutex 0", TX_NO_INHERIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create a byte memory pool for testing. */
- status = tx_byte_pool_create(&tx_test_byte_pool_0, "ThreadX test byte pool 0", tx_test_byte_pool_0_buffer, TX_TEST_BYTE_POOL_SIZE);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Quick test of the byte pool during initialization. */
- {
- CHAR *pointer = TX_NULL;
-
- /* Allocate a block and release the block memory. */
- status = tx_byte_allocate(&tx_test_byte_pool_0, (VOID **) &pointer, 123, TX_NO_WAIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Release the block back to the pool. */
- status = tx_block_release(pointer);
- if (status != TX_SUCCESS) tx_test_error_handler();
- }
-
- /* Create a block memory pool to allocate a message buffer from. */
- status = tx_block_pool_create(&tx_test_block_pool_0, "block pool 0", sizeof(ULONG), tx_test_block_pool_0_buffer, TX_TEST_BLOCK_POOL_SIZE);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Quick test of the block pool during initialization. */
- {
- CHAR *pointer = TX_NULL;
-
- /* Allocate a block and release the block memory. */
- status = tx_block_allocate(&tx_test_block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Release the block back to the pool. */
- status = tx_block_release(pointer);
- if (status != TX_SUCCESS) tx_test_error_handler();
- }
-}
-
-
-
-/* Define the test threads. */
-
-void tx_test_thread_0_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This thread simply sits in while-forever-sleep loop. */
- while(1)
- {
-
- /* Increment the thread counter. */
- tx_test_thread_0_counter++;
-
- /* Sleep for 10 ticks. */
- tx_thread_sleep(10);
-
- /* Set event flag 0 to wakeup thread 5. */
- status = tx_event_flags_set(&tx_test_event_flags_0, 0x1, TX_OR);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void tx_test_thread_1_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This thread simply sends messages to a queue shared by thread 2. */
- while(1)
- {
-
- /* Increment the thread counter. */
- tx_test_thread_1_counter++;
-
- /* Send message to queue 0. */
- status = tx_queue_send(&tx_test_queue_0, &tx_test_thread_1_counter, TX_WAIT_FOREVER);
-
- /* Check completion status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void tx_test_thread_2_entry(ULONG thread_input)
-{
-
-ULONG received_message;
-UINT status;
-
- /* This thread retrieves messages placed on the queue by thread 1. */
- while(1)
- {
-
- /* Increment the thread counter. */
- tx_test_thread_2_counter++;
-
- /* Retrieve a message from the queue. */
- status = tx_queue_receive(&tx_test_queue_0, &received_message, TX_WAIT_FOREVER);
-
- /* Check completion status and make sure the message is what we
- expected. */
- if ((status != TX_SUCCESS) || (received_message != tx_test_thread_1_counter))
- break;
- }
-}
-
-
-void thread_3_and_4_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This function is executed from thread 3 and thread 4. As the loop
- below shows, these function compete for ownership of semaphore_0. */
- while(1)
- {
-
- /* Get the semaphore with suspension. */
- status = tx_semaphore_get(&tx_test_semaphore_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Sleep for 2 ticks to hold the semaphore. */
- tx_thread_sleep(2);
-
- /* Release the semaphore. */
- status = tx_semaphore_put(&tx_test_semaphore_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void thread_5_entry(ULONG thread_input)
-{
-
-UINT status;
-ULONG actual_flags;
-
-
- /* This thread simply waits for an event in a forever loop. */
- while(1)
- {
-
- /* Wait for event flag 0. */
- status = tx_event_flags_get(&tx_test_event_flags_0, 0x1, TX_OR_CLEAR,
- &actual_flags, TX_WAIT_FOREVER);
-
- /* Check status. */
- if ((status != TX_SUCCESS) || (actual_flags != 0x1))
- break;
- }
-}
-
-
-void thread_6_and_7_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This function is executed from thread 6 and thread 7. As the loop
- below shows, these function compete for ownership of mutex_0. */
- while(1)
- {
-
- /* Get the mutex with suspension. */
- status = tx_mutex_get(&tx_test_mutex_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Get the mutex again with suspension. This shows
- that an owning thread may retrieve the mutex it
- owns multiple times. */
- status = tx_mutex_get(&tx_test_mutex_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Sleep for 2 ticks to hold the mutex. */
- tx_thread_sleep(2);
-
- /* Release the mutex. */
- status = tx_mutex_put(&tx_test_mutex_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Release the mutex again. This will actually
- release ownership since it was obtained twice. */
- status = tx_mutex_put(&tx_test_mutex_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.cmd b/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.cmd
deleted file mode 100644
index 78dc1f6e..00000000
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/sample_threadx_validation.cmd
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-// This is the linker script example (SRV3-style).
-// (c) Synopsys, 2013
-//
-//
-
-//number of exceptions and interrupts
-NUMBER_OF_EXCEPTIONS = 16;//it is fixed (16)
-NUMBER_OF_INTERRUPTS = 5;//depends on HW configuration
-
-//define Interrupt Vector Table size
-IVT_SIZE_ITEMS = (NUMBER_OF_EXCEPTIONS + NUMBER_OF_INTERRUPTS);//the total IVT size (in "items")
-IVT_SIZE_BYTES = IVT_SIZE_ITEMS * 4;//in bytes
-
-//define ICCM and DCCM locations
-MEMORY {
- ICCM: ORIGIN = 0x00000000, LENGTH = 128K
- DCCM: ORIGIN = 0x80000000, LENGTH = 128K
-}
-
-//define sections and groups
-SECTIONS {
- GROUP: {
- .ivt (TEXT) : # Interrupt table
- {
- ___ivt1 = .;
- * (.ivt)
- ___ivt2 = .;
- // Make the IVT at least IVT_SIZE_BYTES
- . += (___ivt2 - ___ivt1 < IVT_SIZE_BYTES) ? (IVT_SIZE_BYTES - (___ivt2 - ___ivt1)) : 0;
- }
- .ivh (TEXT) : // Interrupt handlers
-
- //TEXT sections
- .text? : { *('.text$crt*') }
- * (TEXT): {}
- //Literals
- * (LIT): {}
- } > ICCM
-
- GROUP: {
- //data sections
- .sdata?: {}
- .sbss?: {}
- *(DATA): {}
- *(BSS): {}
- //stack
- .stack_top: {}
- .stack ALIGN(4) SIZE(DEFINED _STACKSIZE?_STACKSIZE:4096): {}
- .stack_base: {}
- //heap (empty)
- .heap? ALIGN(4) SIZE(DEFINED _HEAPSIZE?_HEAPSIZE:0): {}
- .free_memory: {}
- } > DCCM
- }
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/tx_initialize_low_level.s b/ports/arc_em/metaware/test_sandbox/threadx_sandbox/tx_initialize_low_level.s
deleted file mode 100644
index bea01020..00000000
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/tx_initialize_low_level.s
+++ /dev/null
@@ -1,360 +0,0 @@
-;/**************************************************************************/
-;/* */
-;/* Copyright (c) Microsoft Corporation. All rights reserved. */
-;/* */
-;/* This software is licensed under the Microsoft Software License */
-;/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-;/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-;/* and in the root directory of this software. */
-;/* */
-;/**************************************************************************/
-
-;/**************************************************************************/
-;/**************************************************************************/
-;/** */
-;/** ThreadX Component */
-;/** */
-;/** Initialize */
-;/** */
-;/**************************************************************************/
-;/**************************************************************************/
-
- .equ IRQ_SELECT, 0x40B
- .equ KSTACK_TOP, 0x264
- .equ KSTACK_BASE, 0x265
- .equ STATUS32_SC, 0x4000
-
-;
-; /* Define section for placement after all linker allocated RAM memory. This
-; is used to calculate the first free address that is passed to
-; tx_appication_define, soley for the ThreadX application's use. */
-;
- .section ".free_memory","aw"
- .align 4
- .global _tx_first_free_address
-_tx_first_free_address:
- .space 4
-;
-; /* Define section for placement before the main stack area for setting
-; up the STACK_TOP address for hardware stack checking. */
-;
- .section ".stack_top","aw"
- .align 4
- .global _tx_system_stack_top_address
-_tx_system_stack_top_address:
- .space 4
-;
-; /* Define section for placement after the main stack area for setting
-; up the STACK_BASE address for hardware stack checking. */
-;
- .section ".stack_base","aw"
- .align 4
- .global _tx_system_stack_base_address
-_tx_system_stack_base_address:
- .space 4
-;
-;
- .text
-;/**************************************************************************/
-;/* */
-;/* FUNCTION RELEASE */
-;/* */
-;/* _tx_initialize_low_level ARCv2_EM/MetaWare */
-;/* 6.x */
-;/* AUTHOR */
-;/* */
-;/* William E. Lamie, Microsoft Corporation */
-;/* */
-;/* DESCRIPTION */
-;/* */
-;/* This function is responsible for any low-level processor */
-;/* initialization, including setting up interrupt vectors, setting */
-;/* up a periodic timer interrupt source, saving the system stack */
-;/* pointer for use in ISR processing later, and finding the first */
-;/* available RAM memory address for tx_application_define. */
-;/* */
-;/* INPUT */
-;/* */
-;/* None */
-;/* */
-;/* OUTPUT */
-;/* */
-;/* None */
-;/* */
-;/* CALLS */
-;/* */
-;/* None */
-;/* */
-;/* CALLED BY */
-;/* */
-;/* _tx_initialize_kernel_enter ThreadX entry function */
-;/* */
-;/* RELEASE HISTORY */
-;/* */
-;/* DATE NAME DESCRIPTION */
-;/* */
-;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-;/* */
-;/**************************************************************************/
-;VOID _tx_initialize_low_level(VOID)
-;{
- .global _tx_initialize_low_level
- .type _tx_initialize_low_level, @function
-_tx_initialize_low_level:
-
- .ifdef TX_ENABLE_HW_STACK_CHECKING
- mov r0, _tx_system_stack_top_address ; Pickup top of system stack (lowest memory address)
- sr r0, [KSTACK_TOP] ; Setup KSTACK_TOP
- mov r0, _tx_system_stack_base_address ; Pickup base of system stack (highest memory address)
- sr r0, [KSTACK_BASE] ; Setup KSTACK_BASE
- lr r0, [status32] ; Pickup current STATUS32
- or r0, r0, STATUS32_SC ; Or in hardware stack checking enable bit (SC)
- kflag r0 ; Enable hardware stack checking
- .endif
-;
-; /* Save the system stack pointer. */
-; _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-;
- mov r0, _estack ; Pickup the end of stack address
- st r0, [gp, _tx_thread_system_stack_ptr@sda] ; Save system stack pointer
-;
-;
-; /* Pickup the first available memory address. */
-;
- mov r0, _tx_first_free_address ; Pickup first free memory address
-;
-; /* Save the first available memory address. */
-; _tx_initialize_unused_memory = (VOID_PTR) _end;
-;
- st r0, [gp, _tx_initialize_unused_memory@sda]
-;
-;
-; /* Setup Timer 0 for periodic interrupts at interrupt vector 16. */
-;
- mov r0, 0 ; Disable additional ISR reg saving/restoring
- sr r0, [AUX_IRQ_CTRL] ;
-
- mov r0, 16 ; Select timer 0
- sr r0, [IRQ_SELECT] ;
- mov r0, 15 ; Set timer 0 to priority 15
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10000 ; Setup timer period
- sr r0, [LIMIT0] ;
- mov r0, 0 ; Clear timer 0 current count
- sr r0, [COUNT0] ;
- mov r0, 3 ; Enable timer 0
- sr r0, [CONTROL0] ;
-
- .ifdef TX_TIMER_1_SETUP
- mov r0, 17 ; Select timer 1
- sr r0, [IRQ_SELECT] ;
- mov r0, 2 ; Set timer 1 to priority 14
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10020 ; Setup timer period
- sr r0, [LIMIT1] ;
- mov r0, 0 ; Clear timer 0 current count
- sr r0, [COUNT1] ;
- mov r0, 3 ; Enable timer 0
- sr r0, [CONTROL1] ;
- .endif
-;
-; /* Done, return to caller. */
-;
- j_s.d [blink] ; Return to caller
- nop
-;}
-;
-;
-; /* Define default vector table entries. */
-;
- .global _tx_memory_error
-_tx_memory_error:
- flag 1
- nop
- nop
- nop
- b _tx_memory_error
-
- .global _tx_instruction_error
-_tx_instruction_error:
- flag 1
- nop
- nop
- nop
- b _tx_instruction_error
-
- .global _tx_ev_machine_check
-_tx_ev_machine_check:
- flag 1
- nop
- nop
- nop
- b _tx_ev_machine_check
-
- .global _tx_ev_tblmiss_inst
-_tx_ev_tblmiss_inst:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_inst
-
- .global _tx_ev_tblmiss_data
-_tx_ev_tblmiss_data:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_data
-
- .global _tx_ev_protection_viol
-_tx_ev_protection_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_protection_viol
-
- .global _tx_ev_privilege_viol
-_tx_ev_privilege_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_privilege_viol
-
- .global _tx_ev_software_int
-_tx_ev_software_int:
- flag 1
- nop
- nop
- nop
- b _tx_ev_software_int
-
- .global _tx_ev_trap
-_tx_ev_trap:
- flag 1
- nop
- nop
- nop
- b _tx_ev_trap
-
- .global _tx_ev_extension
-_tx_ev_extension:
- flag 1
- nop
- nop
- nop
- b _tx_ev_extension
-
- .global _tx_ev_divide_by_zero
-_tx_ev_divide_by_zero:
- flag 1
- nop
- nop
- nop
- b _tx_ev_divide_by_zero
-
- .global _tx_ev_dc_error
-_tx_ev_dc_error:
- flag 1
- nop
- nop
- nop
- b _tx_ev_dc_error
-
- .global _tx_ev_maligned
-_tx_ev_maligned:
- flag 1
- nop
- nop
- nop
- b _tx_ev_maligned
-
- .global _tx_unsued_0
-_tx_unsued_0:
- flag 1
- nop
- nop
- nop
- b _tx_unsued_0
-
- .global _tx_unused_1
-_tx_unused_1:
- flag 1
- nop
- nop
- nop
- b _tx_unused_1
-
- .global _tx_timer_0
-_tx_timer_0:
-;
-; /* By default, setup Timer 0 as the ThreadX timer interrupt. */
-;
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st r0, [sp, 0] ; Save r0
- st r1, [sp, 4] ; Save r1
- st r2, [sp, 8] ; Save r2
- mov r0, 3
- sr r0, [CONTROL0]
-
- b _tx_timer_interrupt ; Jump to generic ThreadX timer interrupt
- ; handler
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_0
-
- .global _tx_timer_1
-_tx_timer_1:
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st blink, [sp, 16] ; Save blink
- bl _tx_thread_context_save ; Call context save
-;
-; /* ISR processing goes here. If the applications wishes to re-enable
-; interrupts, the SETI instruction can be used here. Also note that
-; register usage in assembly code must be confined to the compiler
-; scratch registers. */
-;
- mov r0, 3
- sr r0, [CONTROL1]
-;
- b _tx_thread_context_restore ; Call context restore
-
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_1
-
- .global _tx_undefined_0
-_tx_undefined_0:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_0
-
- .global _tx_undefined_1
-_tx_undefined_1:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_1
-
- .global _tx_undefined_2
-_tx_undefined_2:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_2
-
- .end
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/vectors.s b/ports/arc_em/metaware/test_sandbox/threadx_sandbox/vectors.s
deleted file mode 100644
index c6cbc893..00000000
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/vectors.s
+++ /dev/null
@@ -1,29 +0,0 @@
-
-.file "vectors.s"
-.section .ivt,text
-;; This directive forces this section to stay resident even if stripped out by the -zpurgetext linker option
-.sectflag .ivt,include
-
-;// handler's name type number name offset in IVT (hex/dec)
-.long _start ; exception 0 program entry point offset 0x0 0
-.long _tx_memory_error ; exception 1 memory_error offset 0x4 4
-.long _tx_instruction_error ; exception 2 instruction_error offset 0x8 8
-.long _tx_ev_machine_check ; exception 3 EV_MachineCheck offset 0xC 12
-.long _tx_ev_tblmiss_inst ; exception 4 EV_TLBMissI offset 0x10 16
-.long _tx_ev_tblmiss_data ; exception 5 EV_TLBMissD offset 0x14 20
-.long _tx_ev_protection_viol ; exception 6 EV_ProtV offset 0x18 24
-.long _tx_ev_privilege_viol ; exception 7 EV_PrivilegeV offset 0x1C 28
-.long _tx_ev_software_int ; exception 8 EV_SWI offset 0x20 32
-.long _tx_ev_trap ; exception 9 EV_Trap offset 0x24 36
-.long _tx_ev_extension ; exception 10 EV_Extension offset 0x28 40
-.long _tx_ev_divide_by_zero ; exception 11 EV_DivZero offset 0x2C 44
-.long _tx_ev_dc_error ; exception 12 EV_DCError offset 0x30 48
-.long _tx_ev_maligned ; exception 13 EV_Maligned offset 0x34 52
-.long _tx_unsued_0 ; exception 14 unused offset 0x38 56
-.long _tx_unused_1 ; exception 15 unused offset 0x3C 60
-.long _tx_timer_0 ; IRQ 16 Timer 0 offset 0x40 64
-.long _tx_timer_1 ; IRQ 17 Timer 1 offset 0x44 68
-.long _tx_undefined_0 ; IRQ 18 offset 0x48 72
-.long _tx_undefined_1 ; IRQ 19 offset 0x4C 76
-.long _tx_undefined_2 ; IRQ 20 offset 0x50 80
-
diff --git a/ports/arc_em/metaware/test_sandbox/tx/.cproject b/ports/arc_em/metaware/test_sandbox/tx/.cproject
deleted file mode 100644
index ce329051..00000000
--- a/ports/arc_em/metaware/test_sandbox/tx/.cproject
+++ /dev/null
@@ -1,137 +0,0 @@
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/.cproject b/ports/arc_em/metaware/test_validation/threadx_validation/.cproject
deleted file mode 100644
index 84623631..00000000
--- a/ports/arc_em/metaware/test_validation/threadx_validation/.cproject
+++ /dev/null
@@ -1,145 +0,0 @@
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.c b/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.c
deleted file mode 100644
index 48451374..00000000
--- a/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
- byte pool, and block pool. */
-
-#include "tx_api.h"
-
-#define TX_TEST_STACK_SIZE (1024 * 2)
-#define TX_TEST_QUEUE_SIZE (16)
-#define TX_TEST_BYTE_POOL_SIZE (1024 * 4)
-#define TX_TEST_BLOCK_POOL_SIZE (1024 * 4)
-
-
-/* Define the ThreadX object control blocks... */
-
-TX_THREAD tx_test_thread_0;
-TX_THREAD tx_test_thread_1;
-TX_THREAD tx_test_thread_2;
-TX_THREAD tx_test_thread_3;
-TX_THREAD tx_test_thread_4;
-TX_THREAD tx_test_thread_5;
-TX_THREAD tx_test_thread_6;
-TX_THREAD tx_test_thread_7;
-TX_QUEUE tx_test_queue_0;
-TX_SEMAPHORE tx_test_semaphore_0;
-TX_MUTEX tx_test_mutex_0;
-TX_EVENT_FLAGS_GROUP tx_test_event_flags_0;
-TX_BYTE_POOL tx_test_byte_pool_0;
-TX_BLOCK_POOL tx_test_block_pool_0;
-
-
-/* Define the counters used in the demo application... */
-
-ULONG tx_test_error_counter;
-ULONG tx_test_thread_0_counter;
-ULONG tx_test_thread_1_counter;
-ULONG tx_test_thread_2_counter;
-
-
-/* Define thread prototypes. */
-
-void tx_test_thread_0_entry(ULONG thread_input);
-void tx_test_thread_1_entry(ULONG thread_input);
-void tx_test_thread_2_entry(ULONG thread_input);
-void tx_test_thread_3_and_4_entry(ULONG thread_input);
-void tx_test_thread_5_entry(ULONG thread_input);
-void tx_test_thread_6_and_7_entry(ULONG thread_input);
-
-
-/* Define the thread stacks. */
-
-ULONG tx_test_thread_0_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_1_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_2_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_3_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_4_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_5_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_6_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-ULONG tx_test_thread_7_stack[TX_TEST_STACK_SIZE / sizeof(ULONG)];
-
-
-/* Define other buffers used by the test code. */
-
-ULONG tx_test_queue_0_buffer[TX_TEST_QUEUE_SIZE];
-ULONG tx_test_byte_pool_0_buffer[TX_TEST_BYTE_POOL_SIZE / sizeof(ULONG)];
-ULONG tx_test_block_pool_0_buffer[TX_TEST_BLOCK_POOL_SIZE / sizeof(ULONG)];
-
-
-/* Define test function prototypes. */
-
-void tx_test_sleep();
-void tx_test_busy_loop();
-void tx_test_byte_alloc_and_free();
-
-int tx_test_sum_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72);
-
-int tx_test_xor_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72);
-
-
-/* Define the error handler. */
-
-void tx_test_error_handler()
-{
- tx_test_error_counter++;
-
- for (;;)
- {
- /* Stay here forever */
- }
-}
-
-/* Define main entry point. */
-
-int main()
-{
-
- /* Enter the ThreadX kernel. */
- tx_kernel_enter();
-
- return(0);
-}
-
-
-/* Define what the initial system looks like. */
-
-void tx_application_define(void *first_unused_memory)
-{
-
-UINT status = TX_SUCCESS;
-
-
- /* Create the ThreadX test thread 0. */
- status = tx_thread_create(
- &tx_test_thread_0, "ThreadX test thread 0",
- tx_test_thread_0_entry, 0,
- tx_test_thread_0_stack,
- TX_TEST_STACK_SIZE,
- 5, 5,
- TX_NO_TIME_SLICE,
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 1. */
- status = tx_thread_create(
- &tx_test_thread_1, "ThreadX test thread 1",
- tx_test_thread_1_entry, 1,
- tx_test_thread_1_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 3, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 2. */
- status = tx_thread_create(
- &tx_test_thread_2, "ThreadX test thread 2",
- tx_test_thread_2_entry, 2,
- tx_test_thread_2_stack,
- TX_TEST_STACK_SIZE,
- 7, 5,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 3. */
- status = tx_thread_create(
- &tx_test_thread_3, "ThreadX test thread 3",
- tx_test_thread_3_and_4_entry, 3,
- tx_test_thread_3_stack,
- TX_TEST_STACK_SIZE,
- 7, 5,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 4. */
- status = tx_thread_create(
- &tx_test_thread_4, "ThreadX test thread 4",
- tx_test_thread_3_and_4_entry, 4,
- tx_test_thread_4_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 5. */
- status = tx_thread_create(
- &tx_test_thread_5, "ThreadX test thread 5",
- tx_test_thread_5_entry, 5,
- tx_test_thread_5_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 6. */
- status = tx_thread_create(
- &tx_test_thread_6, "ThreadX test thread 6",
- tx_test_thread_6_and_7_entry, 6,
- tx_test_thread_6_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the ThreadX test thread 7. */
- status = tx_thread_create(
- &tx_test_thread_7, "ThreadX test thread 7",
- tx_test_thread_6_and_7_entry, 7,
- tx_test_thread_7_stack,
- TX_TEST_STACK_SIZE,
- 7, 7,
- 4, /* time slice */
- TX_AUTO_START);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
-
- /* Create the message queue shared by threads 1 and 2. */
- status = tx_queue_create(&tx_test_queue_0, "ThreadX test queue 0", TX_1_ULONG, tx_test_queue_0_buffer, TX_TEST_QUEUE_SIZE*sizeof(ULONG));
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the semaphore used for testing. */
- status = tx_semaphore_create(&tx_test_semaphore_0, "ThreadX test semaphore 0", 1);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the event flags group used by threads 1 and 5. */
- status = tx_event_flags_create(&tx_test_event_flags_0, "ThreadX test event flags 0");
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create the mutex used for testing without priority inheritance. */
- status = tx_mutex_create(&tx_test_mutex_0, "ThreadX test mutex 0", TX_NO_INHERIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Create a byte memory pool for testing. */
- status = tx_byte_pool_create(&tx_test_byte_pool_0, "ThreadX test byte pool 0", tx_test_byte_pool_0_buffer, TX_TEST_BYTE_POOL_SIZE);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Quick test of the byte pool during initialization. */
- {
- CHAR *pointer = TX_NULL;
-
- /* Allocate a block and release the block memory. */
- status = tx_byte_allocate(&tx_test_byte_pool_0, (VOID **) &pointer, 123, TX_NO_WAIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Release the block back to the pool. */
- status = tx_byte_release(pointer);
- if (status != TX_SUCCESS) tx_test_error_handler();
- }
-
- /* Create a block memory pool to allocate a message buffer from. */
- status = tx_block_pool_create(&tx_test_block_pool_0, "ThreadX test block pool 0", 128, tx_test_block_pool_0_buffer, TX_TEST_BLOCK_POOL_SIZE);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Quick test of the block pool during initialization. */
- {
- CHAR *pointer = TX_NULL;
-
- /* Allocate a block and release the block memory. */
- status = tx_block_allocate(&tx_test_block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Release the block back to the pool. */
- status = tx_block_release(pointer);
- if (status != TX_SUCCESS) tx_test_error_handler();
- }
-}
-
-
-/* Define the test functions. */
-
-void tx_test_sleep()
-{
- tx_thread_sleep(10);
-}
-
-void tx_test_busy_loop()
-{
- unsigned i;
- static volatile unsigned x = 0;
- for (i = 0; i < (1024 * 4); ++i)
- {
- x++;
- }
-}
-
-void tx_test_byte_alloc_and_free()
-{
- UINT status;
-
- /* Quick test of the byte pool allocate and release path. */
- {
- CHAR *pointer = TX_NULL;
-
- /* Allocate a block and release the block memory. */
- status = tx_byte_allocate(&tx_test_byte_pool_0, (VOID **) &pointer, 123, TX_NO_WAIT);
- if (status != TX_SUCCESS) tx_test_error_handler();
-
- /* Release the block back to the pool. */
- status = tx_byte_release(pointer);
- if (status != TX_SUCCESS) tx_test_error_handler();
- }
-
-}
-
-int tx_test_sum_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72)
-{
- volatile int a,b;
-
- a =
- p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 +
- p9 + p10 + p11 + p12 + p13 + p14 + p15 + p16 +
- p17 + p18 + p19 + p20 + p21 + p22 + p23 + p24 +
- p25 + p26 + p27 + p28 + p29 + p30 + p31 + p32 +
- p33 + p34 + p35 + p36 + p37 + p38 + p39 + p40 +
- p41 + p42 + p43 + p44 + p45 + p46 + p47 + p48 +
- p49 + p50 + p51 + p52 + p53 + p54 + p55 + p56 +
- p57 + p58 + p59 + p60 + p61 + p62 + p63 + p64 +
- p65 + p66 + p67 + p68 + p69 + p70 + p71 + p72;
-
- f();
-
- b =
- p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 +
- p9 + p10 + p11 + p12 + p13 + p14 + p15 + p16 +
- p17 + p18 + p19 + p20 + p21 + p22 + p23 + p24 +
- p25 + p26 + p27 + p28 + p29 + p30 + p31 + p32 +
- p33 + p34 + p35 + p36 + p37 + p38 + p39 + p40 +
- p41 + p42 + p43 + p44 + p45 + p46 + p47 + p48 +
- p49 + p50 + p51 + p52 + p53 + p54 + p55 + p56 +
- p57 + p58 + p59 + p60 + p61 + p62 + p63 + p64 +
- p65 + p66 + p67 + p68 + p69 + p70 + p71 + p72;
-
- if (a != b)
- {
- tx_test_error_handler();
- }
-
- return a;
-}
-
-int tx_test_xor_many_params(
- void (*f)(),
- int p1, int p2, int p3, int p4, int p5, int p6, int p7, int p8,
- int p9, int p10, int p11, int p12, int p13, int p14, int p15, int p16,
- int p17, int p18, int p19, int p20, int p21, int p22, int p23, int p24,
- int p25, int p26, int p27, int p28, int p29, int p30, int p31, int p32,
- int p33, int p34, int p35, int p36, int p37, int p38, int p39, int p40,
- int p41, int p42, int p43, int p44, int p45, int p46, int p47, int p48,
- int p49, int p50, int p51, int p52, int p53, int p54, int p55, int p56,
- int p57, int p58, int p59, int p60, int p61, int p62, int p63, int p64,
- int p65, int p66, int p67, int p68, int p69, int p70, int p71, int p72)
-{
- volatile int a,b;
-
- a =
- p1 ^ p2 ^ p3 ^ p4 ^ p5 ^ p6 ^ p7 ^ p8 ^
- p9 ^ p10 ^ p11 ^ p12 ^ p13 ^ p14 ^ p15 ^ p16 ^
- p17 ^ p18 ^ p19 ^ p20 ^ p21 ^ p22 ^ p23 ^ p24 ^
- p25 ^ p26 ^ p27 ^ p28 ^ p29 ^ p30 ^ p31 ^ p32 ^
- p33 ^ p34 ^ p35 ^ p36 ^ p37 ^ p38 ^ p39 ^ p40 ^
- p41 ^ p42 ^ p43 ^ p44 ^ p45 ^ p46 ^ p47 ^ p48 ^
- p49 ^ p50 ^ p51 ^ p52 ^ p53 ^ p54 ^ p55 ^ p56 ^
- p57 ^ p58 ^ p59 ^ p60 ^ p61 ^ p62 ^ p63 ^ p64 ^
- p65 ^ p66 ^ p67 ^ p68 ^ p69 ^ p70 ^ p71 ^ p72;
-
- f();
-
- b =
- p1 ^ p2 ^ p3 ^ p4 ^ p5 ^ p6 ^ p7 ^ p8 ^
- p9 ^ p10 ^ p11 ^ p12 ^ p13 ^ p14 ^ p15 ^ p16 ^
- p17 ^ p18 ^ p19 ^ p20 ^ p21 ^ p22 ^ p23 ^ p24 ^
- p25 ^ p26 ^ p27 ^ p28 ^ p29 ^ p30 ^ p31 ^ p32 ^
- p33 ^ p34 ^ p35 ^ p36 ^ p37 ^ p38 ^ p39 ^ p40 ^
- p41 ^ p42 ^ p43 ^ p44 ^ p45 ^ p46 ^ p47 ^ p48 ^
- p49 ^ p50 ^ p51 ^ p52 ^ p53 ^ p54 ^ p55 ^ p56 ^
- p57 ^ p58 ^ p59 ^ p60 ^ p61 ^ p62 ^ p63 ^ p64 ^
- p65 ^ p66 ^ p67 ^ p68 ^ p69 ^ p70 ^ p71 ^ p72;
-
- if (a != b)
- {
- tx_test_error_handler();
- }
-
- return a;
-}
-
-
-
-/* Define the test threads. */
-
-void tx_test_thread_0_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This thread simply sits in while-forever-sleep loop. */
- while(1)
- {
- volatile int a, b, c, d;
-
- int p1; int p2; int p3; int p4; int p5; int p6; int p7; int p8;
- int p9; int p10; int p11; int p12; int p13; int p14; int p15; int p16;
- int p17; int p18; int p19; int p20; int p21; int p22; int p23; int p24;
- int p25; int p26; int p27; int p28; int p29; int p30; int p31; int p32;
- int p33; int p34; int p35; int p36; int p37; int p38; int p39; int p40;
- int p41; int p42; int p43; int p44; int p45; int p46; int p47; int p48;
- int p49; int p50; int p51; int p52; int p53; int p54; int p55; int p56;
- int p57; int p58; int p59; int p60; int p61; int p62; int p63; int p64;
- int p65; int p66; int p67; int p68; int p69; int p70; int p71; int p72;
-
- p1 = 1; p2 = 2; p3 = 3; p4 = 4; p5 = 5; p6 = 6; p7 = 7; p8 = 8;
- p9 = 9; p10 = 10; p11 = 11; p12 = 12; p13 = 13; p14 = 14; p15 = 15; p16 = 16;
- p17 = 17; p18 = 18; p19 = 19; p20 = 20; p21 = 21; p22 = 22; p23 = 23; p24 = 24;
- p25 = 25; p26 = 26; p27 = 27; p28 = 28; p29 = 29; p30 = 30; p31 = 31; p32 = 32;
- p33 = 33; p34 = 34; p35 = 35; p36 = 36; p37 = 37; p38 = 38; p39 = 39; p40 = 40;
- p41 = 41; p42 = 42; p43 = 43; p44 = 44; p45 = 45; p46 = 46; p47 = 47; p48 = 48;
- p49 = 49; p50 = 50; p51 = 51; p52 = 52; p53 = 53; p54 = 54; p55 = 55; p56 = 56;
- p57 = 57; p58 = 58; p59 = 59; p60 = 60; p61 = 61; p62 = 62; p63 = 63; p64 = 64;
- p65 = 65; p66 = 66; p67 = 67; p68 = 68; p69 = 69; p70 = 70; p71 = 71; p72 = 72;
-
- a = tx_test_sum_many_params(
- tx_test_sleep,
- 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62, 63, 64,
- 65, 66, 67, 68, 69, 70, 71, 72);
-
- b = tx_test_xor_many_params(
- tx_test_sleep,
- 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62, 63, 64,
- 65, 66, 67, 68, 69, 70, 71, 72);
-
- /* Increment the thread counter. */
- tx_test_thread_0_counter++;
-
- /* Sleep for 10 ticks. */
- tx_thread_sleep(10);
-
- c = tx_test_sum_many_params(
- tx_test_sleep,
- p1, p2, p3, p4, p5, p6, p7, p8,
- p9, p10, p11, p12, p13, p14, p15, p16,
- p17, p18, p19, p20, p21, p22, p23, p24,
- p25, p26, p27, p28, p29, p30, p31, p32,
- p33, p34, p35, p36, p37, p38, p39, p40,
- p41, p42, p43, p44, p45, p46, p47, p48,
- p49, p50, p51, p52, p53, p54, p55, p56,
- p57, p58, p59, p60, p61, p62, p63, p64,
- p65, p66, p67, p68, p69, p70, p71, p72);
-
- d = tx_test_xor_many_params(
- tx_test_sleep,
- p1, p2, p3, p4, p5, p6, p7, p8,
- p9, p10, p11, p12, p13, p14, p15, p16,
- p17, p18, p19, p20, p21, p22, p23, p24,
- p25, p26, p27, p28, p29, p30, p31, p32,
- p33, p34, p35, p36, p37, p38, p39, p40,
- p41, p42, p43, p44, p45, p46, p47, p48,
- p49, p50, p51, p52, p53, p54, p55, p56,
- p57, p58, p59, p60, p61, p62, p63, p64,
- p65, p66, p67, p68, p69, p70, p71, p72);
-
- if (a != c)
- {
- tx_test_error_handler();
- }
-
- if (b != d)
- {
- tx_test_error_handler();
- }
-
- /* Set event flag 0 to wakeup thread 5. */
- status = tx_event_flags_set(&tx_test_event_flags_0, 0x1, TX_OR);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void tx_test_thread_1_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This thread simply sends messages to a queue shared by thread 2. */
- while(1)
- {
-
- /* Increment the thread counter. */
- tx_test_thread_1_counter++;
-
- /* Send message to queue 0. */
- status = tx_queue_send(&tx_test_queue_0, &tx_test_thread_1_counter, TX_WAIT_FOREVER);
-
- /* Check completion status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void tx_test_thread_2_entry(ULONG thread_input)
-{
-
-ULONG received_message;
-UINT status;
-
- /* This thread retrieves messages placed on the queue by thread 1. */
- while(1)
- {
-
- /* Increment the thread counter. */
- tx_test_thread_2_counter++;
-
- /* Retrieve a message from the queue. */
- status = tx_queue_receive(&tx_test_queue_0, &received_message, TX_WAIT_FOREVER);
-
- /* Check completion status and make sure the message is what we
- expected. */
- if ((status != TX_SUCCESS) || (received_message != tx_test_thread_1_counter))
- break;
- }
-}
-
-
-void tx_test_thread_3_and_4_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This function is executed from thread 3 and thread 4. As the loop
- below shows, these functions compete for ownership of semaphore_0. */
- while(1)
- {
-
- /* Get the semaphore with suspension. */
- status = tx_semaphore_get(&tx_test_semaphore_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Sleep for 2 ticks to hold the semaphore. */
- tx_thread_sleep(2);
-
- /* Release the semaphore. */
- status = tx_semaphore_put(&tx_test_semaphore_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
-
-
-void tx_test_thread_5_entry(ULONG thread_input)
-{
-
-UINT status;
-ULONG actual_flags;
-
-
- /* This thread simply waits for an event in a forever loop. */
- while(1)
- {
-
- /* Wait for event flag 0. */
- status = tx_event_flags_get(&tx_test_event_flags_0, 0x1, TX_OR_CLEAR,
- &actual_flags, TX_WAIT_FOREVER);
-
- /* Check status. */
- if ((status != TX_SUCCESS) || (actual_flags != 0x1))
- break;
- }
-}
-
-
-void tx_test_thread_6_and_7_entry(ULONG thread_input)
-{
-
-UINT status;
-
-
- /* This function is executed from thread 6 and thread 7. As the loop
- below shows, these functions compete for ownership of mutex_0. */
- while(1)
- {
-
- /* Get the mutex with suspension. */
- status = tx_mutex_get(&tx_test_mutex_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Get the mutex again with suspension. This shows
- that an owning thread may retrieve the mutex it
- owns multiple times. */
- status = tx_mutex_get(&tx_test_mutex_0, TX_WAIT_FOREVER);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Sleep for 2 ticks to hold the mutex. */
- tx_thread_sleep(2);
-
- /* Release the mutex. */
- status = tx_mutex_put(&tx_test_mutex_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
-
- /* Release the mutex again. This will actually
- release ownership since it was obtained twice. */
- status = tx_mutex_put(&tx_test_mutex_0);
-
- /* Check status. */
- if (status != TX_SUCCESS)
- break;
- }
-}
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.cmd b/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.cmd
deleted file mode 100644
index 78dc1f6e..00000000
--- a/ports/arc_em/metaware/test_validation/threadx_validation/sample_threadx_validation.cmd
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-// This is the linker script example (SRV3-style).
-// (c) Synopsys, 2013
-//
-//
-
-//number of exceptions and interrupts
-NUMBER_OF_EXCEPTIONS = 16;//it is fixed (16)
-NUMBER_OF_INTERRUPTS = 5;//depends on HW configuration
-
-//define Interrupt Vector Table size
-IVT_SIZE_ITEMS = (NUMBER_OF_EXCEPTIONS + NUMBER_OF_INTERRUPTS);//the total IVT size (in "items")
-IVT_SIZE_BYTES = IVT_SIZE_ITEMS * 4;//in bytes
-
-//define ICCM and DCCM locations
-MEMORY {
- ICCM: ORIGIN = 0x00000000, LENGTH = 128K
- DCCM: ORIGIN = 0x80000000, LENGTH = 128K
-}
-
-//define sections and groups
-SECTIONS {
- GROUP: {
- .ivt (TEXT) : # Interrupt table
- {
- ___ivt1 = .;
- * (.ivt)
- ___ivt2 = .;
- // Make the IVT at least IVT_SIZE_BYTES
- . += (___ivt2 - ___ivt1 < IVT_SIZE_BYTES) ? (IVT_SIZE_BYTES - (___ivt2 - ___ivt1)) : 0;
- }
- .ivh (TEXT) : // Interrupt handlers
-
- //TEXT sections
- .text? : { *('.text$crt*') }
- * (TEXT): {}
- //Literals
- * (LIT): {}
- } > ICCM
-
- GROUP: {
- //data sections
- .sdata?: {}
- .sbss?: {}
- *(DATA): {}
- *(BSS): {}
- //stack
- .stack_top: {}
- .stack ALIGN(4) SIZE(DEFINED _STACKSIZE?_STACKSIZE:4096): {}
- .stack_base: {}
- //heap (empty)
- .heap? ALIGN(4) SIZE(DEFINED _HEAPSIZE?_HEAPSIZE:0): {}
- .free_memory: {}
- } > DCCM
- }
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/tx_initialize_low_level.s b/ports/arc_em/metaware/test_validation/threadx_validation/tx_initialize_low_level.s
deleted file mode 100644
index bea01020..00000000
--- a/ports/arc_em/metaware/test_validation/threadx_validation/tx_initialize_low_level.s
+++ /dev/null
@@ -1,360 +0,0 @@
-;/**************************************************************************/
-;/* */
-;/* Copyright (c) Microsoft Corporation. All rights reserved. */
-;/* */
-;/* This software is licensed under the Microsoft Software License */
-;/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-;/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-;/* and in the root directory of this software. */
-;/* */
-;/**************************************************************************/
-
-;/**************************************************************************/
-;/**************************************************************************/
-;/** */
-;/** ThreadX Component */
-;/** */
-;/** Initialize */
-;/** */
-;/**************************************************************************/
-;/**************************************************************************/
-
- .equ IRQ_SELECT, 0x40B
- .equ KSTACK_TOP, 0x264
- .equ KSTACK_BASE, 0x265
- .equ STATUS32_SC, 0x4000
-
-;
-; /* Define section for placement after all linker allocated RAM memory. This
-; is used to calculate the first free address that is passed to
-; tx_application_define, solely for the ThreadX application's use. */
-;
- .section ".free_memory","aw"
- .align 4
- .global _tx_first_free_address
-_tx_first_free_address:
- .space 4
-;
-; /* Define section for placement before the main stack area for setting
-; up the STACK_TOP address for hardware stack checking. */
-;
- .section ".stack_top","aw"
- .align 4
- .global _tx_system_stack_top_address
-_tx_system_stack_top_address:
- .space 4
-;
-; /* Define section for placement after the main stack area for setting
-; up the STACK_BASE address for hardware stack checking. */
-;
- .section ".stack_base","aw"
- .align 4
- .global _tx_system_stack_base_address
-_tx_system_stack_base_address:
- .space 4
-;
-;
- .text
-;/**************************************************************************/
-;/* */
-;/* FUNCTION RELEASE */
-;/* */
-;/* _tx_initialize_low_level ARCv2_EM/MetaWare */
-;/* 6.x */
-;/* AUTHOR */
-;/* */
-;/* William E. Lamie, Microsoft Corporation */
-;/* */
-;/* DESCRIPTION */
-;/* */
-;/* This function is responsible for any low-level processor */
-;/* initialization, including setting up interrupt vectors, setting */
-;/* up a periodic timer interrupt source, saving the system stack */
-;/* pointer for use in ISR processing later, and finding the first */
-;/* available RAM memory address for tx_application_define. */
-;/* */
-;/* INPUT */
-;/* */
-;/* None */
-;/* */
-;/* OUTPUT */
-;/* */
-;/* None */
-;/* */
-;/* CALLS */
-;/* */
-;/* None */
-;/* */
-;/* CALLED BY */
-;/* */
-;/* _tx_initialize_kernel_enter ThreadX entry function */
-;/* */
-;/* RELEASE HISTORY */
-;/* */
-;/* DATE NAME DESCRIPTION */
-;/* */
-;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-;/* */
-;/**************************************************************************/
-;VOID _tx_initialize_low_level(VOID)
-;{
- .global _tx_initialize_low_level
- .type _tx_initialize_low_level, @function
-_tx_initialize_low_level:
-
- .ifdef TX_ENABLE_HW_STACK_CHECKING
- mov r0, _tx_system_stack_top_address ; Pickup top of system stack (lowest memory address)
- sr r0, [KSTACK_TOP] ; Setup KSTACK_TOP
- mov r0, _tx_system_stack_base_address ; Pickup base of system stack (highest memory address)
- sr r0, [KSTACK_BASE] ; Setup KSTACK_BASE
- lr r0, [status32] ; Pickup current STATUS32
- or r0, r0, STATUS32_SC ; Or in hardware stack checking enable bit (SC)
- kflag r0 ; Enable hardware stack checking
- .endif
-;
-; /* Save the system stack pointer. */
-; _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-;
- mov r0, _estack ; Pickup the end of stack address
- st r0, [gp, _tx_thread_system_stack_ptr@sda] ; Save system stack pointer
-;
-;
-; /* Pickup the first available memory address. */
-;
- mov r0, _tx_first_free_address ; Pickup first free memory address
-;
-; /* Save the first available memory address. */
-; _tx_initialize_unused_memory = (VOID_PTR) _end;
-;
- st r0, [gp, _tx_initialize_unused_memory@sda]
-;
-;
-; /* Setup Timer 0 for periodic interrupts at interrupt vector 16. */
-;
- mov r0, 0 ; Disable additional ISR reg saving/restoring
- sr r0, [AUX_IRQ_CTRL] ;
-
- mov r0, 16 ; Select timer 0
- sr r0, [IRQ_SELECT] ;
- mov r0, 15 ; Set timer 0 to priority 15
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10000 ; Setup timer period
- sr r0, [LIMIT0] ;
- mov r0, 0 ; Clear timer 0 current count
- sr r0, [COUNT0] ;
- mov r0, 3 ; Enable timer 0
- sr r0, [CONTROL0] ;
-
- .ifdef TX_TIMER_1_SETUP
- mov r0, 17 ; Select timer 1
- sr r0, [IRQ_SELECT] ;
- mov r0, 2 ; Set timer 1 to priority 14
- sr r0, [IRQ_PRIORITY] ;
- mov r0, 1 ; Enable this interrupt
- sr r0, [IRQ_ENABLE] ;
- mov r0, 0x10020 ; Setup timer period
- sr r0, [LIMIT1] ;
- mov r0, 0 ; Clear timer 1 current count
- sr r0, [COUNT1] ;
- mov r0, 3 ; Enable timer 1
- sr r0, [CONTROL1] ;
- .endif
-;
-; /* Done, return to caller. */
-;
- j_s.d [blink] ; Return to caller
- nop
-;}
-;
-;
-; /* Define default vector table entries. */
-;
- .global _tx_memory_error
-_tx_memory_error:
- flag 1
- nop
- nop
- nop
- b _tx_memory_error
-
- .global _tx_instruction_error
-_tx_instruction_error:
- flag 1
- nop
- nop
- nop
- b _tx_instruction_error
-
- .global _tx_ev_machine_check
-_tx_ev_machine_check:
- flag 1
- nop
- nop
- nop
- b _tx_ev_machine_check
-
- .global _tx_ev_tblmiss_inst
-_tx_ev_tblmiss_inst:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_inst
-
- .global _tx_ev_tblmiss_data
-_tx_ev_tblmiss_data:
- flag 1
- nop
- nop
- nop
- b _tx_ev_tblmiss_data
-
- .global _tx_ev_protection_viol
-_tx_ev_protection_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_protection_viol
-
- .global _tx_ev_privilege_viol
-_tx_ev_privilege_viol:
- flag 1
- nop
- nop
- nop
- b _tx_ev_privilege_viol
-
- .global _tx_ev_software_int
-_tx_ev_software_int:
- flag 1
- nop
- nop
- nop
- b _tx_ev_software_int
-
- .global _tx_ev_trap
-_tx_ev_trap:
- flag 1
- nop
- nop
- nop
- b _tx_ev_trap
-
- .global _tx_ev_extension
-_tx_ev_extension:
- flag 1
- nop
- nop
- nop
- b _tx_ev_extension
-
- .global _tx_ev_divide_by_zero
-_tx_ev_divide_by_zero:
- flag 1
- nop
- nop
- nop
- b _tx_ev_divide_by_zero
-
- .global _tx_ev_dc_error
-_tx_ev_dc_error:
- flag 1
- nop
- nop
- nop
- b _tx_ev_dc_error
-
- .global _tx_ev_maligned
-_tx_ev_maligned:
- flag 1
- nop
- nop
- nop
- b _tx_ev_maligned
-
- .global _tx_unsued_0
-_tx_unsued_0:
- flag 1
- nop
- nop
- nop
- b _tx_unsued_0
-
- .global _tx_unused_1
-_tx_unused_1:
- flag 1
- nop
- nop
- nop
- b _tx_unused_1
-
- .global _tx_timer_0
-_tx_timer_0:
-;
-; /* By default, setup Timer 0 as the ThreadX timer interrupt. */
-;
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st r0, [sp, 0] ; Save r0
- st r1, [sp, 4] ; Save r1
- st r2, [sp, 8] ; Save r2
- mov r0, 3
- sr r0, [CONTROL0]
-
- b _tx_timer_interrupt ; Jump to generic ThreadX timer interrupt
- ; handler
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_0
-
- .global _tx_timer_1
-_tx_timer_1:
- sub sp, sp, 160 ; Allocate an interrupt stack frame
- st blink, [sp, 16] ; Save blink
- bl _tx_thread_context_save ; Call context save
-;
-; /* ISR processing goes here. If the application wishes to re-enable
-; interrupts, the SETI instruction can be used here. Also note that
-; register usage in assembly code must be confined to the compiler
-; scratch registers. */
-;
- mov r0, 3
- sr r0, [CONTROL1]
-;
- b _tx_thread_context_restore ; Call context restore
-
-; flag 1
-; nop
-; nop
-; nop
-; b _tx_timer_1
-
- .global _tx_undefined_0
-_tx_undefined_0:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_0
-
- .global _tx_undefined_1
-_tx_undefined_1:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_1
-
- .global _tx_undefined_2
-_tx_undefined_2:
- flag 1
- nop
- nop
- nop
- b _tx_undefined_2
-
- .end
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/vectors.s b/ports/arc_em/metaware/test_validation/threadx_validation/vectors.s
deleted file mode 100644
index c6cbc893..00000000
--- a/ports/arc_em/metaware/test_validation/threadx_validation/vectors.s
+++ /dev/null
@@ -1,29 +0,0 @@
-
-.file "vectors.s"
-.section .ivt,text
-;; This directive forces this section to stay resident even if stripped out by the -zpurgetext linker option
-.sectflag .ivt,include
-
-;// handler's name type number name offset in IVT (hex/dec)
-.long _start ; exception 0 program entry point offset 0x0 0
-.long _tx_memory_error ; exception 1 memory_error offset 0x4 4
-.long _tx_instruction_error ; exception 2 instruction_error offset 0x8 8
-.long _tx_ev_machine_check ; exception 3 EV_MachineCheck offset 0xC 12
-.long _tx_ev_tblmiss_inst ; exception 4 EV_TLBMissI offset 0x10 16
-.long _tx_ev_tblmiss_data ; exception 5 EV_TLBMissD offset 0x14 20
-.long _tx_ev_protection_viol ; exception 6 EV_ProtV offset 0x18 24
-.long _tx_ev_privilege_viol ; exception 7 EV_PrivilegeV offset 0x1C 28
-.long _tx_ev_software_int ; exception 8 EV_SWI offset 0x20 32
-.long _tx_ev_trap ; exception 9 EV_Trap offset 0x24 36
-.long _tx_ev_extension ; exception 10 EV_Extension offset 0x28 40
-.long _tx_ev_divide_by_zero ; exception 11 EV_DivZero offset 0x2C 44
-.long _tx_ev_dc_error ; exception 12 EV_DCError offset 0x30 48
-.long _tx_ev_maligned ; exception 13 EV_Maligned offset 0x34 52
-.long _tx_unsued_0 ; exception 14 unused offset 0x38 56
-.long _tx_unused_1 ; exception 15 unused offset 0x3C 60
-.long _tx_timer_0 ; IRQ 16 Timer 0 offset 0x40 64
-.long _tx_timer_1 ; IRQ 17 Timer 1 offset 0x44 68
-.long _tx_undefined_0 ; IRQ 18 offset 0x48 72
-.long _tx_undefined_1 ; IRQ 19 offset 0x4C 76
-.long _tx_undefined_2 ; IRQ 20 offset 0x50 80
-
diff --git a/ports/arc_em/metaware/test_validation/tx/.cproject b/ports/arc_em/metaware/test_validation/tx/.cproject
deleted file mode 100644
index ce329051..00000000
--- a/ports/arc_em/metaware/test_validation/tx/.cproject
+++ /dev/null
@@ -1,137 +0,0 @@
(Eclipse CDT .cproject XML deleted; the project configuration markup is not preserved in this extract.)
diff --git a/ports/arc_hs/metaware/example_build/sample_threadx/sample_threadx.c b/ports/arc_hs/metaware/example_build/sample_threadx/sample_threadx.c
index 5a03f35c..81cca72b 100644
--- a/ports/arc_hs/metaware/example_build/sample_threadx/sample_threadx.c
+++ b/ports/arc_hs/metaware/example_build/sample_threadx/sample_threadx.c
@@ -82,7 +82,7 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
diff --git a/ports/arc_hs/metaware/example_build/sample_threadx/tx_initialize_low_level.s b/ports/arc_hs/metaware/example_build/sample_threadx/tx_initialize_low_level.s
index af5b147c..2ea4513f 100644
--- a/ports/arc_hs/metaware/example_build/sample_threadx/tx_initialize_low_level.s
+++ b/ports/arc_hs/metaware/example_build/sample_threadx/tx_initialize_low_level.s
@@ -39,7 +39,7 @@ _tx_first_free_address:
;/* FUNCTION RELEASE */
;/* */
;/* _tx_initialize_low_level ARC_HS/MetaWare */
-;/* 6.1.9 */
+;/* 6.1.10 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -76,6 +76,11 @@ _tx_first_free_address:
;/* 10-15-2021 Andres Mlinar Modified comment(s), optimized*/
;/* system stack usage, */
;/* resulting in version 6.1.9 */
+;/* 01-31-2022 Andres Mlinar Modified comment(s), */
+;/* initialize interrupts right */
+;/* before enabling the task */
+;/* scheduler, */
+;/* resulting in version 6.1.10 */
;/* */
;/**************************************************************************/
;VOID _tx_initialize_low_level(VOID)
@@ -101,6 +106,18 @@ _tx_initialize_low_level:
;
st r0, [gp, _tx_initialize_unused_memory@sda]
;
+; /* Done, return to caller. */
+;
+ j_s.d [blink] ; Return to caller
+ nop
+;}
+;
+;VOID _tx_initialize_start_interrupts(VOID)
+;{
+ .align 4
+ .global _tx_initialize_start_interrupts
+ .type _tx_initialize_start_interrupts, @function
+_tx_initialize_start_interrupts:
;
; /* Setup Timer 0 for periodic interrupts at interrupt vector 16. */
;
diff --git a/ports/arc_hs/metaware/inc/tx_port.h b/ports/arc_hs/metaware/inc/tx_port.h
index dd2b4e42..5d61c45c 100644
--- a/ports/arc_hs/metaware/inc/tx_port.h
+++ b/ports/arc_hs/metaware/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h ARC_HS/MetaWare */
-/* 6.1.6 */
+/* 6.1.10 */
/* */
/* AUTHOR */
/* */
@@ -48,9 +48,14 @@
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s), updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 01-31-2022 Andres Mlinar Modified comment(s), */
+/* initialize interrupts right */
+/* before enabling the task */
+/* scheduler, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -184,6 +189,12 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_INLINE_INITIALIZATION
#endif
+/* Define the ARC-specific initialization code that is expanded in the generic source. */
+
+void _tx_initialize_start_interrupts(void);
+
+#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION _tx_initialize_start_interrupts();
+
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
@@ -325,7 +336,7 @@ VOID tx_thread_register_bank_assign(VOID *thread_ptr, UINT register_bank);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARC_HS/MetaWare Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARC_HS/MetaWare Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
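
Taken together, the ARC_HS hunks above split start-up into two phases: _tx_initialize_low_level now returns right after recording the system stack pointer and the first free memory address, and Timer 0 is only armed by the new _tx_initialize_start_interrupts, reached through the TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION hook so that no tick can fire before the scheduler's data structures exist. The C sketch below is illustrative only; the function name example_kernel_startup_order and the elided middle steps are placeholders, not the literal generic initialization code.

#include "tx_api.h"

/* Internal ThreadX routines, declared here only to make the sketch self-contained. */
VOID    _tx_initialize_low_level(VOID);
VOID    _tx_thread_schedule(VOID);

VOID    example_kernel_startup_order(VOID)
{

    /* Phase 1: port-specific low-level setup (stack pointer, unused memory). */
    _tx_initialize_low_level();

    /* ... generic initialization and tx_application_define() run here ... */

#ifdef TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION

    /* Phase 2: on ARC_HS/MetaWare this expands to _tx_initialize_start_interrupts(),
       arming the periodic timer interrupt immediately before scheduling starts. */
    TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION
#endif

    /* Enter the scheduler; control does not return. */
    _tx_thread_schedule();
}
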
diff --git a/ports/arc_hs/metaware/src/tx_thread_interrupt_control.s b/ports/arc_hs/metaware/src/tx_thread_interrupt_control.s
index 0a85e979..6e1adb89 100644
--- a/ports/arc_hs/metaware/src/tx_thread_interrupt_control.s
+++ b/ports/arc_hs/metaware/src/tx_thread_interrupt_control.s
@@ -72,7 +72,7 @@ _tx_thread_interrupt_control:
; /* Apply the new interrupt posture. */
;
seti r0 ; Set desired interrupt state
- j_s.d [blink] ; Return to caller with delay slot
+ j_s.d [blink] ; Return to caller with delay slot
mov r0, r1 ; Return previous mask value. Return value is TX_INT_DISABLE or TX_INT_ENABLE.
;
;}
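
The whitespace-only change above touches _tx_thread_interrupt_control, the service behind tx_interrupt_control(), which returns the previous interrupt posture in r0 from the delay slot. For reference, the usual save-and-restore pattern from application C code looks like the sketch below; the shared counter is illustrative only.

#include "tx_api.h"

static ULONG example_shared_counter;    /* data also touched from an ISR (illustrative) */

VOID    example_critical_section(VOID)
{

UINT    old_posture;

    /* Lock out interrupts, remembering the posture in effect on entry. */
    old_posture = tx_interrupt_control(TX_INT_DISABLE);

    /* Briefly touch the shared data. */
    example_shared_counter++;

    /* Restore whatever the caller had (TX_INT_ENABLE or TX_INT_DISABLE). */
    tx_interrupt_control(old_posture);
}
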
diff --git a/ports/arm11/gnu/example_build/libc.a b/ports/arm11/gnu/example_build/libc.a
deleted file mode 100644
index b03b1626..00000000
Binary files a/ports/arm11/gnu/example_build/libc.a and /dev/null differ
diff --git a/ports/arm11/gnu/example_build/libgcc.a b/ports/arm11/gnu/example_build/libgcc.a
deleted file mode 100644
index da2319e0..00000000
Binary files a/ports/arm11/gnu/example_build/libgcc.a and /dev/null differ
diff --git a/ports/arm9/gnu/example_build/libgcc.a b/ports/arm9/gnu/example_build/libgcc.a
deleted file mode 100644
index d7353496..00000000
Binary files a/ports/arm9/gnu/example_build/libgcc.a and /dev/null differ
diff --git a/ports/cortex_a15/gnu/example_build/libc.a b/ports/cortex_a15/gnu/example_build/libc.a
deleted file mode 100644
index 5b04fa4e..00000000
Binary files a/ports/cortex_a15/gnu/example_build/libc.a and /dev/null differ
diff --git a/ports/cortex_a15/gnu/example_build/libgcc.a b/ports/cortex_a15/gnu/example_build/libgcc.a
deleted file mode 100644
index d7353496..00000000
Binary files a/ports/cortex_a15/gnu/example_build/libgcc.a and /dev/null differ
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a34/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..587034cd
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,158 @@
(Eclipse CDT .cproject XML added for the new Cortex-A34 sample project; the configuration markup is not preserved in this extract.)
diff --git a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/.project b/ports/cortex_a34/ac6/example_build/sample_threadx/.project
similarity index 94%
rename from ports/arc_em/metaware/test_sandbox/threadx_sandbox/.project
rename to ports/cortex_a34/ac6/example_build/sample_threadx/.project
index 247d9fca..a1b15572 100644
--- a/ports/arc_em/metaware/test_sandbox/threadx_sandbox/.project
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/.project
@@ -1,6 +1,6 @@
- sample_threadx_validation
+ sample_threadx
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3.h b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..23bc7fd8
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - assign a shared peripheral interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - assign a block of 32 shared peripheral
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - assign all shared peripheral interrupts
+ * to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * Each Redistributor function below identifies its target explicitly
+ * through a 'gicr' parameter (the index of the Redistributor to
+ * program); no separate selection call is required beforehand
+ */
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
+
+/*
+ * GetPrivateIntPriority - determine the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * Int priority
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * GetPrivateIntPending - query whether a private (SGI/PPI) interrupt is pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntSecurity - assign a private (SGI/PPI) interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetPrivateIntSecurityBlock - assign all 32 private (SGI/PPI)
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * group - the group for the interrupt
+ *
+ * Returns:
+ *
+ *
+ */
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group);
+
+#endif /* ndef GICV3_h */
+
+/* EOF GICv3.h */
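
The distributor API declared above follows a configure-then-enable flow. The sketch below routes a single SPI to the boot CPU; it is illustrative only: the interrupt ID and priority are placeholders, affinity 0.0.0.0 is assumed to be the boot PE, and any security-group programming required by the platform firmware is omitted.

#include <stdint.h>
#include "GICv3.h"

#define EXAMPLE_SPI_ID          42u     /* placeholder SPI number */
#define EXAMPLE_SPI_PRIORITY    0xA0u   /* placeholder priority */

void example_route_spi_to_boot_cpu(void)
{
    /* Enable the distributor groups with affinity routing in both security states. */
    EnableGICD(gicdctlr_EnableAll | gicdctlr_ARE_S | gicdctlr_ARE_NS);

    /* Level-triggered, mid-range priority. */
    ConfigureSPI(EXAMPLE_SPI_ID, gicdicfgr_Level);
    SetSPIPriority(EXAMPLE_SPI_ID, EXAMPLE_SPI_PRIORITY);

    /* Route to the PE at affinity 0.0.0.0 rather than to "any recipient". */
    SetSPIRoute(EXAMPLE_SPI_ID, gicv3PackAffinity(0, 0, 0, 0), gicdirouter_ModeSpecific);

    /* Finally unmask the interrupt at the distributor. */
    EnableSPI(EXAMPLE_SPI_ID);
}
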
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicc.h
new file mode 100644
index 00000000..8e6f0acc
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicc.h
@@ -0,0 +1,249 @@
+/*
+ * GICv3_gicc.h - prototypes and inline functions for GICC system register operations
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_gicc_h
+#define GICV3_gicc_h
+
+/**********************************************************************/
+
+typedef enum
+{
+ sreSRE = (1 << 0),
+ sreDFB = (1 << 1),
+ sreDIB = (1 << 2),
+ sreEnable = (1 << 3)
+} ICC_SREBits_t;
+
+static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
+{
+ asm("msr ICC_SRE_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_SRE_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
+{
+ asm("msr ICC_SRE_EL2, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL2(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_SRE_EL2\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
+{
+ asm("msr ICC_SRE_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_SRE_EL3\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ igrpEnable = (1 << 0),
+ igrpEnableGrp1NS = (1 << 0),
+ igrpEnableGrp1S = (1 << 2)
+} ICC_IGRPBits_t;
+
+static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr ICC_IGRPEN0_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr ICC_IGRPEN1_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
+{
+ asm("msr ICC_IGRPEN1_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ ctlrCBPR = (1 << 0),
+ ctlrCBPR_EL1S = (1 << 0),
+ ctlrEOImode = (1 << 1),
+ ctlrCBPR_EL1NS = (1 << 1),
+ ctlrEOImode_EL3 = (1 << 2),
+ ctlrEOImode_EL1S = (1 << 3),
+ ctlrEOImode_EL1NS = (1 << 4),
+ ctlrRM = (1 << 5),
+ ctlrPMHE = (1 << 6)
+} ICC_CTLRBits_t;
+
+static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
+{
+ asm("msr ICC_CTLR_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_CTLR_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
+{
+ asm("msr ICC_CTLR_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_CTLR_EL3\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+static inline uint64_t getICC_IAR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_IAR0_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_IAR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_IAR1_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_EOIR0(uint32_t interrupt)
+{
+ asm("msr ICC_EOIR0_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_EOIR1(uint32_t interrupt)
+{
+ asm("msr ICC_EOIR1_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_DIR(uint32_t interrupt)
+{
+ asm("msr ICC_DIR_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_PMR(uint32_t priority)
+{
+ asm("msr ICC_PMR_EL1, %0\n; isb" :: "r" ((uint64_t)priority));
+}
+
+static inline void setICC_BPR0(uint32_t binarypoint)
+{
+ asm("msr ICC_BPR0_EL1, %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline void setICC_BPR1(uint32_t binarypoint)
+{
+ asm("msr ICC_BPR1_EL1, %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline uint64_t getICC_BPR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_BPR0_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_BPR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_BPR1_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_RPR(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, ICC_RPR_EL1\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ sgirIRMTarget = 0,
+ sgirIRMAll = (1ull << 40)
+} ICC_SGIRBits_t;
+
+static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr ICC_SGI0R_EL1, %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr ICC_SGI1R_EL1, %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr ICC_ASGI1R_EL1, %0\n; isb" :: "r" (packedbits));
+}
+
+#endif /* ndef GICV3_gicc_h */
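
The inline helpers above wrap the ICC system registers directly. A minimal bring-up and acknowledge/EOI sketch follows; it assumes execution at EL1 with the system-register interface available, and it deliberately skips the Redistributor wake-up and interrupt-grouping steps (WakeupGICR and the security-group routines in GICv3.h) that a complete port also performs.

#include <stdint.h>
#include "GICv3_gicc.h"

#define GIC_SPECIAL_INTID_BASE  1020u   /* INTIDs 1020-1023 are reserved/spurious */

void example_enable_cpu_interface(void)
{
    /* Use the system-register interface rather than the legacy memory-mapped one. */
    setICC_SRE_EL1(sreSRE);

    /* Unmask all priorities at this PE, then enable Group 1 delivery. */
    setICC_PMR(0xFF);
    setICC_IGRPEN1_EL1(igrpEnable);
}

void example_service_one_group1_interrupt(void)
{
    uint32_t intid = (uint32_t)getICC_IAR1();    /* acknowledge the interrupt */

    if (intid < GIC_SPECIAL_INTID_BASE)
    {
        /* ... dispatch to the handler registered for this INTID ... */

        setICC_EOIR1(intid);                     /* signal end of interrupt */
    }
}
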
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicd.c
new file mode 100644
index 00000000..3bfb4a93
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicd.c
@@ -0,0 +1,339 @@
+/*
+ * GICv3_gicd.c - generic driver code for GICv3 distributor
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include <stdint.h>
+
+#include "GICv3.h"
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000
+ const volatile uint32_t GICD_TYPER; // +0x0004
+ const volatile uint32_t GICD_IIDR; // +0x0008
+
+ const volatile uint32_t padding0; // +0x000c
+
+ volatile uint32_t GICD_STATUSR; // +0x0010
+
+ const volatile uint32_t padding1[3]; // +0x0014
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040
+ const volatile uint32_t padding2; // +0x0044
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048
+ const volatile uint32_t padding3; // +0x004c
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050
+ const volatile uint32_t padding4; // +0x0054
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058
+
+ const volatile uint32_t padding5[3]; // +0x005c
+
+ volatile uint32_t GICD_SEIR; // +0x0068
+
+ const volatile uint32_t padding6[5]; // +0x006c
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400
+ volatile uint8_t GICD_ITARGETSR[1024]; // +0x0800
+ volatile uint32_t GICD_ICFGR[64]; // +0x0c00
+ volatile uint32_t GICD_IGRPMODR[32]; // +0x0d00
+ const volatile uint32_t padding7[32]; // +0x0d80
+ volatile uint32_t GICD_NSACR[64]; // +0x0e00
+
+ volatile uint32_t GICD_SGIR; // +0x0f00
+
+ const volatile uint32_t padding8[3]; // +0x0f04
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0f10
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0f20
+
+ const volatile uint32_t padding9[52]; // +0x0f30
+ const volatile uint32_t padding10[5120]; // +0x1000
+
+ volatile uint64_t GICD_IROUTER[1024]; // +0x6000
+} GICv3_distributor;
+
+/*
+ * use the scatter file to place GICD
+ */
+static GICv3_distributor __attribute__((section(".bss.distributor"))) gicd;
+
+void ConfigGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR |= flags;
+}
+
+void DisableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR &= ~flags;
+}
+
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync)
+{
+ if (dosync)
+ {
+ const uint32_t tmask = gicdctlr_ARE_S | gicdctlr_ARE_NS;
+ const uint32_t tval = flags & tmask;
+
+ while ((gicd.GICD_CTLR & tmask) != tval)
+ continue;
+ }
+ else
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ISENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void DisableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ICENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void SetSPIPriority(uint32_t id, uint32_t priority)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ gicd.GICD_IPRIORITYR[bank] = priority;
+}
+
+uint32_t GetSPIPriority(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ return (uint32_t)(gicd.GICD_IPRIORITYR[bank]);
+}
+
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ gicd.GICD_IROUTER[bank] = affinity | (uint64_t)mode;
+}
+
+uint64_t GetSPIRoute(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ return gicd.GICD_IROUTER[bank];
+}
+
+void SetSPITarget(uint32_t id, uint32_t target)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ gicd.GICD_ITARGETSR[bank] = target;
+}
+
+uint32_t GetSPITarget(uint32_t id)
+{
+ uint32_t bank;
+
+    /*
+     * GICD_ITARGETSR has one byte-wide entry per interrupt, i.e. an
+     * 8-bit CPU target bitmap per interrupt
+     */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ return (uint32_t)(gicd.GICD_ITARGETSR[bank]);
+}
+
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config)
+{
+ uint32_t bank, tmp;
+
+ /*
+ * GICD_ICFGR has 16 interrupts per register, i.e. 2-bits of
+ * configuration per register
+ */
+ bank = (id >> 4) & RANGE_LIMIT(gicd.GICD_ICFGR);
+ config &= 3;
+
+ id = (id & 0xf) << 1;
+
+ tmp = gicd.GICD_ICFGR[bank];
+ tmp &= ~(3 << id);
+ tmp |= config << id;
+ gicd.GICD_ICFGR[bank] = tmp;
+}
+
+void SetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ISPENDR[bank] = 1 << id;
+}
+
+void ClearSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ICPENDR[bank] = 1 << id;
+}
+
+uint32_t GetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ return (gicd.GICD_ICPENDR[bank] >> id) & 1;
+}
+
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group)
+{
+ uint32_t bank, groupmod;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_IGROUPR);
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicd.GICD_IGROUPR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGROUPR[bank] &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicd.GICD_IGRPMODR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGRPMODR[bank] &= ~(1 << id);
+}
+
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group)
+{
+ uint32_t groupmod;
+ const uint32_t nbits = (sizeof group * 8) - 1;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ block &= RANGE_LIMIT(gicd.GICD_IGROUPR);
+
+ /*
+ * get each bit of group config duplicated over all 32-bits in a word
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicd.GICD_IGROUPR[block] = group;
+ gicd.GICD_IGRPMODR[block] = groupmod;
+}
+
+void SetSPISecurityAll(GICIGROUPRBits_t group)
+{
+ uint32_t block;
+
+ /*
+ * GICD_TYPER.ITLinesNumber gives (No. SPIS / 32) - 1, and we
+ * want to iterate over all blocks excluding 0 (which are the
+ * SGI/PPI interrupts, and not relevant here)
+ */
+ for (block = (gicd.GICD_TYPER & ((1 << 5) - 1)); block > 0; --block)
+ SetSPISecurityBlock(block, group);
+}
+
+/* EOF GICv3_gicd.c */
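
A typical call sequence for bringing one SPI into service with the distributor helpers above might look like the following sketch. The interrupt ID (34) and priority (0xA0) are illustrative placeholders, and gicigroupr_G1NS is the Group 1 Non-secure enumerator from GICv3.h that the startup code also refers to.

/* Sketch only: make SPI 34 a Non-secure Group 1 interrupt routed to the
   PE with affinity 0.0.0.0, give it a mid-range priority and enable it. */
static void example_setup_spi(void)
{
    SetSPIRoute(34, 0, 0);                  /* affinity 0.0.0.0, routing mode "specific PE" */
    SetSPISecurity(34, gicigroupr_G1NS);    /* Group 1 Non-secure */
    SetSPIPriority(34, 0xA0);
    EnableSPI(34);
}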
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicr.c
new file mode 100644
index 00000000..7b437b18
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/GICv3_gicr.c
@@ -0,0 +1,308 @@
+/*
+ * GICv3_gicr.c - generic driver code for GICv3 redistributor
+ *
+ * Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include "GICv3.h"
+
+/*
+ * physical LPI Redistributor register map
+ */
+typedef struct
+{
+ volatile uint32_t GICR_CTLR; // +0x0000 - RW - Redistributor Control Register
+ const volatile uint32_t GICR_IIDR; // +0x0004 - RO - Implementer Identification Register
+ const volatile uint32_t GICR_TYPER[2]; // +0x0008 - RO - Redistributor Type Register
+ volatile uint32_t GICR_STATUSR; // +0x0010 - RW - Error Reporting Status Register, optional
+ volatile uint32_t GICR_WAKER; // +0x0014 - RW - Redistributor Wake Register
+ const volatile uint32_t padding1[2]; // +0x0018 - RESERVED
+#ifndef USE_GIC600
+ volatile uint32_t IMPDEF1[8]; // +0x0020 - ?? - IMPLEMENTATION DEFINED
+#else
+ volatile uint32_t GICR_FCTLR; // +0x0020 - RW - Function Control Register
+ volatile uint32_t GICR_PWRR; // +0x0024 - RW - Power Management Control Register
+ volatile uint32_t GICR_CLASS; // +0x0028 - RW - Class Register
+ const volatile uint32_t padding2[5]; // +0x002C - RESERVED
+#endif
+ volatile uint64_t GICR_SETLPIR; // +0x0040 - WO - Set LPI Pending Register
+ volatile uint64_t GICR_CLRLPIR; // +0x0048 - WO - Clear LPI Pending Register
+ const volatile uint32_t padding3[8]; // +0x0050 - RESERVED
+ volatile uint64_t GICR_PROPBASER; // +0x0070 - RW - Redistributor Properties Base Address Register
+ volatile uint64_t GICR_PENDBASER; // +0x0078 - RW - Redistributor LPI Pending Table Base Address Register
+ const volatile uint32_t padding4[8]; // +0x0080 - RESERVED
+ volatile uint64_t GICR_INVLPIR; // +0x00A0 - WO - Redistributor Invalidate LPI Register
+ const volatile uint32_t padding5[2]; // +0x00A8 - RESERVED
+ volatile uint64_t GICR_INVALLR; // +0x00B0 - WO - Redistributor Invalidate All Register
+ const volatile uint32_t padding6[2]; // +0x00B8 - RESERVED
+ volatile uint64_t GICR_SYNCR; // +0x00C0 - RO - Redistributor Synchronize Register
+ const volatile uint32_t padding7[2]; // +0x00C8 - RESERVED
+ const volatile uint32_t padding8[12]; // +0x00D0 - RESERVED
+ volatile uint64_t IMPDEF2; // +0x0100 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding9[2]; // +0x0108 - RESERVED
+ volatile uint64_t IMPDEF3; // +0x0110 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding10[2]; // +0x0118 - RESERVED
+} GICv3_redistributor_RD;
+
+/*
+ * SGI and PPI Redistributor register map
+ */
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR0; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+        const volatile uint32_t padding8[31];        // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+        volatile uint32_t GICR_ICFGR[2];             // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+        volatile uint32_t GICR_IGRPMODR0;            // +0x0D00 - RW - Interrupt Group Modifier Register
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+} GICv3_redistributor_SGI;
+
+/*
+ * We have a multiplicity of GIC Redistributors; on the GIC-AEM and
+ * GIC-500 they are arranged as one 128KB region per redistributor: one
+ * 64KB page of GICR LPI registers, and one 64KB page of GICR Private
+ * Int registers
+ */
+typedef struct
+{
+ union
+ {
+ GICv3_redistributor_RD RD_base;
+ uint8_t padding[64 * 1024];
+ } RDblock;
+
+ union
+ {
+ GICv3_redistributor_SGI SGI_base;
+ uint8_t padding[64 * 1024];
+ } SGIblock;
+} GICv3_GICR;
+
+/*
+ * use the scatter file to place GIC Redistributor base address
+ *
+ * although this code doesn't know how many Redistributor banks
+ * a particular system will have, we declare gicrbase as an array
+ * to avoid unwanted compiler optimisations when calculating the
+ * base of a particular Redistributor bank
+ */
+static const GICv3_GICR gicrbase[2] __attribute__((section (".bss.redistributor")));
+
+/**********************************************************************/
+
+/*
+ * utility functions to calculate base of a particular
+ * Redistributor bank
+ */
+
+static inline GICv3_redistributor_RD *const getgicrRD(uint32_t gicr)
+{
+ GICv3_GICR *const arraybase = (GICv3_GICR *const)&gicrbase;
+
+ return &((arraybase + gicr)->RDblock.RD_base);
+}
+
+static inline GICv3_redistributor_SGI *const getgicrSGI(uint32_t gicr)
+{
+ GICv3_GICR *arraybase = (GICv3_GICR *)(&gicrbase);
+
+ return &(arraybase[gicr].SGIblock.SGI_base);
+}
+
+/**********************************************************************/
+
+// This function walks a block of RDs to find one with the matching affinity
+uint32_t GetGICR(uint32_t affinity)
+{
+ GICv3_redistributor_RD* gicr;
+ uint32_t index = 0;
+
+ do
+ {
+ gicr = getgicrRD(index);
+ if (gicr->GICR_TYPER[1] == affinity)
+ return index;
+
+ index++;
+ }
+ while((gicr->GICR_TYPER[0] & (1<<4)) == 0); // Keep looking until GICR_TYPER.Last reports no more RDs in block
+
+    return 0xFFFFFFFF; // return -1 to signal that no matching RD was found
+}
+
+void WakeupGICR(uint32_t gicr)
+{
+ GICv3_redistributor_RD *const gicrRD = getgicrRD(gicr);
+#ifdef USE_GIC600
+ //Power up Re-distributor for GIC-600
+ gicrRD->GICR_PWRR = 0x2;
+#endif
+
+ /*
+ * step 1 - ensure GICR_WAKER.ProcessorSleep is off
+ */
+ gicrRD->GICR_WAKER &= ~gicrwaker_ProcessorSleep;
+
+ /*
+ * step 2 - wait for children asleep to be cleared
+ */
+ while ((gicrRD->GICR_WAKER & gicrwaker_ChildrenAsleep) != 0)
+ continue;
+
+ /*
+ * OK, GICR is go
+ */
+ return;
+}
+
+void EnablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISENABLER = 1 << id;
+}
+
+void DisablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICENABLER = 1 << id;
+}
+
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ gicrSGI->GICR_IPRIORITYR[id] = priority;
+}
+
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ return (uint32_t)(gicrSGI->GICR_IPRIORITYR[id]);
+}
+
+void SetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISPENDR = 1 << id;
+}
+
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ICPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICPENDR = 1 << id;
+}
+
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ return (gicrSGI->GICR_ISPENDR >> id) & 0x01;
+}
+
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ uint32_t groupmod;
+
+ /*
+ * GICR_IGROUPR0 is one 32-bit register
+ */
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicrSGI->GICR_IGROUPR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGROUPR0 &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicrSGI->GICR_IGRPMODR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGRPMODR0 &= ~(1 << id);
+}
+
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ const uint32_t nbits = (sizeof group * 8) - 1;
+ uint32_t groupmod;
+
+ /*
+ * get each bit of group config duplicated over all 32 bits
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicrSGI->GICR_IGROUPR0 = group;
+ gicrSGI->GICR_IGRPMODR0 = groupmod;
+}
+
+/* EOF GICv3_gicr.c */
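
In C, the per-core Redistributor bring-up that startup.S performs in assembly would look roughly like the sketch below. The affinity value would normally be read from MPIDR_EL1, and PPI 30 (the Non-secure EL1 physical timer) together with the priority 0xA0 are placeholders.

/* Sketch only: locate, wake and configure this core's Redistributor,
   then enable one PPI for it. */
static void example_bringup_redistributor(uint32_t affinity)
{
    uint32_t gicr = GetGICR(affinity);              /* walk the RD block for a match */

    if (gicr == 0xFFFFFFFF)
        return;                                     /* no matching Redistributor found */

    WakeupGICR(gicr);                               /* clear ProcessorSleep, wait for wake-up */
    SetPrivateIntSecurityBlock(gicr, gicigroupr_G1NS);
    SetPrivateIntPriority(gicr, 30, 0xA0);          /* PPI 30, placeholder priority */
    EnablePrivateInt(gicr, 30);
}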
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.S
new file mode 100644
index 00000000..e7f95aa7
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.S
@@ -0,0 +1,133 @@
+//
+// Armv8-A AArch64 - Basic Mutex Example
+// Includes the option (USE_LSE_ATOMIC) to use Large System Extension (LSE) atomics introduced in Armv8.1-A
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+ .global _mutex_initialize
+ .global _mutex_acquire
+ .global _mutex_release
+
+//
+// These routines implement the mutex management functions required for running
+// the Arm C library in a multi-threaded environment.
+//
+// The non-LSE version uses a value of 0 to represent an unlocked mutex, and
+// 1 for a locked mutex; the LSE version (USE_LSE_ATOMIC) implements a ticket lock
+//
+// **********************************************************************
+//
+
+ .type _mutex_initialize, "function"
+ .cfi_startproc
+_mutex_initialize:
+
+ //
+ // mark the mutex as unlocked
+ //
+ mov w1, #0
+ str w1, [x0]
+
+ //
+ // we are running multi-threaded, so set a non-zero return
+ // value (function prototype says use 1)
+ //
+ mov w0, #1
+ ret
+ .cfi_endproc
+
+#if !defined(USE_LSE_ATOMIC)
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+
+ //
+ // send ourselves an event, so we don't stick on the wfe at the
+ // top of the loop
+ //
+ sevl
+
+ //
+ // wait until the mutex is available
+ //
+loop:
+ wfe
+ ldaxr w1, [x0]
+ cbnz w1, loop
+
+ //
+ // mutex is (at least, it was) available - try to claim it
+ //
+ mov w1, #1
+ stxr w2, w1, [x0]
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+
+ mov w1, #0
+ stlr w1, [x0]
+ ret
+ .cfi_endproc
+
+#else // LSE version
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+ // This uses a "ticket lock". The lock is stored as a 32-bit value:
+ // - the upper 16-bits record the thread's ticket number ("take a ticket")
+ // - the lower 16-bits record the ticket being served ("now serving")
+
+ // atomically load then increment the thread's ticket number ("take a ticket")
+ mov w3, #(1 << 16)
+ ldadda w3, w1, [x0]
+
+ // is the ticket now being served?
+ eor w2, w1, w1, ror #16
+ cbz w2, loop_exit
+
+ // no, so wait for the ticket to be served
+
+ // send a local event to avoid missing an unlock before the exclusive load
+ sevl
+
+loop:
+ wfe
+ ldaxrh w3, [x0]
+ eor w2, w3, w1, lsr #16
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+loop_exit:
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+ mov w1, #1
+ staddlh w1, [x0]
+ ret
+ .cfi_endproc
+#endif
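
The LSE ticket lock above is compact but terse; the following C11 sketch (illustrative only, not part of the port) expresses the same idea with the two halfwords split into separate counters: acquire takes a ticket and spins until it is being served, release advances the now-serving counter.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative C11 equivalent of the LSE ticket lock above. */
typedef struct
{
    _Atomic uint16_t next;      /* "take a ticket" counter */
    _Atomic uint16_t serving;   /* "now serving" counter   */
} ticket_lock_t;

static void ticket_lock_acquire(ticket_lock_t *lock)
{
    /* atomically take a ticket (the assembly uses LDADDA on the packed word) */
    uint16_t my_ticket = atomic_fetch_add_explicit(&lock->next, 1, memory_order_relaxed);

    /* spin until our ticket is being served; the assembly waits with WFE
       instead of busy-polling */
    while (atomic_load_explicit(&lock->serving, memory_order_acquire) != my_ticket)
        ;
}

static void ticket_lock_release(ticket_lock_t *lock)
{
    /* advance "now serving" with release semantics (STADDLH in the assembly) */
    atomic_fetch_add_explicit(&lock->serving, 1, memory_order_release);
}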
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.h b/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.h
new file mode 100644
index 00000000..ec1a1d28
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/MP_Mutexes.h
@@ -0,0 +1,66 @@
+/*
+ * Armv8-A AArch64 - Basic Mutex Example
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef MP_MUTEX_H
+#define MP_MUTEX_H
+
+/*
+ * The Arm C library calls-out to these functions to manage multithreading.
+ * They can also be called by user application code.
+ *
+ * Mutex type is specified by the Arm C library
+ *
+ * Declare function prototypes for libc mutex routines
+ */
+typedef signed int *mutex;
+
+/*
+ * int _mutex_initialize(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ * 0 - application is non-threaded
+ * 1 - application is threaded
+ * The C library uses the return result to indicate whether it is being used in a multithreaded environment.
+ */
+int _mutex_initialize(mutex *m);
+
+/*
+ * void _mutex_acquire(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * Routine does not return until the mutex has been claimed. A load-acquire
+ * is used to guarantee that the mutex claim is properly ordered with
+ * respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_acquire(mutex *m);
+
+/*
+ * void _mutex_release(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * A store-release is used to guarantee that the mutex release is properly
+ *   ordered with respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_release(mutex *m);
+
+#endif
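
Since the header notes these routines can also be called from application code, here is a minimal usage sketch; the counter and function names are illustrative, and the mutex object is treated purely as storage for the lock word.

#include "MP_Mutexes.h"

/* Illustrative only: guard a shared counter with the C library mutex
   primitives declared above. */
static mutex shared_lock;           /* storage for the lock word */
static volatile int shared_counter;

void example_counter_init(void)
{
    (void)_mutex_initialize(&shared_lock);
}

void example_counter_increment(void)
{
    _mutex_acquire(&shared_lock);   /* blocks until the lock is free */
    shared_counter++;               /* critical section */
    _mutex_release(&shared_lock);
}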
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a34/ac6/example_build/sample_threadx/PPM_AEM.h
new file mode 100644
index 00000000..52c9a0fe
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/PPM_AEM.h
@@ -0,0 +1,66 @@
+//
+// Private Peripheral Map for the v8 Architecture Envelope Model
+//
+// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef PPM_AEM_H
+#define PPM_AEM_H
+
+//
+// Distributor layout
+//
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_IGROUP 0x0080
+#define GICD_ISENABLE 0x0100
+#define GICD_ICENABLE 0x0180
+#define GICD_ISPEND 0x0200
+#define GICD_ICPEND 0x0280
+#define GICD_ISACTIVE 0x0300
+#define GICD_ICACTIVE 0x0380
+#define GICD_IPRIORITY 0x0400
+#define GICD_ITARGETS 0x0800
+#define GICD_ICFG 0x0c00
+#define GICD_PPISR 0x0d00
+#define GICD_SPISR 0x0d04
+#define GICD_SGIR 0x0f00
+#define GICD_CPENDSGI 0x0f10
+#define GICD_SPENDSGI 0x0f20
+#define GICD_PIDR4 0x0fd0
+#define GICD_PIDR5 0x0fd4
+#define GICD_PIDR6 0x0fd8
+#define GICD_PIDR7 0x0fdc
+#define GICD_PIDR0 0x0fe0
+#define GICD_PIDR1 0x0fe4
+#define GICD_PIDR2 0x0fe8
+#define GICD_PIDR3 0x0fec
+#define GICD_CIDR0 0x0ff0
+#define GICD_CIDR1 0x0ff4
+#define GICD_CIDR2 0x0ff8
+#define GICD_CIDR3 0x0ffc
+
+//
+// CPU Interface layout
+//
+#define GICC_CTLR 0x0000
+#define GICC_PMR 0x0004
+#define GICC_BPR 0x0008
+#define GICC_IAR 0x000c
+#define GICC_EOIR 0x0010
+#define GICC_RPR 0x0014
+#define GICC_HPPIR 0x0018
+#define GICC_ABPR 0x001c
+#define GICC_AIAR 0x0020
+#define GICC_AEOIR 0x0024
+#define GICC_AHPPIR 0x0028
+#define GICC_APR0 0x00d0
+#define GICC_NSAPR0 0x00e0
+#define GICC_IIDR 0x00fc
+#define GICC_DIR 0x1000
+
+#endif // PPM_AEM_H
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..17cceb01
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,393 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_TIMER timer_0;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
+
+/* Define main entry point. */
+
+int main(void)
+{
+
+ /* Initialize timer. */
+ init_timer();
+
+ /* Enter ThreadX. */
+ tx_kernel_enter();
+
+ return 0;
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This thread simply sits in a while-forever-sleep loop.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..44afb600
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,331 @@
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..e5783c7c
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,103 @@
+;********************************************************
+; Scatter file for Armv8-A Startup code on FVP Base model
+; Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;********************************************************
+
+LOAD 0x80000000
+{
+ EXEC +0
+ {
+ startup.o (StartUp, +FIRST)
+ * (+RO, +RW, +ZI)
+ }
+
+ ;
+ ; App stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ ARM_LIB_STACK +0 ALIGN 64 EMPTY 8 * 0x4000 {}
+
+ ;
+ ; Separate heap - import symbol __use_two_region_memory
+ ; in source code for this to work correctly
+ ;
+ ARM_LIB_HEAP +0 ALIGN 64 EMPTY 0xA0000 {}
+
+ ;
+ ; Handler stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ HANDLER_STACK +0 ALIGN 64 EMPTY 4 * 0x4000 {}
+
+ ;
+ ; Stacks for EL3
+ ;
+ EL3_STACKS +0 ALIGN 64 EMPTY 8 * 0x1000 {}
+ ;
+ ; Strictly speaking, the L1 tables do not need to
+ ; be so strongly aligned, but no matter
+ ;
+ TTB0_L1 +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; Various sets of L2 tables
+ ;
+ ; Alignment is 4KB, since the code uses a 4K page
+ ; granularity - larger granularities would require
+ ; correspondingly stricter alignment
+ ;
+ TTB0_L2_RAM +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PRIVATE +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PERIPH +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; The startup code uses the end of this region to calculate
+ ; the top of memory - do not place any RAM regions after it
+ ;
+ TOP_OF_RAM +0 EMPTY 4 {}
+
+ ;
+ ; CS3 Peripherals is a 64MB region from 0x1c000000
+ ; that includes the following:
+ ; System Registers at 0x1C010000
+ ; UART0 (PL011) at 0x1C090000
+ ; Color LCD Controller (PL111) at 0x1C1F0000
+ ; plus a number of others.
+ ; CS3_PERIPHERALS is used by the startup code for page-table generation
+ ; This region is not truly empty, but we have no
+ ; predefined objects that live within it
+ ;
+ CS3_PERIPHERALS 0x1c000000 EMPTY 0x90000 {}
+
+ ;
+ ; Place the UART peripheral registers data structure
+ ; This is only really needed if USE_SERIAL_PORT is defined, but
+ ; the linker will remove unused sections if not needed
+; PL011 0x1c090000 UNINIT 0x1000
+; {
+; uart.o (+ZI)
+; }
+ ; Note that some other CS3_PERIPHERALS follow this
+
+ ;
+ ; GICv3 distributor
+ ;
+ GICD 0x2f000000 UNINIT 0x8000
+ {
+ GICv3_gicd.o (.bss.distributor)
+ }
+
+ ;
+ ; GICv3 redistributors
+ ; 128KB for each redistributor in the system
+ ;
+ GICR 0x2f100000 UNINIT 0x80000
+ {
+ GICv3_gicr.o (.bss.redistributor)
+ }
+}
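
Adding another fixed-address peripheral region follows the same pattern used for the GICD/GICR regions above: the driver declares its register overlay in a named section, and a matching execution region in the scatter file pins that section to the peripheral's base address. A hypothetical sketch (all names and addresses are placeholders):

#include <stdint.h>

/* Hypothetical driver object placed by the scatter file: the named
   .bss section is selected by a matching execution region, exactly as
   GICv3_gicd.c places 'gicd' into the GICD region. */
struct my_peripheral
{
    volatile uint32_t CTRL;     /* +0x00 */
    volatile uint32_t STATUS;   /* +0x04 */
};

static struct my_peripheral my_dev __attribute__((section(".bss.my_peripheral")));

/* corresponding scatter entry (placeholder address):
 *
 *   MY_DEV 0x1c0a0000 UNINIT 0x1000
 *   {
 *       my_driver.o (.bss.my_peripheral)
 *   }
 */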
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.c
new file mode 100644
index 00000000..4dc009b2
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.c
@@ -0,0 +1,122 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "sp804_timer.h"
+
+#define TIMER_SP804_CTRL_TIMEREN (1 << 7)
+#define TIMER_SP804_CTRL_TIMERMODE   (1 << 6)    // Bit 6: 0=free-running, 1=periodic
+#define TIMER_SP804_CTRL_INTENABLE (1 << 5)
+#define TIMER_SP804_CTRL_TIMERSIZE (1 << 1) // Bit 1: 0=16-bit, 1=32-bit
+#define TIMER_SP804_CTRL_ONESHOT (1 << 0) // Bit 0: 0=wrapping, 1=one-shot
+
+#define TIMER_SP804_CTRL_PRESCALE_1 (0 << 2) // clk/1
+#define TIMER_SP804_CTRL_PRESCALE_4 (1 << 2) // clk/4
+#define TIMER_SP804_CTRL_PRESCALE_8 (2 << 2) // clk/8
+
+struct sp804_timer
+{
+ volatile uint32_t Time1Load; // +0x00
+ const volatile uint32_t Time1Value; // +0x04 - RO
+ volatile uint32_t Timer1Control; // +0x08
+ volatile uint32_t Timer1IntClr; // +0x0C - WO
+ const volatile uint32_t Timer1RIS; // +0x10 - RO
+ const volatile uint32_t Timer1MIS; // +0x14 - RO
+ volatile uint32_t Timer1BGLoad; // +0x18
+
+ volatile uint32_t Time2Load; // +0x20
+ volatile uint32_t Time2Value; // +0x24
+ volatile uint8_t Timer2Control; // +0x28
+ volatile uint32_t Timer2IntClr; // +0x2C - WO
+ const volatile uint32_t Timer2RIS; // +0x30 - RO
+ const volatile uint32_t Timer2MIS; // +0x34 - RO
+ volatile uint32_t Timer2BGLoad; // +0x38
+
+ // Not including ID registers
+
+};
+
+// Pointer to the dual timer peripheral; its base address is set at run-time by setTimerBaseAddress()
+struct sp804_timer* dual_timer;
+
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address)
+{
+ dual_timer = (struct sp804_timer*)address;
+ return;
+}
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt)
+{
+ uint32_t tmp = 0;
+
+ dual_timer->Time1Load = load_value;
+
+ // Fixed setting: 32-bit, no prescaling
+ tmp = TIMER_SP804_CTRL_TIMERSIZE | TIMER_SP804_CTRL_PRESCALE_1 | TIMER_SP804_CTRL_TIMERMODE;
+
+ // Settings from parameters: interrupt generation & reload
+ tmp = tmp | interrupt | auto_reload;
+
+ // Write control register
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Starts the timer
+void startTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp | TIMER_SP804_CTRL_TIMEREN; // Set TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Stops the timer
+void stopTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp & ~TIMER_SP804_CTRL_TIMEREN; // Clear TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void)
+{
+ return dual_timer->Time1Value;
+}
+
+
+void clearTimerIrq(void)
+{
+ // A write to this register, of any value, clears the interrupt
+ dual_timer->Timer1IntClr = 1;
+}
+
+
+// ------------------------------------------------------------
+// End of sp804_timer.c
+// ------------------------------------------------------------
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.h
new file mode 100644
index 00000000..777062cc
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/sp804_timer.h
@@ -0,0 +1,53 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+// Header File
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#ifndef _SP804_TIMER_
+#define _SP804_TIMER_
+
+#include <stdint.h>
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address);
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+
+#define SP804_AUTORELOAD (0)
+#define SP804_SINGLESHOT (1)
+#define SP804_GENERATE_IRQ (1 << 5)
+#define SP804_NO_IRQ (0)
+
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt);
+
+
+// Starts the timer
+void startTimer(void);
+
+
+// Stops the timer
+void stopTimer(void);
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void);
+
+
+// Clears the timer interrupt
+void clearTimerIrq(void);
+
+#endif
+
+// ------------------------------------------------------------
+// End of sp804_timer.h
+// ------------------------------------------------------------
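
A typical use of this driver, as a sketch: the base address shown is assumed to be the SP804 Dual Timer 0 on the Arm VE/FVP Base platform memory map, and the load value and function names are likewise illustrative.

#include "sp804_timer.h"

/* Sketch only: start a periodic, interrupt-generating timer. */
void example_timer_setup(void)
{
    setTimerBaseAddress(0x1C110000ull);   /* assumed SP804 base on the FVP Base/VE map */
    initTimer(0x10000, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
    startTimer();
}

/* Sketch only: called from the interrupt handler wired to the timer SPI. */
void example_timer_handler(void)
{
    clearTimerIrq();                      /* any write clears the interrupt */
}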
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a34/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..de100e56
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,779 @@
+// ------------------------------------------------------------
+// Armv8-A MPCore EL3 AArch64 Startup Code
+//
+// Basic Vectors, MMU, caches and GICv3 initialization
+//
+// Exits in EL1 AArch64
+//
+// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_mmu.h"
+#include "v8_system.h"
+
+
+ .section StartUp, "ax"
+ .balign 4
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+
+ .global InvalidateUDCaches
+ .global ZeroBlock
+
+ .global SetPrivateIntSecurityBlock
+ .global SetSPISecurityAll
+ .global SetPrivateIntPriority
+
+ .global GetGICR
+ .global WakeupGICR
+ .global SyncAREinGICD
+ .global EnableGICD
+ .global EnablePrivateInt
+ .global GetPrivateIntPending
+ .global ClearPrivateIntPending
+
+ .global __main
+ //.global MainApp
+
+ .global Image$$EXEC$$RO$$Base
+ .global Image$$TTB0_L1$$ZI$$Base
+ .global Image$$TTB0_L2_RAM$$ZI$$Base
+ .global Image$$TTB0_L2_PERIPH$$ZI$$Base
+ .global Image$$TOP_OF_RAM$$ZI$$Base
+ .global Image$$GICD$$ZI$$Base
+ .global Image$$ARM_LIB_STACK$$ZI$$Limit
+ .global Image$$EL3_STACKS$$ZI$$Limit
+ .global Image$$CS3_PERIPHERALS$$ZI$$Base
+        // use separate stack and heap, as anticipated by the scatter file
+ .global __use_two_region_memory
+
+
+// ------------------------------------------------------------
+
+ .global start64
+ .type start64, "function"
+start64:
+
+ //
+ // program the VBARs
+ //
+ ldr x1, =el1_vectors
+ msr VBAR_EL1, x1
+
+ ldr x1, =el2_vectors
+ msr VBAR_EL2, x1
+
+ ldr x1, =el3_vectors
+ msr VBAR_EL3, x1
+
+
+ // GIC-500 comes out of reset in GICv2 compatibility mode - first set
+ // system register enables for all relevant exception levels, and
+ // select GICv3 operating mode
+ //
+ msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL3, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
+
+ //
+    // set lower exception levels as non-secure, with no access
+    // back to EL2 or EL3, and mark them as AArch64 capable
+ //
+ mov x3, #(SCR_EL3_RW | \
+ SCR_EL3_SMD | \
+ SCR_EL3_NS) // Set NS bit, to access Non-secure registers
+ msr SCR_EL3, x3
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL2, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
+
+
+ //
+ // no traps or VM modifications from the Hypervisor, EL1 is AArch64
+ //
+ mov x2, #HCR_EL2_RW
+ msr HCR_EL2, x2
+
+ //
+ // VMID is still significant, even when virtualisation is not
+ // being used, so ensure VTTBR_EL2 is properly initialised
+ //
+ msr VTTBR_EL2, xzr
+
+ //
+ // VMPIDR_EL2 holds the value of the Virtualization Multiprocessor ID. This is the value returned by Non-secure EL1 reads of MPIDR_EL1.
+ // VPIDR_EL2 holds the value of the Virtualization Processor ID. This is the value returned by Non-secure EL1 reads of MIDR_EL1.
+ // Both of these registers are architecturally UNKNOWN at reset, and so they must be set to the correct value
+ // (even if EL2/virtualization is not being used), otherwise non-secure EL1 reads of MPIDR_EL1/MIDR_EL1 will return garbage values.
+ // This guarantees that any future reads of MPIDR_EL1 and MIDR_EL1 from Non-secure EL1 will return the correct value.
+ //
+ mrs x0, MPIDR_EL1
+ msr VMPIDR_EL2, x0
+ mrs x0, MIDR_EL1
+ msr VPIDR_EL2, x0
+
+ // extract the core number from MPIDR_EL1 and store it in
+ // x19 (defined by the AAPCS as callee-saved), so we can re-use
+ // the number later
+ //
+ bl GetCPUID
+ mov x19, x0
+
+ //
+ // neither EL3 nor EL2 trap floating point or accesses to CPACR
+ //
+ msr CPTR_EL3, xzr
+ msr CPTR_EL2, xzr
+
+ //
+ // SCTLR_ELx may come out of reset with UNKNOWN values so we will
+    // set the fields to 0 except, possibly, the endianness field(s).
+ // Note that setting SCTLR_EL2 or the EL0 related fields of SCTLR_EL1
+ // is not strictly needed, since we're never in EL2 or EL0
+ //
+#ifdef __ARM_BIG_ENDIAN
+ mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
+#else
+ mov x0, #0
+#endif
+ msr SCTLR_EL3, x0
+ msr SCTLR_EL2, x0
+ msr SCTLR_EL1, x0
+
+#ifdef CORTEXA
+ //
+ // Configure ACTLR_EL[23]
+ // ----------------------
+ //
+ // These bits are IMPLEMENTATION DEFINED, so are different for
+ // different processors
+ //
+ // For Cortex-A57, the controls we set are:
+ //
+ // Enable lower level access to CPUACTLR_EL1
+ // Enable lower level access to CPUECTLR_EL1
+ // Enable lower level access to L2CTLR_EL1
+ // Enable lower level access to L2ECTLR_EL1
+ // Enable lower level access to L2ACTLR_EL1
+ //
+ mov x0, #((1 << 0) | \
+ (1 << 1) | \
+ (1 << 4) | \
+ (1 << 5) | \
+ (1 << 6))
+
+ msr ACTLR_EL3, x0
+ msr ACTLR_EL2, x0
+
+ //
+ // configure CPUECTLR_EL1
+ //
+    // These bits are IMP DEF, so may need to be different for different
+ // processors
+ //
+ // SMPEN - bit 6 - Enables the processor to receive cache
+ // and TLB maintenance operations
+ //
+ // Note: For Cortex-A57/53 SMPEN should be set before enabling
+ // the caches and MMU, or performing any cache and TLB
+ // maintenance operations.
+ //
+ // This register has a defined reset value, so we use a
+ // read-modify-write sequence to set SMPEN
+ //
+ mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
+ orr x0, x0, #(1 << 6) // Set the SMPEN bit
+ msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
+
+ isb
+#endif
+
+ //
+ // That's the last of the control settings for now
+ //
+ // Note: no ISB after all these changes, as registers won't be
+ // accessed until after an exception return, which is itself a
+ // context synchronisation event
+ //
+
+ //
+ // Setup some EL3 stack space, ready for calling some subroutines, below.
+ //
+ // Stack space allocation is CPU-specific, so use CPU
+ // number already held in x19
+ //
+ // 2^12 bytes per CPU for the EL3 stacks
+ //
+ ldr x0, =Image$$EL3_STACKS$$ZI$$Limit
+ sub x0, x0, x19, lsl #12
+ mov sp, x0
+
+ //
+ // we need to configure the GIC while still in secure mode, specifically
+ // all PPIs and SPIs have to be programmed as Group1 interrupts
+ //
+
+ //
+ // Before the GIC can be reliably programmed, we need to
+ // enable Affinity Routing, as this affects where the configuration
+ // registers are (with Affinity Routing enabled, some registers are
+ // in the Redistributor, whereas those same registers are in the
+ // Distributor with Affinity Routing disabled (i.e. when in GICv2
+ // compatibility mode).
+ //
+ mov x0, #(1 << 4) | (1 << 5) // gicdctlr_ARE_S | gicdctlr_ARE_NS
+ mov x1, x19
+ bl SyncAREinGICD
+
+ //
+ // The Redistributor comes out of reset assuming the processor is
+ // asleep - correct that assumption
+ //
+ bl GetAffinity
+ bl GetGICR
+ mov w20, w0 // Keep a copy for later
+ bl WakeupGICR
+
+ //
+ // Now we're ready to set security and other initialisations
+ //
+ // This is a per-CPU configuration for these interrupts
+ //
+ // for the first cluster, CPU number is the redistributor index
+ //
+ mov w0, w20
+ mov w1, #1 // gicigroupr_G1NS
+ bl SetPrivateIntSecurityBlock
+
+ //
+ // While we're in the Secure World, set the priority mask low enough
+ // for it to be writable in the Non-Secure World
+ //
+ //mov x0, #16 << 3 // 5 bits of priority in the Secure world
+ mov x0, #0xFF // for Non-Secure interrupts
+ msr ICC_PMR_EL1, x0
+
+ //
+ // there's more GIC setup to do, but only for the primary CPU
+ //
+ cbnz x19, drop_to_el1
+
+ //
+ // There's more to do to the GIC - call the utility routine to set
+ // all SPIs to Group1
+ //
+ mov w0, #1 // gicigroupr_G1NS
+ bl SetSPISecurityAll
+
+ //
+ // Set up EL1 entry point and "dummy" exception return information,
+ // then perform exception return to enter EL1
+ //
+ .global drop_to_el1
+drop_to_el1:
+ adr x1, el1_entry_aarch64
+ msr ELR_EL3, x1
+ mov x1, #(AARCH64_SPSR_EL1h | \
+ AARCH64_SPSR_F | \
+ AARCH64_SPSR_I | \
+ AARCH64_SPSR_A)
+ msr SPSR_EL3, x1
+ eret
+
+
+
+// ------------------------------------------------------------
+// EL1 - Common start-up code
+// ------------------------------------------------------------
+
+ .global el1_entry_aarch64
+ .type el1_entry_aarch64, "function"
+el1_entry_aarch64:
+
+ //
+ // Now we're in EL1, setup the application stack
+ // the scatter file allocates 2^14 bytes per app stack
+ //
+ ldr x0, =Image$$HANDLER_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+ MSR SPSel, #0
+ ISB
+ ldr x0, =Image$$ARM_LIB_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+
+ //
+ // Enable floating point
+ //
+ mov x0, #CPACR_EL1_FPEN
+ msr CPACR_EL1, x0
+
+ //
+ // Invalidate caches and TLBs for all stage 1
+ // translations used at EL1
+ //
+ // Cortex-A processors automatically invalidate their caches on reset
+ // (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
+ // It is therefore not necessary for software to invalidate the caches
+ // on startup, however, this is done here in case of a warm reset.
+ bl InvalidateUDCaches
+ tlbi VMALLE1
+
+
+ //
+ // Set TTBR0 Base address
+ //
+ // The CPUs share one set of translation tables that are
+ // generated by CPU0 at run-time
+ //
+ // TTBR1_EL1 is not used in this example
+ //
+ ldr x1, =Image$$TTB0_L1$$ZI$$Base
+ msr TTBR0_EL1, x1
+
+
+ //
+ // Set up memory attributes
+ //
+ // These equate to:
+ //
+ // 0 -> 0b01000100 = 0x00000044 = Normal, Inner/Outer Non-Cacheable
+ // 1 -> 0b11111111 = 0x0000ff00 = Normal, Inner/Outer WriteBack Read/Write Allocate
+ // 2 -> 0b00000100 = 0x00040000 = Device-nGnRE
+ //
+ mov x1, #0xff44
+ movk x1, #4, LSL #16 // equiv to: movk x1, #0x0000000000040000
+ msr MAIR_EL1, x1
+
+
+ //
+ // Set up TCR_EL1
+ //
+ // We're using only TTBR0 (EPD1 = 1), and the page table entries:
+ // - are using an 8-bit ASID from TTBR0
+ // - have a 4K granularity (TG0 = 0b00)
+ // - are outer-shareable (SH0 = 0b10)
+ // - are using Inner & Outer WBWA Normal memory ([IO]RGN0 = 0b01)
+ // - map
+ // + 32 bits of VA space (T0SZ = 0x20)
+ // + into a 32-bit PA space (IPS = 0b000)
+ //
+ // 36 32 28 24 20 16 12 8 4 0
+ // -----+----+----+----+----+----+----+----+----+----+
+ // | | |OOII| | | |OOII| | |
+ // TT | | |RRRR|E T | T| |RRRR|E T | T|
+ // BB | I I|TTSS|GGGG|P 1 | 1|TTSS|GGGG|P 0 | 0|
+ // IIA| P P|GGHH|NNNN|DAS | S|GGHH|NNNN|D S | S|
+ // 10S| S-S|1111|1111|11Z-|---Z|0000|0000|0 Z-|---Z|
+ //
+ // 000 0000 0000 0000 1000 0000 0010 0101 0010 0000
+ //
+ // 0x 8 0 2 5 2 0
+ //
+ // Note: the ISB is needed to ensure the changes to system
+ // context are before the write of SCTLR_EL1.M to enable
+ // the MMU. It is likely on a "real" implementation that
+ // this setup would work without an ISB, due to the
+ // amount of code that gets executed before enabling the
+ // MMU, but that would not be architecturally correct.
+ //
+ ldr x1, =0x0000000000802520
+ msr TCR_EL1, x1
+ isb
+
+ //
+ // x19 already contains the CPU number, so branch to secondary
+ // code if we're not on CPU0
+ //
+ cbnz x19, el1_secondary
+
+ //
+ // Fall through to primary code
+ //
+
+
+//
+// ------------------------------------------------------------
+//
+// EL1 - primary CPU init code
+//
+// This code is run on CPU0, while the other CPUs are in the
+// holding pen
+//
+
+ .global el1_primary
+ .type el1_primary, "function"
+el1_primary:
+
+ //
+ // Turn on the banked GIC distributor enable,
+ // ready for individual CPU enables later
+ //
+ mov w0, #(1 << 1) // gicdctlr_EnableGrp1A
+ bl EnableGICD
+
+ //
+ // Generate TTBR0 L1
+ //
+ // at 4KB granularity, 32-bit VA space, table lookup starts at
+ // L1, with 1GB regions
+ //
+ // we are going to create entries pointing to L2 tables for a
+ // couple of these 1GB regions, the first of which is the
+ // RAM on the VE board model - get the table addresses and
+ // start by emptying out the L1 page tables (4 entries at L1
+ // for a 4K granularity)
+ //
+ // x21 = address of L1 tables
+ //
+ ldr x21, =Image$$TTB0_L1$$ZI$$Base
+ mov x0, x21
+ mov x1, #(4 << 3)
+ bl ZeroBlock
+
+ //
+ // time to start mapping the RAM regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // x22 = address of L2 tables, needs to be remembered in case
+ // we want to re-use the tables for mapping peripherals
+ //
+ ldr x22, =Image$$TTB0_L2_RAM$$ZI$$Base
+ mov x1, #(512 << 3)
+ mov x0, x22
+ bl ZeroBlock
+
+ //
+ // Get the start address of RAM (the EXEC region) into x4
+ // and calculate the offset into the L1 table (1GB per region,
+ // max 4GB)
+ //
+ // x23 = L1 table offset, saved for later comparison against
+ // peripheral offset
+ //
+ ldr x4, =Image$$EXEC$$RO$$Base
+ ubfx x23, x4, #30, #2
+
+ orr x1, x22, #TT_S1_ATTR_PAGE
+ str x1, [x21, x23, lsl #3]
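+    // (TT_S1_ATTR_PAGE encodes 0b11, which at L1 is also the table-descriptor
+    //  encoding, so this entry makes the L1 slot point at the L2 table)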
+
+ //
+ // we've already used the RAM start address in x4 - we now need
+ // to get this in terms of an offset into the L2 page tables,
+ // where each entry covers 2MB
+ //
+ ubfx x2, x4, #21, #9
+
+ //
+ // TOP_OF_RAM in the scatter file marks the end of the
+ // Execute region in RAM: convert the end of this region to an
+ // offset too, being careful to round up, then calculate the
+ // number of entries to write
+ //
+ ldr x5, =Image$$TOP_OF_RAM$$ZI$$Base
+ sub x3, x5, #1
+ ubfx x3, x3, #21, #9
+ add x3, x3, #1
+ sub x3, x3, x2
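+
+    //
+    // e.g. with a hypothetical EXEC base of 0x80000000 and TOP_OF_RAM of
+    // 0x84000000, x2 = 0 and x3 = 32, i.e. 32 x 2MB block entries (64MB)
+    //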
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as Shared, Normal WBWA (MAIR[1]) with a flat
+ // VA->PA translation
+ //
+ bic x4, x4, #((1 << 21) - 1)
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (1 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_SH_INNER | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
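+
+    //
+    // the attribute bits above evaluate to 0xF25: Block descriptor,
+    // AttrIndx = 1 (Normal WBWA), NS, RW at EL1, Inner-shareable, AF, nG
+    //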
+
+ //
+ // factor the offset into the page table address and then write
+ // the entries
+ //
+ add x0, x22, x2, lsl #3
+
+loop1:
+ subs x3, x3, #1
+ str x1, [x0], #8
+ add x1, x1, #0x200, LSL #12 // equiv to add x1, x1, #(1 << 21) // 2MB per entry
+ bne loop1
+
+
+ //
+ // now mapping the Peripheral regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // The assumption here is that all peripherals live within
+ // a common 1GB region (i.e. that there's a single set of
+ // L2 pages for all the peripherals). We only use a UART
+ // and the GIC in this example, so the assumption is sound
+ //
+ // x24 = address of L2 peripheral tables
+ //
+ ldr x24, =Image$$TTB0_L2_PERIPH$$ZI$$Base
+
+ //
+ // get the GICD address into x4 and calculate
+ // the offset into the L1 table
+ //
+ // x25 = L1 table offset
+ //
+ ldr x4, =Image$$GICD$$ZI$$Base
+ ubfx x25, x4, #30, #2
+
+ //
+ // here's the tricky bit: it's possible that the peripherals are
+ // in the same 1GB region as the RAM, in which case we don't need
+ // to prime a separate set of L2 page tables, nor add them to the
+ // L1 tables
+ //
+ // if we're going to re-use the TTB0_L2_RAM tables, get their
+ // address into x24, which is used later on to write the PTEs
+ //
+ cmp x25, x23
+ csel x24, x22, x24, EQ
+ b.eq nol2setup
+
+ //
+ // Peripherals are in a separate 1GB region, and so have their own
+ // set of L2 tables - clean out the tables and add them to the L1
+ // table
+ //
+ mov x0, x24
+    mov  x1, #(512 << 3)
+ bl ZeroBlock
+
+ orr x1, x24, #TT_S1_ATTR_PAGE
+ str x1, [x21, x25, lsl #3]
+
+ //
+ // there's only going to be a single 2MB region for GICD (in
+ // x4) - get this in terms of an offset into the L2 page tables
+ //
+ // with larger systems, it is possible that the GIC redistributor
+ // registers require extra 2MB pages, in which case extra code
+ // would be required here
+ //
+nol2setup:
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
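+
+    //
+    // the attribute bits above evaluate to 0xC29: Block descriptor,
+    // AttrIndx = 2 (Device-nGnRE), NS, RW at EL1, AF, nG
+    //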
+
+ //
+ // only a single L2 entry for this, so no loop as we have for RAM, above
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // we have CS3_PERIPHERALS that include the UART controller
+ //
+ // Again, the code is making assumptions - this time that the CS3_PERIPHERALS
+ // region uses the same 1GB portion of the address space as the GICD,
+ // and thus shares the same set of L2 page tables
+ //
+ // Get CS3_PERIPHERALS address into x4 and calculate the offset into the
+ // L2 tables
+ //
+ ldr x4, =Image$$CS3_PERIPHERALS$$ZI$$Base
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry again - write it
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // issue a barrier to ensure all table entry writes are complete
+ //
+ dsb ish
+
+ //
+ // Enable the MMU. Caches will be enabled later, after scatterloading.
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to C library init code
+ //
+ b __main
+
+
+// ------------------------------------------------------------
+
+// AArch64 Arm C library startup add-in:
+
+// The Arm Architecture Reference Manual for Armv8-A states:
+//
+// Instruction accesses to Non-cacheable Normal memory can be held in instruction caches.
+// Correspondingly, the sequence for ensuring that modifications to instructions are available
+// for execution must include invalidation of the modified locations from the instruction cache,
+// even if the instructions are held in Normal Non-cacheable memory.
+// This includes cases where the instruction cache is disabled.
+//
+// To invalidate the AArch64 instruction cache after scatter-loading and before initialization of the stack and heap,
+// it is necessary for the user to:
+//
+// * Implement instruction cache invalidation code in _platform_pre_stackheap_init.
+// * Ensure all code on the path from the program entry up to and including _platform_pre_stackheap_init is located in a root region.
+//
+// In this example, this function is only called once, by the primary core
+
+ .global _platform_pre_stackheap_init
+ .type _platform_pre_stackheap_init, "function"
+ .cfi_startproc
+_platform_pre_stackheap_init:
+ dsb ish // ensure all previous stores have completed before invalidating
+ ic ialluis // I cache invalidate all inner shareable to PoU (which includes secondary cores)
+ dsb ish // ensure completion on inner shareable domain (which includes secondary cores)
+ isb
+
+ // Scatter-loading is complete, so enable the caches here, so that the C-library's mutex initialization later will work
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ msr SCTLR_EL1, x1
+ isb
+
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+// EL1 - secondary CPU init code
+//
+// This code is run on CPUs 1, 2, 3 etc....
+// ------------------------------------------------------------
+
+ .global el1_secondary
+ .type el1_secondary, "function"
+el1_secondary:
+
+ //
+ // the primary CPU is going to use SGI 15 as a wakeup event
+ // to let us know when it is OK to proceed, so prepare for
+ // receiving that interrupt
+ //
+ // NS interrupt priorities run from 0 to 15, with 15 being
+ // too low a priority to ever raise an interrupt, so let's
+ // use 14
+ //
+ mov w0, w20
+ mov w1, #15
+    mov  w2, #14 << 4       // the NS world sees only the upper 4 bits of the
+                            // 8-bit priority field, hence the shift by 4
+ bl SetPrivateIntPriority
+
+ mov w0, w20
+ mov w1, #15
+ bl EnablePrivateInt
+
+ //
+    // set the priority mask as low as possible; although, being in the
+    // NS World, we can't set bit[7] of the priority, we still
+    // write all 8 bits of priority to an ICC register
+ //
+ mov x0, #31 << 3
+ msr ICC_PMR_EL1, x0
+
+ //
+ // set global enable and wait for our interrupt to arrive
+ //
+ mov x0, #1
+ msr ICC_IGRPEN1_EL1, x0
+ isb
+
+loop_wfi:
+ dsb SY // Clear all pending data accesses
+ wfi // Go to sleep
+
+ //
+ // something woke us from our wait, was it the required interrupt?
+ //
+ mov w0, w20
+ mov w1, #15
+ bl GetPrivateIntPending
+ cbz w0, loop_wfi
+
+ //
+ // it was - there's no need to actually take the interrupt,
+ // so just clear it
+ //
+ mov w0, w20
+ mov w1, #15
+ bl ClearPrivateIntPending
+
+ //
+ // Enable the MMU and caches
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to thread start
+ //
+ //B MainApp
+ b __main
+
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/timer_interrupts.c b/ports/cortex_a34/ac6/example_build/sample_threadx/timer_interrupts.c
new file mode 100644
index 00000000..8f522217
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/timer_interrupts.c
@@ -0,0 +1,152 @@
+/* Bare-metal example for Armv8-A FVP Base model */
+
+/* Timer and interrupts */
+
+/* Copyright (c) 2016-2018 Arm Limited (or its affiliates). All rights reserved. */
+/* Use, modification and redistribution of this file is subject to your possession of a */
+/* valid End User License Agreement for the Arm Product of which these examples are part of */
+/* and your compliance with all applicable terms and conditions of such licence agreement. */
+
+#include <stdio.h>
+
+#include "GICv3.h"
+#include "GICv3_gicc.h"
+#include "sp804_timer.h"
+
+void _tx_timer_interrupt(void);
+
+// LED Base address
+#define LED_BASE (volatile unsigned int *)0x1C010008
+
+
+void nudge_leds(void) // Move LEDs along
+{
+ static int state = 1;
+ static int value = 1;
+
+ if (state)
+ {
+ int max = (1 << 7);
+ value <<= 1;
+ if (value == max)
+ state = 0;
+ }
+ else
+ {
+ value >>= 1;
+ if (value == 1)
+ state = 1;
+ }
+
+ *LED_BASE = value; // Update LEDs hardware
+}
+
+
+// Initialize Timer 0 and Interrupt Controller
+void init_timer(void)
+{
+ // Enable interrupts
+ __asm("MSR DAIFClr, #0xF");
+ setICC_IGRPEN1_EL1(igrpEnable);
+
+ // Configure the SP804 timer to generate an interrupt
+ setTimerBaseAddress(0x1C110000);
+ initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+ startTimer();
+
+ // The SP804 timer generates SPI INTID 34. Enable
+ // this ID, and route it to core 0.0.0.0 (this one!)
+ SetSPIRoute(34, 0, gicdirouter_ModeSpecific); // Route INTID 34 to 0.0.0.0 (this core)
+  SetSPIPriority(34, 0);                         // Set INTID 34 priority to 0
+ ConfigureSPI(34, gicdicfgr_Level); // Set INTID 34 as level-sensitive
+ EnableSPI(34); // Enable INTID 34
+}
+
+
+// --------------------------------------------------------
+
+void irqHandler(void)
+{
+ unsigned int ID;
+
+ ID = getICC_IAR1(); // readIntAck();
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("irqHandler() - Reserved INTID %d\n\n", ID);
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("irqHandler() - External timer interrupt\n\n");
+ nudge_leds();
+ clearTimerIrq();
+
+ /* Call ThreadX timer interrupt processing. */
+ _tx_timer_interrupt();
+
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("irqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
+
+// --------------------------------------------------------
+
+// Not actually used in this example, but provided for completeness
+
+void fiqHandler(void)
+{
+ unsigned int ID;
+ unsigned int aliased = 0;
+
+ ID = getICC_IAR0(); // readIntAck();
+ //printf("fiqHandler() - Read %d from IAR0\n", ID);
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("fiqHandler() - Reserved INTID %d\n\n", ID);
+ ID = getICC_IAR1(); // readAliasedIntAck();
+ //printf("fiqHandler() - Read %d from AIAR\n", ID);
+ aliased = 1;
+
+ // If still spurious then simply return
+ if ((1020 <= ID) && (ID <= 1023))
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("fiqHandler() - External timer interrupt\n\n");
+ clearTimerIrq();
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("fiqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ // NOTE: If the ID was read from the Aliased IAR, then
+ // the aliased EOI register must be used
+ if (aliased == 0)
+ setICC_EOIR0(ID); // writeEOI(ID);
+ else
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/use_model_semihosting.ds b/ports/cortex_a34/ac6/example_build/sample_threadx/use_model_semihosting.ds
new file mode 100644
index 00000000..6fde52b2
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/use_model_semihosting.ds
@@ -0,0 +1 @@
+set semihosting enabled off
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.S
new file mode 100644
index 00000000..f8db3bfe
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.S
@@ -0,0 +1,179 @@
+// ------------------------------------------------------------
+// Armv8-A AArch64 - Common helper functions
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global EnableCachesEL1
+ .global DisableCachesEL1
+ .global InvalidateUDCaches
+ .global GetMIDR
+ .global GetMPIDR
+ .global GetAffinity
+ .global GetCPUID
+
+// ------------------------------------------------------------
+
+//
+// void EnableCachesEL1(void)
+//
+// enable Instruction and Data caches
+//
+ .type EnableCachesEL1, "function"
+ .cfi_startproc
+EnableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_ELx_I
+ orr x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+ .type DisableCachesEL1, "function"
+ .cfi_startproc
+DisableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_ELx_I
+ bic x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// void InvalidateUDCaches(void)
+//
+// Invalidate data and unified caches
+//
+ .type InvalidateUDCaches, "function"
+ .cfi_startproc
+InvalidateUDCaches:
+ // From the Armv8-A Architecture Reference Manual
+
+ dmb ish // ensure all prior inner-shareable accesses have been observed
+
+ mrs x0, CLIDR_EL1
+ and w3, w0, #0x07000000 // get 2 x level of coherence
+ lsr w3, w3, #23
+ cbz w3, finished
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+loop_level:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt next_level // no data or unified cache at this level
+ msr CSSELR_EL1, x10 // select this cache level
+ isb // synchronize change of csselr
+ mrs x1, CCSIDR_EL1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop_way:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop_set:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc isw, x11 // do data cache invalidate by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop_set
+ subs x9, x9, x16 // decrement way number
+ b.ge loop_way
+next_level:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop_level
+ dsb sy // ensure completion of previous cache maintenance operation
+ isb
+finished:
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// ID Register functions
+//
+
+ .type GetMIDR, "function"
+ .cfi_startproc
+GetMIDR:
+
+ mrs x0, MIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetMPIDR, "function"
+ .cfi_startproc
+GetMPIDR:
+
+ mrs x0, MPIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetAffinity, "function"
+ .cfi_startproc
+GetAffinity:
+
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #32, #8
+ bfi w0, w1, #24, #8
+ ret
+ .cfi_endproc
+
+
+ .type GetCPUID, "function"
+ .cfi_startproc
+GetCPUID:
+
+ mrs x0, MIDR_EL1
+ ubfx x0, x0, #4, #12 // extract PartNum
+ cmp x0, #0xD0D // Cortex-A77
+ b.eq DynamIQ
+ cmp x0, #0xD0B // Cortex-A76
+ b.eq DynamIQ
+ cmp x0, #0xD0A // Cortex-A75
+ b.eq DynamIQ
+ cmp x0, #0xD05 // Cortex-A55
+ b.eq DynamIQ
+ b Others
+DynamIQ:
+ mrs x0, MPIDR_EL1
+ ubfx x0, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ ret
+
+Others:
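+    // non-DynamIQ parts: derive a flat CPU number as Aff0 + 4 * Aff1
+    // (assumes no more than 4 cores per cluster)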
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
+ ubfx x2, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ add x0, x1, x2, LSL #2
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.h b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.h
new file mode 100644
index 00000000..b09079a4
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_aarch64.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Armv8-A AArch64 common helper functions
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+
+#ifndef V8_AARCH64_H
+#define V8_AARCH64_H
+
+/*
+ * Parameters for data barriers
+ */
+#define OSHLD 1
+#define OSHST 2
+#define OSH 3
+#define NSHLD 5
+#define NSHST 6
+#define NSH 7
+#define ISHLD 9
+#define ISHST 10
+#define ISH 11
+#define LD 13
+#define ST 14
+#define SY 15
+
+/**********************************************************************/
+
+/*
+ * function prototypes
+ */
+
+/*
+ * void InvalidateUDCaches(void)
+ * invalidates all Unified and Data Caches
+ *
+ * Inputs
+ *     none
+ *
+ * Returns
+ *     none
+ *
+ * Side Effects
+ * guarantees that all levels of cache will be invalidated before
+ * returning to caller
+ */
+void InvalidateUDCaches(void);
+
+/*
+ * unsigned long long EnableCachesEL1(void)
+ * enables I- and D- caches at EL1
+ *
+ * Inputs
+ *     none
+ *
+ * Returns
+ * New value of SCTLR_EL1
+ *
+ * Side Effects
+ * context will be synchronised before returning to caller
+ */
+unsigned long long EnableCachesEL1(void);
+
+/*
+ * unsigned long long GetMIDR(void)
+ *     returns the contents of MIDR_EL1
+ *
+ * Inputs
+ *     none
+ *
+ * Returns
+ *     MIDR_EL1
+ */
+unsigned long long GetMIDR(void);
+
+/*
+ * unsigned long long GetMPIDR(void)
+ *     returns the contents of MPIDR_EL1
+ *
+ * Inputs
+ *     none
+ *
+ * Returns
+ *     MPIDR_EL1
+ */
+unsigned long long GetMPIDR(void);
+
+/*
+ * unsigned int GetCPUID(void)
+ *     returns a flat CPU number derived from the affinity fields of MPIDR_EL1
+ *
+ * Inputs
+ *     none
+ *
+ * Returns
+ *     CPU number (Aff1 on DynamIQ parts, otherwise Aff0 + 4 * Aff1)
+ */
+unsigned int GetCPUID(void);
+
+#endif
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_mmu.h
new file mode 100644
index 00000000..ee8834fa
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_mmu.h
@@ -0,0 +1,128 @@
+//
+// Defines for v8 Memory Model
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_MMU_H
+#define V8_MMU_H
+
+//
+// Translation Control Register fields
+//
+// RGN field encodings
+//
+#define TCR_RGN_NC 0b00
+#define TCR_RGN_WBWA 0b01
+#define TCR_RGN_WT 0b10
+#define TCR_RGN_WBRA 0b11
+
+//
+// Shareability encodings
+//
+#define TCR_SHARE_NONE 0b00
+#define TCR_SHARE_OUTER 0b10
+#define TCR_SHARE_INNER 0b11
+
+//
+// Granule size encodings
+//
+#define TCR_GRANULE_4K 0b00
+#define TCR_GRANULE_64K 0b01
+#define TCR_GRANULE_16K 0b10
+
+//
+// Physical Address sizes
+//
+#define TCR_SIZE_4G 0b000
+#define TCR_SIZE_64G 0b001
+#define TCR_SIZE_1T 0b010
+#define TCR_SIZE_4T 0b011
+#define TCR_SIZE_16T 0b100
+#define TCR_SIZE_256T 0b101
+
+//
+// Translation Control Register fields
+//
+#define TCR_EL1_T0SZ_SHIFT 0
+#define TCR_EL1_EPD0 (1 << 7)
+#define TCR_EL1_IRGN0_SHIFT 8
+#define TCR_EL1_ORGN0_SHIFT 10
+#define TCR_EL1_SH0_SHIFT 12
+#define TCR_EL1_TG0_SHIFT 14
+
+#define TCR_EL1_T1SZ_SHIFT 16
+#define TCR_EL1_A1 (1 << 22)
+#define TCR_EL1_EPD1 (1 << 23)
+#define TCR_EL1_IRGN1_SHIFT 24
+#define TCR_EL1_ORGN1_SHIFT 26
+#define TCR_EL1_SH1_SHIFT 28
+#define TCR_EL1_TG1_SHIFT 30
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_EL1_AS (1 << 36)
+#define TCR_EL1_TBI0 (1 << 37)
+#define TCR_EL1_TBI1 (1 << 38)
+
+//
+// Stage 1 Translation Table descriptor fields
+//
+#define TT_S1_ATTR_FAULT (0b00 << 0)
+#define TT_S1_ATTR_BLOCK (0b01 << 0) // Level 1/2
+#define TT_S1_ATTR_TABLE (0b11 << 0) // Level 0/1/2
+#define TT_S1_ATTR_PAGE (0b11 << 0) // Level 3
+
+#define TT_S1_ATTR_MATTR_LSB 2
+
+#define TT_S1_ATTR_NS (1 << 5)
+
+#define TT_S1_ATTR_AP_RW_PL1 (0b00 << 6)
+#define TT_S1_ATTR_AP_RW_ANY (0b01 << 6)
+#define TT_S1_ATTR_AP_RO_PL1 (0b10 << 6)
+#define TT_S1_ATTR_AP_RO_ANY (0b11 << 6)
+
+#define TT_S1_ATTR_SH_NONE (0b00 << 8)
+#define TT_S1_ATTR_SH_OUTER (0b10 << 8)
+#define TT_S1_ATTR_SH_INNER (0b11 << 8)
+
+#define TT_S1_ATTR_AF (1 << 10)
+#define TT_S1_ATTR_nG (1 << 11)
+
+// OA bits [15:12] - If Armv8.2-LPA is implemented, bits[15:12] are bits[51:48]
+// and bits[47:16] are bits[47:16] of the output address for a page of memory
+
+#define TT_S1_ATTR_nT (1 << 16) // Present if Armv8.4-TTRem is implemented, otherwise RES0
+
+#define TT_S1_ATTR_DBM (1 << 51) // Present if Armv8.1-TTHM is implemented, otherwise RES0
+
+#define TT_S1_ATTR_CONTIG (1 << 52)
+#define TT_S1_ATTR_PXN (1 << 53)
+#define TT_S1_ATTR_UXN (1 << 54)
+
+// PBHA bits[62:59] - If Armv8.2-TTPBHA is implemented, hardware can use these bits
+// for IMPLEMENTATION DEFINED purposes, otherwise IGNORED
+
+#define TT_S1_MAIR_DEV_nGnRnE 0b00000000
+#define TT_S1_MAIR_DEV_nGnRE 0b00000100
+#define TT_S1_MAIR_DEV_nGRE 0b00001000
+#define TT_S1_MAIR_DEV_GRE 0b00001100
+
+//
+// Inner and Outer Normal memory attributes use the same bit patterns
+// Outer attributes just need to be shifted up
+//
+#define TT_S1_MAIR_OUTER_SHIFT 4
+
+#define TT_S1_MAIR_WT_TRANS_RA 0b0010
+
+#define TT_S1_MAIR_WB_TRANS_RA 0b0110
+#define TT_S1_MAIR_WB_TRANS_RWA 0b0111
+
+#define TT_S1_MAIR_WT_RA 0b1010
+
+#define TT_S1_MAIR_WB_RA 0b1110
+#define TT_S1_MAIR_WB_RWA 0b1111
+
+#endif // V8_MMU_H
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/v8_system.h b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_system.h
new file mode 100644
index 00000000..ff96deff
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_system.h
@@ -0,0 +1,115 @@
+//
+// Defines for v8 System Registers
+//
+// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_SYSTEM_H
+#define V8_SYSTEM_H
+
+//
+// AArch64 SPSR
+//
+#define AARCH64_SPSR_EL3h 0b1101
+#define AARCH64_SPSR_EL3t 0b1100
+#define AARCH64_SPSR_EL2h 0b1001
+#define AARCH64_SPSR_EL2t 0b1000
+#define AARCH64_SPSR_EL1h 0b0101
+#define AARCH64_SPSR_EL1t 0b0100
+#define AARCH64_SPSR_EL0t 0b0000
+#define AARCH64_SPSR_RW (1 << 4)
+#define AARCH64_SPSR_F (1 << 6)
+#define AARCH64_SPSR_I (1 << 7)
+#define AARCH64_SPSR_A (1 << 8)
+#define AARCH64_SPSR_D (1 << 9)
+#define AARCH64_SPSR_IL (1 << 20)
+#define AARCH64_SPSR_SS (1 << 21)
+#define AARCH64_SPSR_V (1 << 28)
+#define AARCH64_SPSR_C (1 << 29)
+#define AARCH64_SPSR_Z (1 << 30)
+#define AARCH64_SPSR_N (1 << 31)
+
+//
+// Multiprocessor Affinity Register
+//
+#define MPIDR_EL1_AFF3_LSB 32
+#define MPIDR_EL1_U (1 << 30)
+#define MPIDR_EL1_MT (1 << 24)
+#define MPIDR_EL1_AFF2_LSB 16
+#define MPIDR_EL1_AFF1_LSB 8
+#define MPIDR_EL1_AFF0_LSB 0
+#define MPIDR_EL1_AFF_WIDTH 8
+
+//
+// Data Cache Zero ID Register
+//
+#define DCZID_EL0_BS_LSB 0
+#define DCZID_EL0_BS_WIDTH 4
+#define DCZID_EL0_DZP_LSB 5
+#define DCZID_EL0_DZP (1 << 5)
+
+//
+// System Control Register
+//
+#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_EL1_E0E (1 << 24)
+#define SCTLR_ELx_WXN (1 << 19)
+#define SCTLR_EL1_nTWE (1 << 18)
+#define SCTLR_EL1_nTWI (1 << 16)
+#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_EL1_UMA (1 << 9)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
+#define SCTLR_EL1_THEE (1 << 6)
+#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M (1 << 0)
+
+//
+// Architectural Feature Access Control Register
+//
+#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_FPEN (3 << 20)
+
+//
+// Architectural Feature Trap Register
+//
+#define CPTR_ELx_TCPAC (1 << 31)
+#define CPTR_ELx_TTA (1 << 20)
+#define CPTR_ELx_TFP (1 << 10)
+
+//
+// Secure Configuration Register
+//
+#define SCR_EL3_TWE (1 << 13)
+#define SCR_EL3_TWI (1 << 12)
+#define SCR_EL3_ST (1 << 11)
+#define SCR_EL3_RW (1 << 10)
+#define SCR_EL3_SIF (1 << 9)
+#define SCR_EL3_HCE (1 << 8)
+#define SCR_EL3_SMD (1 << 7)
+#define SCR_EL3_EA (1 << 3)
+#define SCR_EL3_FIQ (1 << 2)
+#define SCR_EL3_IRQ (1 << 1)
+#define SCR_EL3_NS (1 << 0)
+
+//
+// Hypervisor Configuration Register
+//
+#define HCR_EL2_ID (1 << 33)
+#define HCR_EL2_CD (1 << 32)
+#define HCR_EL2_RW (1 << 31)
+#define HCR_EL2_TRVM (1 << 30)
+#define HCR_EL2_HVC (1 << 29)
+#define HCR_EL2_TDZ (1 << 28)
+
+#endif // V8_SYSTEM_H
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/v8_utils.S b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_utils.S
new file mode 100644
index 00000000..f0fcef26
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/v8_utils.S
@@ -0,0 +1,69 @@
+//
+// Simple utility routines for baremetal v8 code
+//
+// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+//
+// void *ZeroBlock(void *blockPtr, unsigned int nBytes)
+//
+// Zero fill a block of memory
+// Fill memory pages or similar structures with zeros.
+// The byte count must be a multiple of the block fill size (16 bytes)
+//
+// Inputs:
+// blockPtr - base address of block to fill
+// nBytes - block size, in bytes
+//
+// Returns:
+// pointer to just filled block, NULL if nBytes is
+// incompatible with block fill size
+//
+ .global ZeroBlock
+ .type ZeroBlock, "function"
+ .cfi_startproc
+ZeroBlock:
+
+ //
+    // we fill the block 16 bytes at a time: check that
+ // blocksize is a multiple of that
+ //
+ ubfx x2, x1, #0, #4
+ cbnz x2, incompatible
+
+ //
+ // we already have one register full of zeros, get another
+ //
+ mov x3, x2
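+    // (x2 is guaranteed to be zero here: the cbnz above falls through only when x2 == 0)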
+
+ //
+ // OK, set temporary pointer and away we go
+ //
+ add x0, x0, x1
+
+loop0:
+ subs x1, x1, #16
+ stp x2, x3, [x0, #-16]!
+ b.ne loop0
+
+ //
+ // that's all - x0 will be back to its start value
+ //
+ ret
+
+ //
+ // parameters are incompatible with block size - return
+ // an indication that this is so
+ //
+incompatible:
+ mov x0,#0
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a34/ac6/example_build/sample_threadx/vectors.S b/ports/cortex_a34/ac6/example_build/sample_threadx/vectors.S
new file mode 100644
index 00000000..9e60e001
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/sample_threadx/vectors.S
@@ -0,0 +1,252 @@
+// ------------------------------------------------------------
+// Armv8-A Vector tables
+//
+// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+ .global c0sync1
+ .global irqHandler
+ .global fiqHandler
+ .global irqFirstLevelHandler
+ .global fiqFirstLevelHandler
+
+ .section EL1VECTORS, "ax"
+ .align 11
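+
+// each vector table is 2KB aligned (.align 11 = 2^11 bytes), as required by VBAR_ELx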
+
+//
+// Current EL with SP0
+//
+el1_vectors:
+c0sync1: B c0sync1
+
+ .balign 0x80
+c0irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr1: B c0serr1
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync1: B cxsync1
+
+ .balign 0x80
+cxirq1: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr1: B cxserr1
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync1: B l64sync1
+
+ .balign 0x80
+l64irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr1: B l64serr1
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync1: B l32sync1
+
+ .balign 0x80
+l32irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr1: B l32serr1
+
+//----------------------------------------------------------------
+
+ .section EL2VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el2_vectors:
+c0sync2: B c0sync2
+
+ .balign 0x80
+c0irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr2: B c0serr2
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync2: B cxsync2
+
+ .balign 0x80
+cxirq2: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr2: B cxserr2
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync2: B l64sync2
+
+ .balign 0x80
+l64irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr2: B l64serr2
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync2: B l32sync2
+
+ .balign 0x80
+l32irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr2: B l32serr2
+
+//----------------------------------------------------------------
+
+ .section EL3VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el3_vectors:
+c0sync3: B c0sync3
+
+ .balign 0x80
+c0irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr3: B c0serr3
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync3: B cxsync3
+
+ .balign 0x80
+cxirq3: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr3: B cxserr3
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync3: B l64sync3
+
+ .balign 0x80
+l64irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr3: B l64serr3
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync3: B l32sync3
+
+ .balign 0x80
+l32irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr3: B l32serr3
+
+
+ .section InterruptHandlers, "ax"
+ .balign 4
+
+ .type irqFirstLevelHandler, "function"
+irqFirstLevelHandler:
+ MSR SPSel, 0
+ STP x29, x30, [sp, #-16]!
+ BL _tx_thread_context_save
+ BL irqHandler
+ B _tx_thread_context_restore
+
+ .type fiqFirstLevelHandler, "function"
+fiqFirstLevelHandler:
+ STP x29, x30, [sp, #-16]!
+ STP x18, x19, [sp, #-16]!
+ STP x16, x17, [sp, #-16]!
+ STP x14, x15, [sp, #-16]!
+ STP x12, x13, [sp, #-16]!
+ STP x10, x11, [sp, #-16]!
+ STP x8, x9, [sp, #-16]!
+ STP x6, x7, [sp, #-16]!
+ STP x4, x5, [sp, #-16]!
+ STP x2, x3, [sp, #-16]!
+ STP x0, x1, [sp, #-16]!
+
+ BL fiqHandler
+
+ LDP x0, x1, [sp], #16
+ LDP x2, x3, [sp], #16
+ LDP x4, x5, [sp], #16
+ LDP x6, x7, [sp], #16
+ LDP x8, x9, [sp], #16
+ LDP x10, x11, [sp], #16
+ LDP x12, x13, [sp], #16
+ LDP x14, x15, [sp], #16
+ LDP x16, x17, [sp], #16
+ LDP x18, x19, [sp], #16
+ LDP x29, x30, [sp], #16
+ ERET
diff --git a/ports/cortex_a34/ac6/example_build/tx/.cproject b/ports/cortex_a34/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..21675fd6
--- /dev/null
+++ b/ports/cortex_a34/ac6/example_build/tx/.cproject
@@ -0,0 +1,148 @@
diff --git a/ports/arc_em/metaware/test_regression/tx/.project b/ports/cortex_a34/ac6/example_build/tx/.project
similarity index 100%
rename from ports/arc_em/metaware/test_regression/tx/.project
rename to ports/cortex_a34/ac6/example_build/tx/.project
diff --git a/ports/cortex_a34/ac6/inc/tx_port.h b/ports/cortex_a34/ac6/inc/tx_port.h
new file mode 100644
index 00000000..33bccbf1
--- /dev/null
+++ b/ports/cortex_a34/ac6/inc/tx_port.h
@@ -0,0 +1,379 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef int LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef short SHORT;
+typedef unsigned short USHORT;
+#define ULONG64_DEFINED
+
+/* Override the alignment type to use 64-bit alignment and storage for pointers. */
+
+#define ALIGN_TYPE_DEFINED
+typedef unsigned long long ALIGN_TYPE;
+
+
+/* Override the free block marker for byte pools to be a 64-bit constant. */
+
+#define TX_BYTE_BLOCK_FREE ((ALIGN_TYPE) 0xFFFFEEEEFFFFEEEE)
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp.  The following two items are port-specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_MISRA_ENABLE
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
+#define TX_INLINE_INITIALIZATION
+#endif
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifndef TX_MISRA_ENABLE
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#ifndef TX_DISABLE_INLINE
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) b = (UINT) __builtin_ctz((unsigned int) m);
+
+#endif
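+
+/* For example, __builtin_ctz(0x00000008) yields 3, i.e. bit 3 is the lowest bit set.
+   The bit map m is assumed to be non-zero whenever this macro is invoked.  */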
+
+
+/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
+ can figure out what thread timeout to process. */
+
+#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define the thread timeout setup logic in _tx_thread_create. */
+
+#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
+ (t) -> tx_thread_timer.tx_timer_internal_timeout_param = 0; \
+ (t) -> tx_thread_timer.tx_timer_internal_thread_timeout_ptr = (VOID *) (t);
+
+
+/* Define the thread timeout pointer setup in _tx_thread_timeout. */
+
+#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+   access of critical kernel information.  The restore interrupt macro must
+   restore the interrupt posture of the running thread to the value that was
+   present prior to the disable macro.  In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifndef TX_DISABLE_INLINE
+
+/* Define macros, with in-line assembly for performance. */
+
+__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
+{
+
+unsigned long long daif_value;
+
+ __asm__ volatile (" MRS %0, DAIF ": "=r" (daif_value) );
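+    /* DAIFSet 0x3 sets the I and F bits of PSTATE, masking both IRQ and FIQ. */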
+ __asm__ volatile (" MSR DAIFSet, 0x3" : : : "memory" );
+ return((unsigned int) daif_value);
+}
+
+__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int daif_value)
+{
+
+unsigned long long temp;
+
+ temp = (unsigned long long) daif_value;
+ __asm__ volatile (" MSR DAIF,%0": : "r" (temp): "memory" );
+}
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+#define TX_DISABLE interrupt_save = __disable_interrupts();
+#define TX_RESTORE __restore_interrupts(interrupt_save);
+
+#else
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+#endif
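+
+/* Typical usage of these macros inside kernel code (illustrative sketch only):
+
+       TX_INTERRUPT_SAVE_AREA                  -- declares the local save variable
+       TX_DISABLE                              -- masks interrupts, saving the prior posture
+       ... access critical kernel data ...
+       TX_RESTORE                              -- restores the saved posture
+*/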
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
diff --git a/ports/cortex_a34/ac6/src/tx_initialize_low_level.S b/ports/cortex_a34/ac6/src/tx_initialize_low_level.S
new file mode 100644
index 00000000..a56c067a
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_initialize_low_level.S
@@ -0,0 +1,98 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_initialize_low_level(VOID)
+// {
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level, @function
+_tx_initialize_low_level:
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Save the system stack pointer. */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
+
+ LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
+ MOV x1, sp // Pickup SP
+    BIC     x1, x1, #0xF                        // Ensure 16-byte alignment
+ STR x1, [x0] // Store system stack
+
+ /* Save the first available memory address. */
+ /* _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit; */
+
+ LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
+ LDR x1, =zi_limit // Pickup unused memory address
+ LDR x1, [x1] //
+ STR x1, [x0] // Store unused memory address
+
+ /* Done, return to caller. */
+
+ RET // Return to caller
+// }
+
+
+zi_limit:
+ .quad (Image$$TOP_OF_RAM$$Base)
+
diff --git a/ports/cortex_a34/ac6/src/tx_thread_context_restore.S b/ports/cortex_a34/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..994c404d
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,287 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_restore(VOID)
+// {
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore, @function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ // if (--_tx_thread_system_state)
+ // {
+
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ SUB w2, w2, #1 // Decrement the counter
+ STR w2, [x3, #0] // Store the counter
+ CMP w2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+    LDP     x4, x5, [sp], #16                   // Pickup saved SPSR/DAIF and ELR_ELx
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+ // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable))
+ // {
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup actual current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR w2, [x3, #0] // Pickup actual preempt disable flag
+ CMP w2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR x2, [x3, #0] // Pickup actual execute thread pointer
+ CMP x0, x2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+ // else
+ // {
+__tx_thread_preempt_restore:
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ STP x20, x21, [sp, #-16]! // Save x20, x21
+ STP x22, x23, [sp, #-16]! // Save x22, x23
+ STP x24, x25, [sp, #-16]! // Save x24, x25
+ STP x26, x27, [sp, #-16]! // Save x26, x27
+ STP x28, x29, [sp, #-16]! // Save x28, x29
+#ifdef ENABLE_ARM_FP
+ LDR w3, [x0, #248] // Pickup FP enable flag
+ CMP w3, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q0, q1, [sp, #-32]! // Save q0, q1
+ STP q2, q3, [sp, #-32]! // Save q2, q3
+ STP q4, q5, [sp, #-32]! // Save q4, q5
+ STP q6, q7, [sp, #-32]! // Save q6, q7
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ STP q16, q17, [sp, #-32]! // Save q16, q17
+ STP q18, q19, [sp, #-32]! // Save q18, q19
+ STP q20, q21, [sp, #-32]! // Save q20, q21
+ STP q22, q23, [sp, #-32]! // Save q22, q23
+ STP q24, q25, [sp, #-32]! // Save q24, q25
+ STP q26, q27, [sp, #-32]! // Save q26, q27
+ STP q28, q29, [sp, #-32]! // Save q28, q29
+ STP q30, q31, [sp, #-32]! // Save q30, q31
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+    STP     x4, x5, [sp, #-16]!                 // Save x4 (SPSR_EL3), x5 (ELR_EL3)
+
+ MOV x3, sp // Move sp into x3
+ STR x3, [x0, #8] // Save stack pointer in thread control
+ // block
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+
+ /* Save the remaining time-slice and disable it. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w2, [x0, #36] // Save thread's time-slice
+ MOV w2, #0 // Clear value
+ STR w2, [x3, #0] // Disable global time-slice flag
+
+ // }
+__tx_thread_dont_save_ts:
+
+
+ /* Clear the current task pointer. */
+ /* _tx_thread_current_ptr = TX_NULL; */
+
+ MOV x0, #0 // NULL value
+ STR x0, [x1, #0] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ // _tx_thread_schedule();
+
+ // }
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
+#ifdef EL1
+ MSR ELR_EL1, x1 // Setup point of interrupt
+// MOV x1, #0x4 // Setup EL1 return
+// MSR spsr_el1, x1 // Move into SPSR
+#else
+#ifdef EL2
+ MSR ELR_EL2, x1 // Setup point of interrupt
+// MOV x1, #0x8 // Setup EL2 return
+// MSR spsr_el2, x1 // Move into SPSR
+#else
+ MSR ELR_EL3, x1 // Setup point of interrupt
+// MOV x1, #0xC // Setup EL3 return
+// MSR spsr_el3, x1 // Move into SPSR
+#endif
+#endif
+ ERET // Return to scheduler
+// }
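
For readers following the commented pseudocode above, the sketch below restates the restore decision flow in plain C. It is illustrative only; the real work (frame pops, ERET, stack switching) happens in the assembly above, and the sketch assumes the usual ThreadX internal globals declared in tx_thread.h.

    #define TX_SOURCE_CODE
    #include "tx_api.h"
    #include "tx_thread.h"

    /* Illustrative C restatement of the restore decision flow; never compiled
       into the port. */
    static void context_restore_flow_sketch(void)
    {
        if (--_tx_thread_system_state != ((ULONG) 0))
        {
            /* Nested interrupt: pop the scratch-register frame and ERET back
               into the interrupted ISR. */
        }
        else if (_tx_thread_current_ptr == TX_NULL)
        {
            /* The scheduling loop was interrupted: ERET straight back into
               _tx_thread_schedule(). */
        }
        else if ((_tx_thread_preempt_disable != ((UINT) 0)) ||
                 (_tx_thread_current_ptr == _tx_thread_execute_ptr))
        {
            /* Same thread is still highest priority, or preemption is
               disabled: restore its interrupt frame and ERET. */
        }
        else
        {
            /* Preemption: finish saving the callee-saved (and, with
               ENABLE_ARM_FP, the FP/NEON) registers, stash any remaining
               time-slice in the thread, clear _tx_thread_current_ptr, and
               fall into _tx_thread_schedule(). */
        }
    }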
diff --git a/ports/cortex_a34/ac6/src/tx_thread_context_save.S b/ports/cortex_a34/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..859a1e44
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,216 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_save(VOID)
+// {
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save, @function
+_tx_thread_context_save:
+
+    /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+       out, x29 (frame pointer) and x30 (link register) are saved, execution is at the
+       configured exception level (EL1, EL2, or EL3), and all other registers are intact.  */
+
+ /* Check for a nested interrupt condition. */
+ // if (_tx_thread_system_state++)
+ // {
+
+ STP x0, x1, [sp, #-16]! // Save x0, x1
+ STP x2, x3, [sp, #-16]! // Save x2, x3
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ CMP w2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD w2, w2, #1 // Increment the nested interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x0, SPSR_EL1 // Pickup SPSR
+ MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x0, SPSR_EL2 // Pickup SPSR
+ MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x0, SPSR_EL3 // Pickup SPSR
+ MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x0, x1, [sp, #-16]! // Save SPSR, ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ /* Return to the ISR. */
+
+ RET // Return to ISR
+
+__tx_thread_not_nested_save:
+ // }
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ // else if (_tx_thread_current_ptr)
+ // {
+
+ ADD w2, w2, #1 // Increment the interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x4, SPSR_EL1 // Pickup SPSR
+ MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x4, SPSR_EL2 // Pickup SPSR
+ MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x4, SPSR_EL3 // Pickup SPSR
+ MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x4, x5, [sp, #-16]! // Save SPSR, ELR
+
+ /* Save the current stack pointer in the thread's control block. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+
+ MOV x4, sp //
+ STR x4, [x0, #8] // Save thread stack pointer
+
+ /* Switch to the system stack. */
+ // sp = _tx_thread_system_stack_ptr;
+
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ RET // Return to caller
+
+ // }
+ // else
+ // {
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ ADD sp, sp, #48 // Recover saved registers
+ RET // Continue IRQ processing
+
+ // }
+// }
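
For reference, the scratch-register frame that _tx_thread_context_save leaves on the interrupted thread's stack can be pictured as the struct below (lowest address first). This is purely an illustration of the layout implied by the STP sequence above and the matching LDP sequence in tx_thread_context_restore.S; the x29/x30 pair at the end is pushed by the vector stub before this routine is entered.

    #include <stdint.h>

    /* Reference-only picture of the saved frame, lowest address first. */
    typedef struct
    {
        uint64_t spsr;          /* saved SPSR_ELx */
        uint64_t elr;           /* saved ELR_ELx (point of interrupt) */
        uint64_t x18, x19;
        uint64_t x16, x17;
        uint64_t x14, x15;
        uint64_t x12, x13;
        uint64_t x10, x11;
        uint64_t x8,  x9;
        uint64_t x6,  x7;
        uint64_t x4,  x5;
        uint64_t x2,  x3;
        uint64_t x0,  x1;
        uint64_t x29, x30;      /* pushed by the vector stub before the BL here */
    } interrupt_scratch_frame_sketch_t;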
diff --git a/ports/cortex_a34/ac6/src/tx_thread_fp_disable.c b/ports/cortex_a34/ac6/src/tx_thread_fp_disable.c
new file mode 100644
index 00000000..e8a7f213
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_fp_disable.c
@@ -0,0 +1,94 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/**   Thread                                                              */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* Scott Larson, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/*    None                                                                */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_disable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_FALSE;
+ }
+ }
+}
diff --git a/ports/cortex_a34/ac6/src/tx_thread_fp_enable.c b/ports/cortex_a34/ac6/src/tx_thread_fp_enable.c
new file mode 100644
index 00000000..4e69205c
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_fp_enable.c
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/*    This function enables the FP for the currently executing thread.    */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_enable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now setup the FP enable flag in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_TRUE;
+ }
+ }
+}
+
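A minimal usage sketch for the two FP services above: a hypothetical thread entry that turns the per-thread FP context handling on around its floating-point work. It assumes the port library was built with ENABLE_ARM_FP so the scheduler saves and restores the FP/NEON registers for FP-enabled threads.

    #include "tx_api.h"

    VOID _tx_thread_fp_enable(VOID);     /* provided by this port */
    VOID _tx_thread_fp_disable(VOID);    /* provided by this port */

    /* Hypothetical thread entry that brackets its floating-point work. */
    VOID filter_thread_entry(ULONG input)
    {
        double gain;

        _tx_thread_fp_enable();          /* must be called from thread context */
        gain = (double)input * 0.5;      /* FP/NEON state is now preserved across preemption */
        (void)gain;
        _tx_thread_fp_disable();         /* stop saving FP context for this thread */
    }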
diff --git a/ports/cortex_a34/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a34/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..6a5a7741
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,81 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control, @function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x1, DAIF // Pickup current interrupt posture
+
+ /* Apply the new interrupt posture. */
+
+ MSR DAIF, x0 // Set new interrupt posture
+ MOV x0, x1 // Setup return value
+ RET // Return to caller
+// }
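
Application code normally reaches this routine through the tx_interrupt_control service. A minimal sketch follows, assuming TX_INT_DISABLE is defined by this port's tx_port.h as in other ThreadX ports; on this port the posture values are raw DAIF register contents.

    #include "tx_api.h"

    static volatile ULONG shared_counter;    /* example data shared with an ISR */

    VOID bump_shared_counter(VOID)
    {
        UINT old_posture;

        old_posture = tx_interrupt_control(TX_INT_DISABLE);   /* mask IRQ/FIQ, remember old DAIF */
        shared_counter++;                                      /* short critical section */
        tx_interrupt_control(old_posture);                     /* restore the previous posture */
    }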
diff --git a/ports/cortex_a34/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a34/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..d0062ef8
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,79 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/*    This function is responsible for disabling interrupts.              */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_disable(void)
+// {
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable, @function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x0, DAIF // Pickup current interrupt lockout posture
+
+ /* Mask interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a34/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a34/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..1b6261ba
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,77 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore, @function
+_tx_thread_interrupt_restore:
+
+ /* Restore the old interrupt posture. */
+
+ MSR DAIF, x0 // Setup the old posture
+ RET // Return to caller
+
+// }
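
Inside the kernel, the disable/restore pair above is normally reached through the TX_DISABLE and TX_RESTORE macros from tx_port.h. A sketch of that idiom, assuming the usual macro definitions for this port:

    #define TX_SOURCE_CODE
    #include "tx_api.h"

    /* Typical kernel-side critical section; TX_DISABLE/TX_RESTORE are expected
       to resolve to the interrupt disable/restore routines above. */
    static VOID kernel_critical_section_sketch(VOID)
    {
    TX_INTERRUPT_SAVE_AREA

        TX_DISABLE                       /* lockout interrupts, save the old posture */

        /* ... touch protected kernel data here ... */

        TX_RESTORE                       /* restore the saved posture */
    }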
diff --git a/ports/cortex_a34/ac6/src/tx_thread_schedule.S b/ports/cortex_a34/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..9a7a7262
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_schedule(VOID)
+// {
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule, @function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+ MSR DAIFClr, 0x3 // Enable interrupts
+
+ /* Wait for a thread to execute. */
+ // do
+ // {
+
+ LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+#ifdef TX_ENABLE_WFI
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BNE _tx_thread_schedule_thread //
+ WFI //
+ B __tx_thread_schedule_loop // Keep looking for a thread
+_tx_thread_schedule_thread:
+#else
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+#endif
+
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Setup the current thread pointer. */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR x0, [x1, #0] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
+
+ LDR w2, [x0, #4] // Pickup run counter
+ LDR w3, [x0, #36] // Pickup time-slice for this thread
+ ADD w2, w2, #1 // Increment thread run-counter
+ STR w2, [x0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ // variable
+ LDR x4, [x0, #8] // Switch stack pointers
+ MOV sp, x4 //
+ STR w3, [x2, #0] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV x19, x0 // Save x0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV x0, x19 // Restore x0
+#endif
+
+ /* Switch to the thread's stack. */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ CMP x5, #0 // Check for synchronous context switch (ELR_EL1 = NULL)
+ BEQ _tx_solicited_return
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_interrupt_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q30, q31, [sp], #32 // Recover q30, q31
+ LDP q28, q29, [sp], #32 // Recover q28, q29
+ LDP q26, q27, [sp], #32 // Recover q26, q27
+ LDP q24, q25, [sp], #32 // Recover q24, q25
+ LDP q22, q23, [sp], #32 // Recover q22, q23
+ LDP q20, q21, [sp], #32 // Recover q20, q21
+ LDP q18, q19, [sp], #32 // Recover q18, q19
+ LDP q16, q17, [sp], #32 // Recover q16, q17
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+ LDP q6, q7, [sp], #32 // Recover q6, q7
+ LDP q4, q5, [sp], #32 // Recover q4, q5
+ LDP q2, q3, [sp], #32 // Recover q2, q3
+ LDP q0, q1, [sp], #32 // Recover q0, q1
+_skip_interrupt_fp_restore:
+#endif
+ LDP x28, x29, [sp], #16 // Recover x28
+ LDP x26, x27, [sp], #16 // Recover x26, x27
+ LDP x24, x25, [sp], #16 // Recover x24, x25
+ LDP x22, x23, [sp], #16 // Recover x22, x23
+ LDP x20, x21, [sp], #16 // Recover x20, x21
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+_tx_solicited_return:
+
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_solicited_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+_skip_solicited_fp_restore:
+#endif
+ LDP x27, x28, [sp], #16 // Recover x27, x28
+ LDP x25, x26, [sp], #16 // Recover x25, x26
+ LDP x23, x24, [sp], #16 // Recover x23, x24
+ LDP x21, x22, [sp], #16 // Recover x21, x22
+ LDP x19, x20, [sp], #16 // Recover x19, x20
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ MSR DAIF, x4 // Recover DAIF
+ RET // Return to caller
+// }
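
The commented pseudocode above maps onto roughly the following C, shown only as a reading aid; the stack switch and the ERET/RET returns have no C equivalent and remain in the assembly.

    #define TX_SOURCE_CODE
    #include "tx_api.h"
    #include "tx_thread.h"
    #include "tx_timer.h"

    /* Reading aid only: the wait loop and bookkeeping performed above. */
    static void schedule_flow_sketch(void)
    {
        /* Spin (or WFI when TX_ENABLE_WFI is defined) until a thread is ready. */
        while (_tx_thread_execute_ptr == TX_NULL)
        {
        }

        /* Interrupts are locked out at this point in the real code. */
        _tx_thread_current_ptr = _tx_thread_execute_ptr;
        _tx_thread_current_ptr -> tx_thread_run_count++;
        _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;

        /* The assembly then switches to the thread's stack and restores either
           a full interrupt frame (ERET) or a minimal solicited frame (RET). */
    }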
diff --git a/ports/cortex_a34/ac6/src/tx_thread_stack_build.S b/ports/cortex_a34/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..5b7e945a
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build, @function
+_tx_thread_stack_build:
+
+
+    /* Build an interrupt frame.  For this ARMv8-A port it should look like this:
+
+    Stack Top:      SPSR        Initial SPSR
+ ELR Point of interrupt
+ x28 Initial value for x28
+ not used Not used
+ x26 Initial value for x26
+ x27 Initial value for x27
+ x24 Initial value for x24
+ x25 Initial value for x25
+ x22 Initial value for x22
+ x23 Initial value for x23
+ x20 Initial value for x20
+ x21 Initial value for x21
+ x18 Initial value for x18
+ x19 Initial value for x19
+ x16 Initial value for x16
+ x17 Initial value for x17
+ x14 Initial value for x14
+ x15 Initial value for x15
+ x12 Initial value for x12
+ x13 Initial value for x13
+ x10 Initial value for x10
+ x11 Initial value for x11
+ x8 Initial value for x8
+ x9 Initial value for x9
+ x6 Initial value for x6
+ x7 Initial value for x7
+ x4 Initial value for x4
+ x5 Initial value for x5
+ x2 Initial value for x2
+ x3 Initial value for x3
+ x0 Initial value for x0
+ x1 Initial value for x1
+ x29 Initial value for x29 (frame pointer)
+ x30 Initial value for x30 (link register)
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR x4, [x0, #24] // Pickup end of stack area
+ BIC x4, x4, #0xF // Ensure 16-byte alignment
+
+ /* Actually build the stack frame. */
+
+ MOV x2, #0 // Build clear value
+ MOV x3, #0 //
+
+ STP x2, x3, [x4, #-16]! // Set backtrace to 0
+ STP x2, x3, [x4, #-16]! // Set initial x29, x30
+ STP x2, x3, [x4, #-16]! // Set initial x0, x1
+ STP x2, x3, [x4, #-16]! // Set initial x2, x3
+ STP x2, x3, [x4, #-16]! // Set initial x4, x5
+ STP x2, x3, [x4, #-16]! // Set initial x6, x7
+ STP x2, x3, [x4, #-16]! // Set initial x8, x9
+ STP x2, x3, [x4, #-16]! // Set initial x10, x11
+ STP x2, x3, [x4, #-16]! // Set initial x12, x13
+ STP x2, x3, [x4, #-16]! // Set initial x14, x15
+ STP x2, x3, [x4, #-16]! // Set initial x16, x17
+ STP x2, x3, [x4, #-16]! // Set initial x18, x19
+ STP x2, x3, [x4, #-16]! // Set initial x20, x21
+ STP x2, x3, [x4, #-16]! // Set initial x22, x23
+ STP x2, x3, [x4, #-16]! // Set initial x24, x25
+ STP x2, x3, [x4, #-16]! // Set initial x26, x27
+ STP x2, x3, [x4, #-16]! // Set initial x28
+#ifdef EL1
+ MOV x2, #0x4 // Build initial SPSR (EL1)
+#else
+#ifdef EL2
+ MOV x2, #0x8 // Build initial SPSR (EL2)
+#else
+ MOV x2, #0xC // Build initial SPSR (EL3)
+#endif
+#endif
+ MOV x3, x1 // Build initial ELR
+ STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
+
+ /* Setup stack pointer. */
+    // thread_ptr -> tx_thread_stack_ptr =  x4;
+
+    STR     x4, [x0, #8]                        // Save stack pointer in thread's control block
+ RET // Return to caller
+
+// }
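
This routine is only ever reached through tx_thread_create, which passes the thread's shell entry function; the fake interrupt frame above is what _tx_thread_schedule later pops to start the thread. A small, self-contained creation example (the stack size, priorities, and names are arbitrary choices made for the illustration):

    #include "tx_api.h"

    #define DEMO_STACK_SIZE     1024            /* arbitrary size for the example */

    static TX_THREAD demo_thread;
    static UCHAR     demo_stack[DEMO_STACK_SIZE];

    static VOID demo_entry(ULONG input)
    {
        (void)input;
        while (1)
        {
            tx_thread_sleep(100);               /* placeholder work */
        }
    }

    UINT create_demo_thread(VOID)
    {
        /* tx_thread_create builds the frame described above at the top of demo_stack. */
        return tx_thread_create(&demo_thread, "demo thread", demo_entry, 0,
                                demo_stack, DEMO_STACK_SIZE,
                                16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);
    }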
diff --git a/ports/cortex_a34/ac6/src/tx_thread_system_return.S b/ports/cortex_a34/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..7d42b63d
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,151 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/*    are going to get clobbered by a function call anyway.                 */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_system_return(VOID)
+// {
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return, @function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ MRS x0, DAIF // Pickup DAIF
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+ STP x19, x20, [sp, #-16]! // Save x19, x20
+ STP x21, x22, [sp, #-16]! // Save x21, x22
+ STP x23, x24, [sp, #-16]! // Save x23, x24
+ STP x25, x26, [sp, #-16]! // Save x25, x26
+ STP x27, x28, [sp, #-16]! // Save x27, x28
+ LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR x6, [x5, #0] // Pickup current thread pointer
+
+#ifdef ENABLE_ARM_FP
+ LDR w7, [x6, #248] // Pickup FP enable flag
+ CMP w7, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+
+ MOV x1, #0 // Clear x1
+    STP     x0, x1, [sp, #-16]!                 // Save DAIF and clear value for ELR_EL1
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ MOV x19, x5 // Save x5
+ MOV x20, x6 // Save x6
+ BL _tx_execution_thread_exit // Call the thread exit function
+ MOV x5, x19 // Restore x5
+ MOV x6, x20 // Restore x6
+#endif
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR w1, [x2, #0] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
+
+ MOV x4, sp //
+ STR x4, [x6, #8] // Save thread stack pointer
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+ /* Determine if the time-slice is active. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ MOV x4, #0 // Build clear value
+ CMP w1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save the current remaining time-slice. */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w4, [x2, #0] // Clear time-slice
+ STR w1, [x6, #36] // Store current time-slice
+
+ // }
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+ // _tx_thread_current_ptr = TX_NULL;
+
+ STR x4, [x5, #0] // Clear current thread pointer
+
+ B _tx_thread_schedule // Jump to scheduler!
+
+// }
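
The zero written alongside the saved DAIF here is what lets _tx_thread_schedule tell this minimal solicited frame apart from a full interrupt frame (the "CMP x5, #0" test in tx_thread_schedule.S). A reference-only sketch of that tag:

    #include <stdint.h>

    /* Reference-only view of the first 16 bytes popped by _tx_thread_schedule. */
    typedef struct
    {
        uint64_t spsr_or_daif;   /* SPSR_ELx in an interrupt frame, saved DAIF here */
        uint64_t elr_or_zero;    /* point of interrupt, or 0 for a solicited switch */
    } frame_tag_sketch_t;

    static int is_solicited_frame(const frame_tag_sketch_t *top_of_stack)
    {
        /* Mirrors the "CMP x5, #0 / BEQ _tx_solicited_return" test in
           tx_thread_schedule.S. */
        return (top_of_stack->elr_or_zero == 0u);
    }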
diff --git a/ports/cortex_a34/ac6/src/tx_timer_interrupt.S b/ports/cortex_a34/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..5810b5c2
--- /dev/null
+++ b/ports/cortex_a34/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_timer_interrupt(VOID)
+// {
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt, @function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+ // _tx_timer_system_clock++;
+
+ LDR x1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR w0, [x1, #0] // Pickup system clock
+ ADD w0, w0, #1 // Increment system clock
+ STR w0, [x1, #0] // Store new system clock
+
+ /* Test for time-slice expiration. */
+ /* if (_tx_timer_time_slice)
+ { */
+
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+ /* _tx_timer_time_slice--; */
+
+ SUB w2, w2, #1 // Decrement the time-slice
+ STR w2, [x3, #0] // Store new time-slice value
+
+ /* Check for expiration. */
+    /* if (_tx_timer_time_slice == 0)  */
+
+ CMP w2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+ /* _tx_timer_expired_time_slice = TX_TRUE; */
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV w0, #1 // Build expired value
+ STR w0, [x3, #0] // Set time-slice expiration flag
+
+ /* } */
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+ // if (*_tx_timer_current_ptr)
+ // {
+
+ LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
+ LDR x0, [x1, #0] // Pickup current timer
+ LDR x2, [x0, #0] // Pickup timer list entry
+ CMP x2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+ // _tx_timer_expired = TX_TRUE;
+
+ LDR x3, =_tx_timer_expired // Pickup expiration flag address
+ MOV w2, #1 // Build expired value
+ STR w2, [x3, #0] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+ // }
+ // else
+ // {
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ // _tx_timer_current_ptr++;
+
+ ADD x0, x0, #8 // Move to next timer
+
+ /* Check for wrap-around. */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
+
+ LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
+ LDR x2, [x3, #0] // Pickup list end
+ CMP x0, x2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wrap-around logic
+
+ /* Wrap to beginning of list. */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
+
+ LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
+ LDR x0, [x3, #0] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR x0, [x1, #0] // Store new current timer pointer
+ // }
+
+__tx_timer_done:
+
+
+ /* See if anything has expired. */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
+ LDR w2, [x3, #0] // Pickup time-slice expired flag
+ CMP w2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR x1, =_tx_timer_expired // Pickup addr of other expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+
+ /* Did a timer expire? */
+ // if (_tx_timer_expired)
+ // {
+
+ LDR x1, =_tx_timer_expired // Pickup addr of expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ // _tx_timer_expiration_process();
+
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+ // }
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+ // if (_tx_timer_expired_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR w2, [x3, #0] // Pickup the actual flag
+ CMP w2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+ // _tx_thread_time_slice();
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+    // }
+
+__tx_timer_not_ts_expiration:
+
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ // }
+
+__tx_timer_nothing_expired:
+
+ RET // Return to caller
+
+// }
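
This routine is meant to be called from the periodic tick ISR after the interrupted context has been saved. A hedged sketch of such a tick body follows; reprogram_periodic_timer() is a hypothetical board-specific helper, not part of ThreadX.

    #include "tx_api.h"

    VOID _tx_timer_interrupt(VOID);              /* provided by this port */
    extern void reprogram_periodic_timer(void);  /* hypothetical board-specific helper */

    /* Tick ISR body; the vector glue is expected to have called
       _tx_thread_context_save beforehand and to branch to
       _tx_thread_context_restore afterwards. */
    void periodic_tick_body(void)
    {
        reprogram_periodic_timer();   /* acknowledge/reload the tick source */
        _tx_timer_interrupt();        /* advance ThreadX timers and time-slicing */
    }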
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/.cproject b/ports/cortex_a34/gnu/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..d801e51a
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/.cproject
@@ -0,0 +1,242 @@
diff --git a/ports/arc_em/metaware/test_validation/threadx_validation/.project b/ports/cortex_a34/gnu/example_build/sample_threadx/.project
similarity index 94%
rename from ports/arc_em/metaware/test_validation/threadx_validation/.project
rename to ports/cortex_a34/gnu/example_build/sample_threadx/.project
index 247d9fca..a1b15572 100644
--- a/ports/arc_em/metaware/test_validation/threadx_validation/.project
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/.project
@@ -1,6 +1,6 @@
-	<name>sample_threadx_validation</name>
+	<name>sample_threadx</name>
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3.h b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..23bc7fd8
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - assign a shared peripheral interrupt to a
+ *     security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - assign a block of 32 shared peripheral
+ *     interrupts to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - assign all shared peripheral interrupts
+ *     to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * The model for calling Redistributor functions is that, rather than
+ * identifying the target redistributor with every function call, the
+ * SelectRedistributor() function is used to identify which redistributor
+ * is to be used for all functions until a different redistributor is
+ * explicitly selected
+ */
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
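+
+/*
+ * Worked example of the note above (illustrative, assuming GICD_CTLR.DS == 0):
+ * a Non-secure write of priority 0xA0 is stored as (0xA0 >> 1) | 0x80 == 0xD0,
+ * which is what Secure software will read back. Callers that mix Secure and
+ * Non-secure configuration must make that adjustment themselves, because this
+ * function does not.
+ */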
+
+/*
+ * GetPrivateIntPriority - read the priority of a private
+ *     (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * Int priority
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * GetPrivateIntPending - query whether a private (SGI/PPI) interrupt is pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntSecurity - assign a private (SGI/PPI) interrupt to a
+ *     security group
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetPrivateIntSecurityBlock - assign all 32 private (SGI/PPI)
+ *     interrupts to a security group
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * group - the group for the interrupt
+ *
+ * Returns:
+ *
+ *
+ */
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group);
+
+#endif /* ndef GICV3_h */
+
+/* EOF GICv3.h */
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_aliases.h
new file mode 100644
index 00000000..0928d14c
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_aliases.h
@@ -0,0 +1,113 @@
+//
+// Aliases for GICv3 registers
+//
+// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef GICV3_ALIASES_H
+#define GICV3_ALIASES_H
+
+#ifndef __clang__
+
+/*
+ * Mapping of MSR and MRS to physical and virtual CPU interface registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-5
+ */
+#define ICC_AP0R0_EL1 S3_0_C12_C8_4
+#define ICC_AP0R1_EL1 S3_0_C12_C8_5
+#define ICC_AP0R2_EL1 S3_0_C12_C8_6
+#define ICC_AP0R3_EL1 S3_0_C12_C8_7
+
+#define ICC_AP1R0_EL1 S3_0_C12_C9_0
+#define ICC_AP1R1_EL1 S3_0_C12_C9_1
+#define ICC_AP1R2_EL1 S3_0_C12_C9_2
+#define ICC_AP1R3_EL1 S3_0_C12_C9_3
+
+#define ICC_ASGI1R_EL1 S3_0_C12_C11_6
+
+#define ICC_BPR0_EL1 S3_0_C12_C8_3
+#define ICC_BPR1_EL1 S3_0_C12_C12_3
+
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+
+#define ICC_DIR_EL1 S3_0_C12_C11_1
+
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+
+/*
+ * Mapping of MSR and MRS to virtual interface control registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-6
+ */
+#define ICH_AP0R0_EL2 S3_4_C12_C8_0
+#define ICH_AP0R1_EL2 S3_4_C12_C8_1
+#define ICH_AP0R2_EL2 S3_4_C12_C8_2
+#define ICH_AP0R3_EL2 S3_4_C12_C8_3
+
+#define ICH_AP1R0_EL2 S3_4_C12_C9_0
+#define ICH_AP1R1_EL2 S3_4_C12_C9_1
+#define ICH_AP1R2_EL2 S3_4_C12_C9_2
+#define ICH_AP1R3_EL2 S3_4_C12_C9_3
+
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+
+#define ICH_ELRSR_EL2 S3_4_C12_C11_5
+
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define ICH_LR0_EL2 S3_4_C12_C12_0
+#define ICH_LR1_EL2 S3_4_C12_C12_1
+#define ICH_LR2_EL2 S3_4_C12_C12_2
+#define ICH_LR3_EL2 S3_4_C12_C12_3
+#define ICH_LR4_EL2 S3_4_C12_C12_4
+#define ICH_LR5_EL2 S3_4_C12_C12_5
+#define ICH_LR6_EL2 S3_4_C12_C12_6
+#define ICH_LR7_EL2 S3_4_C12_C12_7
+#define ICH_LR8_EL2 S3_4_C12_C13_0
+#define ICH_LR9_EL2 S3_4_C12_C13_1
+#define ICH_LR10_EL2 S3_4_C12_C13_2
+#define ICH_LR11_EL2 S3_4_C12_C13_3
+#define ICH_LR12_EL2 S3_4_C12_C13_4
+#define ICH_LR13_EL2 S3_4_C12_C13_5
+#define ICH_LR14_EL2 S3_4_C12_C13_6
+#define ICH_LR15_EL2 S3_4_C12_C13_7
+
+#endif /* not __clang__ */
+
+#endif /* GICV3_ALIASES_H */
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicc.h
new file mode 100644
index 00000000..2b8a2d3e
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicc.h
@@ -0,0 +1,254 @@
+/*
+ * GICv3_gicc.h - prototypes and inline functions for GICC system register operations
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_gicc_h
+#define GICV3_gicc_h
+
+#include "GICv3_aliases.h"
+
+#define stringify_no_expansion(x) #x
+#define stringify(x) stringify_no_expansion(x)
+
+/**********************************************************************/
+
+typedef enum
+{
+ sreSRE = (1 << 0),
+ sreDFB = (1 << 1),
+ sreDIB = (1 << 2),
+ sreEnable = (1 << 3)
+} ICC_SREBits_t;
+
+static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL2)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL2(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL2)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
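+
+/*
+ * Illustrative usage sketch (not part of the original header): enabling
+ * system register access to the CPU interface at EL1 without disturbing the
+ * DFB/DIB bits, using the accessors above.
+ *
+ *     uint64_t sre = getICC_SRE_EL1();
+ *     setICC_SRE_EL1((ICC_SREBits_t)(sre | sreSRE));
+ */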
+
+/**********************************************************************/
+
+typedef enum
+{
+ igrpEnable = (1 << 0),
+ igrpEnableGrp1NS = (1 << 0),
+ igrpEnableGrp1S = (1 << 2)
+} ICC_IGRPBits_t;
+
+static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN0_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ ctlrCBPR = (1 << 0),
+ ctlrCBPR_EL1S = (1 << 0),
+ ctlrEOImode = (1 << 1),
+ ctlrCBPR_EL1NS = (1 << 1),
+ ctlrEOImode_EL3 = (1 << 2),
+ ctlrEOImode_EL1S = (1 << 3),
+ ctlrEOImode_EL1NS = (1 << 4),
+ ctlrRM = (1 << 5),
+ ctlrPMHE = (1 << 6)
+} ICC_CTLRBits_t;
+
+static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
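+
+/*
+ * Illustrative usage sketch (not part of the original header): selecting
+ * split priority-drop/deactivation at EL1. With ctlrEOImode set,
+ * setICC_EOIR1() only drops the running priority and the interrupt must
+ * later be deactivated with setICC_DIR().
+ *
+ *     setICC_CTLR_EL1((ICC_CTLRBits_t)(getICC_CTLR_EL1() | ctlrEOImode));
+ */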
+
+/**********************************************************************/
+
+static inline uint64_t getICC_IAR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_IAR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_EOIR0(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR0_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_EOIR1(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR1_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_DIR(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_DIR_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_PMR(uint32_t priority)
+{
+ asm("msr "stringify(ICC_PMR_EL1)", %0\n; isb" :: "r" ((uint64_t)priority));
+}
+
+static inline void setICC_BPR0(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR0_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline void setICC_BPR1(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR1_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline uint64_t getICC_BPR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_BPR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_RPR(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_RPR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
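+
+/*
+ * Illustrative usage sketch (not part of the original header): a minimal
+ * Group 1 acknowledge/EOI sequence built from the accessors above.
+ * handle_interrupt() is a hypothetical handler.
+ *
+ *     uint32_t intid = (uint32_t)getICC_IAR1();   // acknowledge
+ *     if (intid < 1020)                           // 1020-1023 are special INTIDs
+ *     {                                           // (1023 == spurious)
+ *         handle_interrupt(intid);
+ *         setICC_EOIR1(intid);                    // end of interrupt
+ *     }
+ */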
+
+/**********************************************************************/
+
+typedef enum
+{
+ sgirIRMTarget = 0,
+ sgirIRMAll = (1ull << 40)
+} ICC_SGIRBits_t;
+
+static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI0R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_ASGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
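+
+/*
+ * Illustrative usage sketch (not part of the original header): sending Group 1
+ * SGI 3 to the PE with affinity 0.0.0.1. With sgirIRMTarget, the target list
+ * is a bitmap of Aff0 values within the Aff3.Aff2.Aff1 cluster, so bit 1
+ * selects Aff0 == 1.
+ *
+ *     setICC_SGI1R(0, 0, 0, sgirIRMTarget, (uint16_t)(1u << 1), 3);
+ */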
+
+#endif /* ndef GICV3_gicc_h */
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicd.c
new file mode 100644
index 00000000..2cf9e843
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicd.c
@@ -0,0 +1,339 @@
+/*
+ * GICv3_gicd.c - generic driver code for GICv3 distributor
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include <stdint.h>
+
+#include "GICv3.h"
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000
+ const volatile uint32_t GICD_TYPER; // +0x0004
+ const volatile uint32_t GICD_IIDR; // +0x0008
+
+ const volatile uint32_t padding0; // +0x000c
+
+ volatile uint32_t GICD_STATUSR; // +0x0010
+
+ const volatile uint32_t padding1[3]; // +0x0014
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040
+ const volatile uint32_t padding2; // +0x0044
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048
+ const volatile uint32_t padding3; // +0x004c
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050
+ const volatile uint32_t padding4; // +0x0054
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058
+
+ const volatile uint32_t padding5[3]; // +0x005c
+
+ volatile uint32_t GICD_SEIR; // +0x0068
+
+ const volatile uint32_t padding6[5]; // +0x006c
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400
+ volatile uint8_t GICD_ITARGETSR[1024]; // +0x0800
+ volatile uint32_t GICD_ICFGR[64]; // +0x0c00
+ volatile uint32_t GICD_IGRPMODR[32]; // +0x0d00
+ const volatile uint32_t padding7[32]; // +0x0d80
+ volatile uint32_t GICD_NSACR[64]; // +0x0e00
+
+ volatile uint32_t GICD_SGIR; // +0x0f00
+
+ const volatile uint32_t padding8[3]; // +0x0f04
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0f10
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0f20
+
+ const volatile uint32_t padding9[52]; // +0x0f30
+ const volatile uint32_t padding10[5120]; // +0x1000
+
+ volatile uint64_t GICD_IROUTER[1024]; // +0x6000
+} GICv3_distributor;
+
+/*
+ * use the scatter file to place GICD
+ */
+GICv3_distributor __attribute__((section(".gicd"))) gicd;
+
+void ConfigGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR |= flags;
+}
+
+void DisableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR &= ~flags;
+}
+
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync)
+{
+ if (dosync)
+ {
+ const uint32_t tmask = gicdctlr_ARE_S | gicdctlr_ARE_NS;
+ const uint32_t tval = flags & tmask;
+
+ while ((gicd.GICD_CTLR & tmask) != tval)
+ continue;
+ }
+ else
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ISENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void DisableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ICENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void SetSPIPriority(uint32_t id, uint32_t priority)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ gicd.GICD_IPRIORITYR[bank] = priority;
+}
+
+uint32_t GetSPIPriority(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ return (uint32_t)(gicd.GICD_IPRIORITYR[bank]);
+}
+
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ gicd.GICD_IROUTER[bank] = affinity | (uint64_t)mode;
+}
+
+uint64_t GetSPIRoute(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ return gicd.GICD_IROUTER[bank];
+}
+
+void SetSPITarget(uint32_t id, uint32_t target)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ gicd.GICD_ITARGETSR[bank] = target;
+}
+
+uint32_t GetSPITarget(uint32_t id)
+{
+ uint32_t bank;
+
+    /*
+     * GICD_ITARGETSR has one byte-wide entry per interrupt (each 32-bit
+     * register covers 4 interrupts, with an 8-bit target bitmap per interrupt)
+     */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ return (uint32_t)(gicd.GICD_ITARGETSR[bank]);
+}
+
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config)
+{
+ uint32_t bank, tmp;
+
+ /*
+ * GICD_ICFGR has 16 interrupts per register, i.e. 2-bits of
+ * configuration per register
+ */
+ bank = (id >> 4) & RANGE_LIMIT(gicd.GICD_ICFGR);
+ config &= 3;
+
+ id = (id & 0xf) << 1;
+
+ tmp = gicd.GICD_ICFGR[bank];
+ tmp &= ~(3 << id);
+ tmp |= config << id;
+ gicd.GICD_ICFGR[bank] = tmp;
+}
+
+void SetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ISPENDR[bank] = 1 << id;
+}
+
+void ClearSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ICPENDR[bank] = 1 << id;
+}
+
+uint32_t GetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ return (gicd.GICD_ICPENDR[bank] >> id) & 1;
+}
+
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group)
+{
+ uint32_t bank, groupmod;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_IGROUPR);
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicd.GICD_IGROUPR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGROUPR[bank] &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicd.GICD_IGRPMODR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGRPMODR[bank] &= ~(1 << id);
+}
+
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group)
+{
+ uint32_t groupmod;
+ const uint32_t nbits = (sizeof group * 8) - 1;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ block &= RANGE_LIMIT(gicd.GICD_IGROUPR);
+
+ /*
+ * get each bit of group config duplicated over all 32-bits in a word
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicd.GICD_IGROUPR[block] = group;
+ gicd.GICD_IGRPMODR[block] = groupmod;
+}
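+
+/*
+ * Worked example of the sign-extension trick above (illustrative): with a
+ * 4-byte enum, nbits == 31, so (group << 31) >> 31 arithmetically replicates
+ * bit 0 of group across the whole word and (group << 30) >> 31 replicates
+ * bit 1. For group == 2 (modifier bit set, group bit clear) this writes
+ * 0x00000000 to GICD_IGROUPR[block] and 0xFFFFFFFF to GICD_IGRPMODR[block].
+ */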
+
+void SetSPISecurityAll(GICIGROUPRBits_t group)
+{
+ uint32_t block;
+
+ /*
+ * GICD_TYPER.ITLinesNumber gives (No. SPIS / 32) - 1, and we
+ * want to iterate over all blocks excluding 0 (which are the
+ * SGI/PPI interrupts, and not relevant here)
+ */
+ for (block = (gicd.GICD_TYPER & ((1 << 5) - 1)); block > 0; --block)
+ SetSPISecurityBlock(block, group);
+}
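+
+/*
+ * Worked example of the loop bound above (illustrative): if GICD_TYPER
+ * reports ITLinesNumber == 5, the Distributor implements SPIs up to
+ * INTID 32*(5+1) - 1 == 191, and the loop walks blocks 5 down to 1
+ * (INTIDs 160-191, 128-159, ..., 32-63), leaving block 0 (the SGIs and
+ * PPIs) to the Redistributor code.
+ */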
+
+/* EOF GICv3_gicd.c */
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicr.c
new file mode 100644
index 00000000..b0d22c40
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/GICv3_gicr.c
@@ -0,0 +1,308 @@
+/*
+ * GICv3_gicr.c - generic driver code for GICv3 redistributor
+ *
+ * Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include "GICv3.h"
+
+/*
+ * physical LPI Redistributor register map
+ */
+typedef struct
+{
+ volatile uint32_t GICR_CTLR; // +0x0000 - RW - Redistributor Control Register
+ const volatile uint32_t GICR_IIDR; // +0x0004 - RO - Implementer Identification Register
+ const volatile uint32_t GICR_TYPER[2]; // +0x0008 - RO - Redistributor Type Register
+ volatile uint32_t GICR_STATUSR; // +0x0010 - RW - Error Reporting Status Register, optional
+ volatile uint32_t GICR_WAKER; // +0x0014 - RW - Redistributor Wake Register
+ const volatile uint32_t padding1[2]; // +0x0018 - RESERVED
+#ifndef USE_GIC600
+ volatile uint32_t IMPDEF1[8]; // +0x0020 - ?? - IMPLEMENTATION DEFINED
+#else
+ volatile uint32_t GICR_FCTLR; // +0x0020 - RW - Function Control Register
+ volatile uint32_t GICR_PWRR; // +0x0024 - RW - Power Management Control Register
+ volatile uint32_t GICR_CLASS; // +0x0028 - RW - Class Register
+ const volatile uint32_t padding2[5]; // +0x002C - RESERVED
+#endif
+ volatile uint64_t GICR_SETLPIR; // +0x0040 - WO - Set LPI Pending Register
+ volatile uint64_t GICR_CLRLPIR; // +0x0048 - WO - Clear LPI Pending Register
+ const volatile uint32_t padding3[8]; // +0x0050 - RESERVED
+ volatile uint64_t GICR_PROPBASER; // +0x0070 - RW - Redistributor Properties Base Address Register
+ volatile uint64_t GICR_PENDBASER; // +0x0078 - RW - Redistributor LPI Pending Table Base Address Register
+ const volatile uint32_t padding4[8]; // +0x0080 - RESERVED
+ volatile uint64_t GICR_INVLPIR; // +0x00A0 - WO - Redistributor Invalidate LPI Register
+ const volatile uint32_t padding5[2]; // +0x00A8 - RESERVED
+ volatile uint64_t GICR_INVALLR; // +0x00B0 - WO - Redistributor Invalidate All Register
+ const volatile uint32_t padding6[2]; // +0x00B8 - RESERVED
+ volatile uint64_t GICR_SYNCR; // +0x00C0 - RO - Redistributor Synchronize Register
+ const volatile uint32_t padding7[2]; // +0x00C8 - RESERVED
+ const volatile uint32_t padding8[12]; // +0x00D0 - RESERVED
+ volatile uint64_t IMPDEF2; // +0x0100 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding9[2]; // +0x0108 - RESERVED
+ volatile uint64_t IMPDEF3; // +0x0110 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding10[2]; // +0x0118 - RESERVED
+} GICv3_redistributor_RD;
+
+/*
+ * SGI and PPI Redistributor register map
+ */
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR0; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+  const volatile uint32_t padding8[31];          // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+ volatile uint32_t GICR_ICnoFGR[2]; // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+        volatile uint32_t GICR_IGRPMODR0;        // +0x0D00 - RW - Interrupt Group Modifier Register 0
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+} GICv3_redistributor_SGI;
+
+/*
+ * We have a multiplicity of GIC Redistributors; on the GIC-AEM and
+ * GIC-500 they are arranged as one 128KB region per redistributor: one
+ * 64KB page of GICR LPI registers, and one 64KB page of GICR Private
+ * Int registers
+ */
+typedef struct
+{
+ union
+ {
+ GICv3_redistributor_RD RD_base;
+ uint8_t padding[64 * 1024];
+ } RDblock;
+
+ union
+ {
+ GICv3_redistributor_SGI SGI_base;
+ uint8_t padding[64 * 1024];
+ } SGIblock;
+} GICv3_GICR;
+
+/*
+ * use the scatter file to place GIC Redistributor base address
+ *
+ * although this code doesn't know how many Redistributor banks
+ * a particular system will have, we declare gicrbase as an array
+ * to avoid unwanted compiler optimisations when calculating the
+ * base of a particular Redistributor bank
+ */
+static const GICv3_GICR gicrbase[2] __attribute__((section (".gicr")));
+
+/**********************************************************************/
+
+/*
+ * utility functions to calculate base of a particular
+ * Redistributor bank
+ */
+
+static inline GICv3_redistributor_RD *const getgicrRD(uint32_t gicr)
+{
+ GICv3_GICR *const arraybase = (GICv3_GICR *const)&gicrbase;
+
+ return &((arraybase + gicr)->RDblock.RD_base);
+}
+
+static inline GICv3_redistributor_SGI *const getgicrSGI(uint32_t gicr)
+{
+ GICv3_GICR *arraybase = (GICv3_GICR *)(&gicrbase);
+
+ return &(arraybase[gicr].SGIblock.SGI_base);
+}
+
+/**********************************************************************/
+
+// This function walks a block of RDs to find one with the matching affinity
+uint32_t GetGICR(uint32_t affinity)
+{
+ GICv3_redistributor_RD* gicr;
+ uint32_t index = 0;
+
+ do
+ {
+ gicr = getgicrRD(index);
+ if (gicr->GICR_TYPER[1] == affinity)
+ return index;
+
+ index++;
+ }
+ while((gicr->GICR_TYPER[0] & (1<<4)) == 0); // Keep looking until GICR_TYPER.Last reports no more RDs in block
+
+    return 0xFFFFFFFF; // return -1 to signal that no RD was found
+}
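+
+/*
+ * Illustrative usage sketch (not part of this file): locating and waking the
+ * Redistributor that belongs to the calling PE. read_mpidr_affinity() is a
+ * hypothetical helper that reads MPIDR_EL1 and repacks it into the
+ * Aff3.Aff2.Aff1.Aff0 format used by GICR_TYPER.
+ *
+ *     uint32_t affinity = read_mpidr_affinity();
+ *     uint32_t gicr = GetGICR(affinity);
+ *     if (gicr != 0xFFFFFFFF)
+ *         WakeupGICR(gicr);
+ */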
+
+void WakeupGICR(uint32_t gicr)
+{
+ GICv3_redistributor_RD *const gicrRD = getgicrRD(gicr);
+#ifdef USE_GIC600
+ //Power up Re-distributor for GIC-600
+ gicrRD->GICR_PWRR = 0x2;
+#endif
+
+ /*
+ * step 1 - ensure GICR_WAKER.ProcessorSleep is off
+ */
+ gicrRD->GICR_WAKER &= ~gicrwaker_ProcessorSleep;
+
+ /*
+ * step 2 - wait for children asleep to be cleared
+ */
+ while ((gicrRD->GICR_WAKER & gicrwaker_ChildrenAsleep) != 0)
+ continue;
+
+ /*
+ * OK, GICR is go
+ */
+ return;
+}
+
+void EnablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISENABLER = 1 << id;
+}
+
+void DisablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICENABLER = 1 << id;
+}
+
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ gicrSGI->GICR_IPRIORITYR[id] = priority;
+}
+
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ return (uint32_t)(gicrSGI->GICR_IPRIORITYR[id]);
+}
+
+void SetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISPENDR = 1 << id;
+}
+
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ICPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICPENDR = 1 << id;
+}
+
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ return (gicrSGI->GICR_ISPENDR >> id) & 0x01;
+}
+
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ uint32_t groupmod;
+
+ /*
+ * GICR_IGROUPR0 is one 32-bit register
+ */
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicrSGI->GICR_IGROUPR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGROUPR0 &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicrSGI->GICR_IGRPMODR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGRPMODR0 &= ~(1 << id);
+}
+
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ const uint32_t nbits = (sizeof group * 8) - 1;
+ uint32_t groupmod;
+
+ /*
+ * get each bit of group config duplicated over all 32 bits
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicrSGI->GICR_IGROUPR0 = group;
+ gicrSGI->GICR_IGRPMODR0 = groupmod;
+}
+
+/* EOF GICv3_gicr.c */
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.S
new file mode 100644
index 00000000..e7f95aa7
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.S
@@ -0,0 +1,133 @@
+//
+// Armv8-A AArch64 - Basic Mutex Example
+// Includes the option (USE_LSE_ATOMIC) to use Large System Extension (LSE) atomics introduced in Armv8.1-A
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+ .global _mutex_initialize
+ .global _mutex_acquire
+ .global _mutex_release
+
+//
+// These routines implement the mutex management functions required for running
+// the Arm C library in a multi-threaded environment.
+//
+// They use a value of 0 to represent an unlocked mutex, and 1 for a locked mutex
+//
+// **********************************************************************
+//
+
+ .type _mutex_initialize, "function"
+ .cfi_startproc
+_mutex_initialize:
+
+ //
+ // mark the mutex as unlocked
+ //
+ mov w1, #0
+ str w1, [x0]
+
+ //
+ // we are running multi-threaded, so set a non-zero return
+ // value (function prototype says use 1)
+ //
+ mov w0, #1
+ ret
+ .cfi_endproc
+
+#if !defined(USE_LSE_ATOMIC)
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+
+ //
+ // send ourselves an event, so we don't stick on the wfe at the
+ // top of the loop
+ //
+ sevl
+
+ //
+ // wait until the mutex is available
+ //
+loop:
+ wfe
+ ldaxr w1, [x0]
+ cbnz w1, loop
+
+ //
+ // mutex is (at least, it was) available - try to claim it
+ //
+ mov w1, #1
+ stxr w2, w1, [x0]
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+
+ mov w1, #0
+ stlr w1, [x0]
+ ret
+ .cfi_endproc
+
+#else // LSE version
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+ // This uses a "ticket lock". The lock is stored as a 32-bit value:
+ // - the upper 16-bits record the thread's ticket number ("take a ticket")
+ // - the lower 16-bits record the ticket being served ("now serving")
+
+ // atomically load then increment the thread's ticket number ("take a ticket")
+ mov w3, #(1 << 16)
+ ldadda w3, w1, [x0]
+
+ // is the ticket now being served?
+ eor w2, w1, w1, ror #16
+ cbz w2, loop_exit
+
+ // no, so wait for the ticket to be served
+
+ // send a local event to avoid missing an unlock before the exclusive load
+ sevl
+
+loop:
+ wfe
+ ldaxrh w3, [x0]
+ eor w2, w3, w1, lsr #16
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+loop_exit:
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+ mov w1, #1
+ staddlh w1, [x0]
+ ret
+ .cfi_endproc
+#endif
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.h b/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.h
new file mode 100644
index 00000000..ec1a1d28
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/MP_Mutexes.h
@@ -0,0 +1,66 @@
+/*
+ * Armv8-A AArch64 - Basic Mutex Example
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef MP_MUTEX_H
+#define MP_MUTEX_H
+
+/*
+ * The Arm C library calls-out to these functions to manage multithreading.
+ * They can also be called by user application code.
+ *
+ * Mutex type is specified by the Arm C library
+ *
+ * Declare function prototypes for libc mutex routines
+ */
+typedef signed int *mutex;
+
+/*
+ * int _mutex_initialize(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ * 0 - application is non-threaded
+ * 1 - application is threaded
+ * The C library uses the return result to indicate whether it is being used in a multithreaded environment.
+ */
+int _mutex_initialize(mutex *m);
+
+/*
+ * void _mutex_acquire(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * Routine does not return until the mutex has been claimed. A load-acquire
+ * is used to guarantee that the mutex claim is properly ordered with
+ * respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_acquire(mutex *m);
+
+/*
+ * void _mutex_release(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * A store-release is used to guarantee that the mutex release is properly
+ *      ordered with respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_release(mutex *m);
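+
+/*
+ * Illustrative call pattern (not part of the original header), assuming m
+ * points at a valid mutex object; update_shared_state() is a hypothetical
+ * critical section.
+ *
+ *     _mutex_initialize(m);      // tell the C library we are multi-threaded
+ *     ...
+ *     _mutex_acquire(m);
+ *     update_shared_state();
+ *     _mutex_release(m);
+ */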
+
+#endif
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a34/gnu/example_build/sample_threadx/PPM_AEM.h
new file mode 100644
index 00000000..52c9a0fe
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/PPM_AEM.h
@@ -0,0 +1,66 @@
+//
+// Private Peripheral Map for the v8 Architecture Envelope Model
+//
+// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef PPM_AEM_H
+#define PPM_AEM_H
+
+//
+// Distributor layout
+//
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_IGROUP 0x0080
+#define GICD_ISENABLE 0x0100
+#define GICD_ICENABLE 0x0180
+#define GICD_ISPEND 0x0200
+#define GICD_ICPEND 0x0280
+#define GICD_ISACTIVE 0x0300
+#define GICD_ICACTIVE 0x0380
+#define GICD_IPRIORITY 0x0400
+#define GICD_ITARGETS 0x0800
+#define GICD_ICFG 0x0c00
+#define GICD_PPISR 0x0d00
+#define GICD_SPISR 0x0d04
+#define GICD_SGIR 0x0f00
+#define GICD_CPENDSGI 0x0f10
+#define GICD_SPENDSGI 0x0f20
+#define GICD_PIDR4 0x0fd0
+#define GICD_PIDR5 0x0fd4
+#define GICD_PIDR6 0x0fd8
+#define GICD_PIDR7 0x0fdc
+#define GICD_PIDR0 0x0fe0
+#define GICD_PIDR1 0x0fe4
+#define GICD_PIDR2 0x0fe8
+#define GICD_PIDR3 0x0fec
+#define GICD_CIDR0 0x0ff0
+#define GICD_CIDR1 0x0ff4
+#define GICD_CIDR2 0x0ff8
+#define GICD_CIDR3 0x0ffc
+
+//
+// CPU Interface layout
+//
+#define GICC_CTLR 0x0000
+#define GICC_PMR 0x0004
+#define GICC_BPR 0x0008
+#define GICC_IAR 0x000c
+#define GICC_EOIR 0x0010
+#define GICC_RPR 0x0014
+#define GICC_HPPIR 0x0018
+#define GICC_ABPR 0x001c
+#define GICC_AIAR 0x0020
+#define GICC_AEOIR 0x0024
+#define GICC_AHPPIR 0x0028
+#define GICC_APR0 0x00d0
+#define GICC_NSAPR0 0x00e0
+#define GICC_IIDR 0x00fc
+#define GICC_DIR 0x1000
+
+#endif // PPM_AEM_H
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..17cceb01
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,393 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_TIMER timer_0;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
+
+/* Define main entry point. */
+
+int main(void)
+{
+
+ /* Initialize timer. */
+ init_timer();
+
+ /* Enter ThreadX. */
+ tx_kernel_enter();
+
+ return 0;
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This thread simply sits in a while-forever-sleep loop.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..dd70fe94
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,328 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.ld b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.ld
new file mode 100644
index 00000000..eec8f12b
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/sample_threadx.ld
@@ -0,0 +1,245 @@
+/* Linker script to place sections and symbol values.
+ * It references following symbols, which must be defined in code:
+ * start64 : Entry point
+ *
+ * It defines following symbols, which code can use without definition:
+ * __cs3_peripherals
+ * __code_start
+ * __exidx_start
+ * __exidx_end
+ * __data_start
+ * __preinit_array_start
+ * __preinit_array_end
+ * __init_array_start
+ * __init_array_end
+ * __fini_array_start
+ * __fini_array_end
+ * __bss_start__
+ * __bss_end__
+ * __end__
+ * __stack
+ * __el3_stack
+ * __ttb0_l1
+ * __ttb0_l2_ram
+ * __ttb0_l2_private
+ * __ttb0_l2_periph
+ * __top_of_ram
+ */
+
+ENTRY(start64)
+
+SECTIONS
+{
+ /*
+ * CS3 Peripherals is a 64MB region from 0x1c000000
+ * that includes the following:
+ * System Registers at 0x1C010000
+ * UART0 (PL011) at 0x1C090000
+ * Color LCD Controller (PL111) at 0x1C1F0000
+ * plus a number of others.
+ * CS3_PERIPHERALS is used by the startup code for page-table generation
+ * This region is not truly empty, but we have no
+ * predefined objects that live within it
+ */
+ __cs3_peripherals = 0x1c000000;
+
+ /*
+ * GICv3 distributor
+ */
+ .gicd 0x2f000000 (NOLOAD):
+ {
+ *(.gicd)
+ }
+
+ /*
+ * GICv3 redistributors
+ * 128KB for each redistributor in the system
+ */
+ .gicr 0x2f100000 (NOLOAD):
+ {
+ *(.gicr)
+ }
+
+ .vectors 0x80000000:
+ {
+ __code_start = .;
+ KEEP(*(StartUp))
+ KEEP(*(EL1VECTORS EL2VECTORS EL3VECTORS))
+ }
+
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+
+ .text :
+ {
+ *(.text*)
+ }
+
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+
+ .rodata :
+ {
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+ }
+
+ .eh_frame :
+ {
+ KEEP (*(.eh_frame))
+ }
+
+ .ARM.extab :
+ {
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ }
+
+ .ARM.exidx :
+ {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ }
+
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array ))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT(.fini_array.*)))
+ KEEP (*(.fini_array ))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+
+ .jcr :
+ {
+ KEEP (*(.jcr))
+ }
+
+ .data :
+ {
+ __data_start = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+
+ .bss :
+ {
+ . = ALIGN(4);
+ __bss_start__ = .;
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN(4);
+ __bss_end__ = .;
+ }
+
+ .heap (NOLOAD):
+ {
+ . = ALIGN(64);
+ __end__ = .;
+ PROVIDE(end = .);
+ . = . + 0x1000;
+ }
+
+ .stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x4000;
+ __handler_stack = .;
+ }
+
+ .stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x4000;
+ __stack = .;
+ }
+
+ .el3_stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x1000;
+ __el3_stack = .;
+ }
+
+ .ttb0_l1 (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l1 = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_ram (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_ram = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_private (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_private = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_periph (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_periph = .;
+ . = . + 0x1000;
+ }
+
+ /*
+ * The startup code uses the end of this region to calculate
+ * the top of memory - don't place any RAM regions after it
+ */
+ __top_of_ram = .;
+}
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.c
new file mode 100644
index 00000000..4dc009b2
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.c
@@ -0,0 +1,122 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "sp804_timer.h"
+
+#define TIMER_SP804_CTRL_TIMEREN (1 << 7)
+#define TIMER_SP804_CTRL_TIMERMODE   (1 << 6)  // Bit 6: 0=free-running, 1=periodic
+#define TIMER_SP804_CTRL_INTENABLE (1 << 5)
+#define TIMER_SP804_CTRL_TIMERSIZE (1 << 1) // Bit 1: 0=16-bit, 1=32-bit
+#define TIMER_SP804_CTRL_ONESHOT (1 << 0) // Bit 0: 0=wrapping, 1=one-shot
+
+#define TIMER_SP804_CTRL_PRESCALE_1 (0 << 2) // clk/1
+#define TIMER_SP804_CTRL_PRESCALE_4 (1 << 2) // clk/4
+#define TIMER_SP804_CTRL_PRESCALE_8 (2 << 2) // clk/8
+
+struct sp804_timer
+{
+ volatile uint32_t Time1Load; // +0x00
+ const volatile uint32_t Time1Value; // +0x04 - RO
+ volatile uint32_t Timer1Control; // +0x08
+ volatile uint32_t Timer1IntClr; // +0x0C - WO
+ const volatile uint32_t Timer1RIS; // +0x10 - RO
+ const volatile uint32_t Timer1MIS; // +0x14 - RO
+  volatile uint32_t Timer1BGLoad;      // +0x18
+  const volatile uint32_t reserved0;   // +0x1C - RESERVED, keeps the Timer2 registers at +0x20
+
+  volatile uint32_t Time2Load;         // +0x20
+ volatile uint32_t Time2Value; // +0x24
+ volatile uint8_t Timer2Control; // +0x28
+ volatile uint32_t Timer2IntClr; // +0x2C - WO
+ const volatile uint32_t Timer2RIS; // +0x30 - RO
+ const volatile uint32_t Timer2MIS; // +0x34 - RO
+ volatile uint32_t Timer2BGLoad; // +0x38
+
+ // Not including ID registers
+
+};
+
+// Pointer to the dual timer; the base address is set at run time via setTimerBaseAddress()
+struct sp804_timer* dual_timer;
+
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address)
+{
+ dual_timer = (struct sp804_timer*)address;
+ return;
+}
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt)
+{
+ uint32_t tmp = 0;
+
+ dual_timer->Time1Load = load_value;
+
+  // Fixed settings: 32-bit, periodic mode, no prescaling
+ tmp = TIMER_SP804_CTRL_TIMERSIZE | TIMER_SP804_CTRL_PRESCALE_1 | TIMER_SP804_CTRL_TIMERMODE;
+
+ // Settings from parameters: interrupt generation & reload
+ tmp = tmp | interrupt | auto_reload;
+
+ // Write control register
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Starts the timer
+void startTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp | TIMER_SP804_CTRL_TIMEREN; // Set TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Stops the timer
+void stopTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp & ~TIMER_SP804_CTRL_TIMEREN; // Clear TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void)
+{
+ return dual_timer->Time1Value;
+}
+
+
+void clearTimerIrq(void)
+{
+ // A write to this register, of any value, clears the interrupt
+ dual_timer->Timer1IntClr = 1;
+}
+
+
+// ------------------------------------------------------------
+// End of sp804_timer.c
+// ------------------------------------------------------------
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.h
new file mode 100644
index 00000000..777062cc
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/sp804_timer.h
@@ -0,0 +1,53 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+// Header File
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#ifndef _SP804_TIMER_
+#define _SP804_TIMER_
+
+#include <stdint.h>
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address);
+
+
+// Sets up timer 1 of the SP804 dual timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+
+#define SP804_AUTORELOAD (0)
+#define SP804_SINGLESHOT (1)
+#define SP804_GENERATE_IRQ (1 << 5)
+#define SP804_NO_IRQ (0)
+
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt);
+
+
+// Starts the timer
+void startTimer(void);
+
+
+// Stops the timer
+void stopTimer(void);
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void);
+
+
+// Clears the timer interrupt
+void clearTimerIrq(void);
+
+#endif
+
+// ------------------------------------------------------------
+// End of sp804_timer.h
+// ------------------------------------------------------------
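
For reference, a minimal usage sketch of this API (illustrative only - it simply mirrors the call sequence that init_timer() in timer_interrupts.c, later in this patch, performs, so the base address and load value are that example's choices, not requirements of the driver):

    #include <stdint.h>
    #include "sp804_timer.h"

    static void example_start_periodic_timer(void)
    {
        setTimerBaseAddress(0x1C110000);                        /* FVP Base model SP804 */
        initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ); /* periodic, with IRQ   */
        startTimer();                                           /* set TimerEn          */
    }

    /* ...and in the handler for the timer's interrupt, acknowledge the source: */
    static void example_timer_isr_body(void)
    {
        clearTimerIrq();
    }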
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/startup.S b/ports/cortex_a34/gnu/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..67dd8a6a
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/startup.S
@@ -0,0 +1,787 @@
+// ------------------------------------------------------------
+// Armv8-A MPCore EL3 AArch64 Startup Code
+//
+// Basic Vectors, MMU, caches and GICv3 initialization
+//
+// Exits in EL1 AArch64
+//
+// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_mmu.h"
+#include "v8_system.h"
+#include "GICv3_aliases.h"
+
+ .section StartUp, "ax"
+ .balign 4
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+
+ .global InvalidateUDCaches
+ .global ZeroBlock
+
+ .global SetPrivateIntSecurityBlock
+ .global SetSPISecurityAll
+ .global SetPrivateIntPriority
+
+ .global GetGICR
+ .global WakeupGICR
+ .global SyncAREinGICD
+ .global EnableGICD
+ .global EnablePrivateInt
+ .global GetPrivateIntPending
+ .global ClearPrivateIntPending
+
+ .global _start
+ .global MainApp
+
+ .global __code_start
+ .global __ttb0_l1
+ .global __ttb0_l2_ram
+ .global __ttb0_l2_periph
+ .global __top_of_ram
+ .global gicd
+ .global __stack
+ .global __el3_stack
+ .global __cs3_peripherals
+
+
+
+
+// ------------------------------------------------------------
+
+ .global start64
+ .type start64, "function"
+start64:
+
+ //
+ // program the VBARs
+ //
+ ldr x1, =el1_vectors
+ msr VBAR_EL1, x1
+
+ ldr x1, =el2_vectors
+ msr VBAR_EL2, x1
+
+ ldr x1, =el3_vectors
+ msr VBAR_EL3, x1
+
+
+ // GIC-500 comes out of reset in GICv2 compatibility mode - first set
+ // system register enables for all relevant exception levels, and
+ // select GICv3 operating mode
+ //
+ msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL3, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
+
+ //
+ // set lower exception levels as non-secure, with no access
+ // back to EL2 or EL3, and make them AArch64 capable
+ //
+ mov x3, #(SCR_EL3_RW | \
+ SCR_EL3_SMD | \
+ SCR_EL3_NS) // Set NS bit, to access Non-secure registers
+ msr SCR_EL3, x3
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL2, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
+
+
+ //
+ // no traps or VM modifications from the Hypervisor, EL1 is AArch64
+ //
+ mov x2, #HCR_EL2_RW
+ msr HCR_EL2, x2
+
+ //
+ // VMID is still significant, even when virtualisation is not
+ // being used, so ensure VTTBR_EL2 is properly initialised
+ //
+ msr VTTBR_EL2, xzr
+
+ //
+ // VMPIDR_EL2 holds the value returned by Non-secure EL1 reads of MPIDR_EL1, and
+ // VPIDR_EL2 holds the value returned by Non-secure EL1 reads of MIDR_EL1.
+ // Both registers are architecturally UNKNOWN at reset, so they must be set to the
+ // correct values here (even though EL2/virtualization is not being used), otherwise
+ // Non-secure EL1 reads of MPIDR_EL1/MIDR_EL1 would return UNKNOWN values.
+ //
+ mrs x0, MPIDR_EL1
+ msr VMPIDR_EL2, x0
+ mrs x0, MIDR_EL1
+ msr VPIDR_EL2, x0
+
+ // extract the core number from MPIDR_EL1 and store it in
+ // x19 (defined by the AAPCS as callee-saved), so we can re-use
+ // the number later
+ //
+ bl GetCPUID
+ mov x19, x0
+
+ //
+ // neither EL3 nor EL2 trap floating point or accesses to CPACR
+ //
+ msr CPTR_EL3, xzr
+ msr CPTR_EL2, xzr
+
+ //
+ // SCTLR_ELx may come out of reset with UNKNOWN values so we will
+ // set the fields to 0 except, possibly, the endianness field(s).
+ // Note that setting SCTLR_EL2 or the EL0 related fields of SCTLR_EL1
+ // is not strictly needed, since we're never in EL2 or EL0
+ //
+#ifdef __ARM_BIG_ENDIAN
+ mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
+#else
+ mov x0, #0
+#endif
+ msr SCTLR_EL3, x0
+ msr SCTLR_EL2, x0
+ msr SCTLR_EL1, x0
+
+#ifdef CORTEXA
+ //
+ // Configure ACTLR_EL[23]
+ // ----------------------
+ //
+ // These bits are IMPLEMENTATION DEFINED, so are different for
+ // different processors
+ //
+ // For Cortex-A57, the controls we set are:
+ //
+ // Enable lower level access to CPUACTLR_EL1
+ // Enable lower level access to CPUECTLR_EL1
+ // Enable lower level access to L2CTLR_EL1
+ // Enable lower level access to L2ECTLR_EL1
+ // Enable lower level access to L2ACTLR_EL1
+ //
+ mov x0, #((1 << 0) | \
+ (1 << 1) | \
+ (1 << 4) | \
+ (1 << 5) | \
+ (1 << 6))
+
+ msr ACTLR_EL3, x0
+ msr ACTLR_EL2, x0
+
+ //
+ // configure CPUECTLR_EL1
+ //
+ // These bits are IMPLEMENTATION DEFINED, so may need to be
+ // different for different processors
+ //
+ // SMPEN - bit 6 - Enables the processor to receive cache
+ // and TLB maintenance operations
+ //
+ // Note: For Cortex-A57/53 SMPEN should be set before enabling
+ // the caches and MMU, or performing any cache and TLB
+ // maintenance operations.
+ //
+ // This register has a defined reset value, so we use a
+ // read-modify-write sequence to set SMPEN
+ //
+ mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
+ orr x0, x0, #(1 << 6) // Set the SMPEN bit
+ msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
+
+ isb
+#endif
+
+ //
+ // That's the last of the control settings for now
+ //
+ // Note: no ISB after all these changes, as registers won't be
+ // accessed until after an exception return, which is itself a
+ // context synchronisation event
+ //
+
+ //
+ // Setup some EL3 stack space, ready for calling some subroutines, below.
+ //
+ // Stack space allocation is CPU-specific, so use CPU
+ // number already held in x19
+ //
+ // 2^12 bytes per CPU for the EL3 stacks
+ //
+ ldr x0, =__el3_stack
+ sub x0, x0, x19, lsl #12
+ mov sp, x0
+
+ //
+ // we need to configure the GIC while still in secure mode, specifically
+ // all PPIs and SPIs have to be programmed as Group1 interrupts
+ //
+
+ //
+ // Before the GIC can be reliably programmed, we need to
+ // enable Affinity Routing, as this affects where the configuration
+ // registers are (with Affinity Routing enabled, some registers are
+ // in the Redistributor, whereas those same registers are in the
+ // Distributor with Affinity Routing disabled (i.e. when in GICv2
+ // compatibility mode)).
+ //
+ mov x0, #(1 << 4) | (1 << 5) // gicdctlr_ARE_S | gicdctlr_ARE_NS
+ mov x1, x19
+ bl SyncAREinGICD
+
+ //
+ // The Redistributor comes out of reset assuming the processor is
+ // asleep - correct that assumption
+ //
+ bl GetAffinity
+ bl GetGICR
+ mov w20, w0 // Keep a copy for later
+ bl WakeupGICR
+
+ //
+ // Now we're ready to set security and other initialisations
+ //
+ // This is a per-CPU configuration for these interrupts
+ //
+ // for the first cluster, CPU number is the redistributor index
+ //
+ mov w0, w20
+ mov w1, #1 // gicigroupr_G1NS
+ bl SetPrivateIntSecurityBlock
+
+ //
+ // While we're in the Secure World, set the priority mask low enough
+ // for it to be writable in the Non-Secure World
+ //
+ //mov x0, #16 << 3 // 5 bits of priority in the Secure world
+ mov x0, #0xFF // for Non-Secure interrupts
+ msr ICC_PMR_EL1, x0
+
+ //
+ // there's more GIC setup to do, but only for the primary CPU
+ //
+ cbnz x19, drop_to_el1
+
+ //
+ // There's more to do to the GIC - call the utility routine to set
+ // all SPIs to Group1
+ //
+ mov w0, #1 // gicigroupr_G1NS
+ bl SetSPISecurityAll
+
+ //
+ // Set up EL1 entry point and "dummy" exception return information,
+ // then perform exception return to enter EL1
+ //
+ .global drop_to_el1
+drop_to_el1:
+ adr x1, el1_entry_aarch64
+ msr ELR_EL3, x1
+ mov x1, #(AARCH64_SPSR_EL1h | \
+ AARCH64_SPSR_F | \
+ AARCH64_SPSR_I | \
+ AARCH64_SPSR_A)
+ msr SPSR_EL3, x1
+ eret
+
+
+
+// ------------------------------------------------------------
+// EL1 - Common start-up code
+// ------------------------------------------------------------
+
+ .global el1_entry_aarch64
+ .type el1_entry_aarch64, "function"
+el1_entry_aarch64:
+
+ //
+ // Now we're in EL1, setup the application stack
+ // the scatter file allocates 2^14 bytes per app stack
+ //
+ ldr x0, =__handler_stack
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+ MSR SPSel, #0
+ ISB
+ ldr x0, =__stack
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+
+ //
+ // Enable floating point
+ //
+ mov x0, #CPACR_EL1_FPEN
+ msr CPACR_EL1, x0
+
+ //
+ // Invalidate caches and TLBs for all stage 1
+ // translations used at EL1
+ //
+ // Cortex-A processors automatically invalidate their caches on reset
+ // (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
+ // It is therefore not necessary for software to invalidate the caches
+ // on startup, however, this is done here in case of a warm reset.
+ bl InvalidateUDCaches
+ tlbi VMALLE1
+
+
+ //
+ // Set TTBR0 Base address
+ //
+ // The CPUs share one set of translation tables that are
+ // generated by CPU0 at run-time
+ //
+ // TTBR1_EL1 is not used in this example
+ //
+ ldr x1, =__ttb0_l1
+ msr TTBR0_EL1, x1
+
+
+ //
+ // Set up memory attributes
+ //
+ // These equate to:
+ //
+ // 0 -> 0b01000100 = 0x00000044 = Normal, Inner/Outer Non-Cacheable
+ // 1 -> 0b11111111 = 0x0000ff00 = Normal, Inner/Outer WriteBack Read/Write Allocate
+ // 2 -> 0b00000100 = 0x00040000 = Device-nGnRE
+ //
+ mov x1, #0xff44
+ movk x1, #4, LSL #16 // equiv to: movk x1, #0x0000000000040000
+ msr MAIR_EL1, x1
+
+
+ //
+ // Set up TCR_EL1
+ //
+ // We're using only TTBR0 (EPD1 = 1), and the page table entries:
+ // - are using an 8-bit ASID from TTBR0
+ // - have a 4K granularity (TG0 = 0b00)
+ // - are outer-shareable (SH0 = 0b10)
+ // - are using Inner & Outer WBWA Normal memory ([IO]RGN0 = 0b01)
+ // - map
+ // + 32 bits of VA space (T0SZ = 0x20)
+ // + into a 32-bit PA space (IPS = 0b000)
+ //
+ // 36 32 28 24 20 16 12 8 4 0
+ // -----+----+----+----+----+----+----+----+----+----+
+ // | | |OOII| | | |OOII| | |
+ // TT | | |RRRR|E T | T| |RRRR|E T | T|
+ // BB | I I|TTSS|GGGG|P 1 | 1|TTSS|GGGG|P 0 | 0|
+ // IIA| P P|GGHH|NNNN|DAS | S|GGHH|NNNN|D S | S|
+ // 10S| S-S|1111|1111|11Z-|---Z|0000|0000|0 Z-|---Z|
+ //
+ // 000 0000 0000 0000 1000 0000 0010 0101 0010 0000
+ //
+ // 0x 8 0 2 5 2 0
+ //
+ // Note: the ISB is needed to ensure the changes to system
+ // context are before the write of SCTLR_EL1.M to enable
+ // the MMU. It is likely on a "real" implementation that
+ // this setup would work without an ISB, due to the
+ // amount of code that gets executed before enabling the
+ // MMU, but that would not be architecturally correct.
+ //
+ ldr x1, =0x0000000000802520
+ msr TCR_EL1, x1
+ isb
+
+ //
+ // x19 already contains the CPU number, so branch to secondary
+ // code if we're not on CPU0
+ //
+ cbnz x19, el1_secondary
+
+ //
+ // Fall through to primary code
+ //
+
+
+//
+// ------------------------------------------------------------
+//
+// EL1 - primary CPU init code
+//
+// This code is run on CPU0, while the other CPUs are in the
+// holding pen
+//
+
+ .global el1_primary
+ .type el1_primary, "function"
+el1_primary:
+
+ //
+ // Turn on the banked GIC distributor enable,
+ // ready for individual CPU enables later
+ //
+ mov w0, #(1 << 1) // gicdctlr_EnableGrp1A
+ bl EnableGICD
+
+ //
+ // Generate TTBR0 L1
+ //
+ // at 4KB granularity, 32-bit VA space, table lookup starts at
+ // L1, with 1GB regions
+ //
+ // we are going to create entries pointing to L2 tables for a
+ // couple of these 1GB regions, the first of which is the
+ // RAM on the VE board model - get the table addresses and
+ // start by emptying out the L1 page tables (4 entries at L1
+ // for a 4K granularity)
+ //
+ // x21 = address of L1 tables
+ //
+ ldr x21, =__ttb0_l1
+ mov x0, x21
+ mov x1, #(4 << 3)
+ bl ZeroBlock
+
+ //
+ // time to start mapping the RAM regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // x22 = address of L2 tables, needs to be remembered in case
+ // we want to re-use the tables for mapping peripherals
+ //
+ ldr x22, =__ttb0_l2_ram
+ mov x1, #(512 << 3)
+ mov x0, x22
+ bl ZeroBlock
+
+ //
+ // Get the start address of RAM (the EXEC region) into x4
+ // and calculate the offset into the L1 table (1GB per region,
+ // max 4GB)
+ //
+ // x23 = L1 table offset, saved for later comparison against
+ // peripheral offset
+ //
+ ldr x4, =__code_start
+ ubfx x23, x4, #30, #2
+
+ orr x1, x22, #TT_S1_ATTR_PAGE
+ str x1, [x21, x23, lsl #3]
+
+ //
+ // we've already used the RAM start address in x4 - we now need
+ // to get this in terms of an offset into the L2 page tables,
+ // where each entry covers 2MB
+ //
+ ubfx x2, x4, #21, #9
+
+ //
+ // TOP_OF_RAM in the scatter file marks the end of the
+ // Execute region in RAM: convert the end of this region to an
+ // offset too, being careful to round up, then calculate the
+ // number of entries to write
+ //
+ ldr x5, =__top_of_ram
+ sub x3, x5, #1
+ ubfx x3, x3, #21, #9
+ add x3, x3, #1
+ sub x3, x3, x2
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as Shared, Normal WBWA (MAIR[1]) with a flat
+ // VA->PA translation
+ //
+ bic x4, x4, #((1 << 21) - 1)
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (1 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_SH_INNER | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // factor the offset into the page table address and then write
+ // the entries
+ //
+ add x0, x22, x2, lsl #3
+
+loop1:
+ subs x3, x3, #1
+ str x1, [x0], #8
+ add x1, x1, #0x200, LSL #12 // equiv to add x1, x1, #(1 << 21) // 2MB per entry
+ bne loop1
+
+
+ //
+ // now mapping the Peripheral regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // The assumption here is that all peripherals live within
+ // a common 1GB region (i.e. that there's a single set of
+ // L2 pages for all the peripherals). We only use a UART
+ // and the GIC in this example, so the assumption is sound
+ //
+ // x24 = address of L2 peripheral tables
+ //
+ ldr x24, =__ttb0_l2_periph
+
+ //
+ // get the GICD address into x4 and calculate
+ // the offset into the L1 table
+ //
+ // x25 = L1 table offset
+ //
+ ldr x4, =gicd
+ ubfx x25, x4, #30, #2
+
+ //
+ // here's the tricky bit: it's possible that the peripherals are
+ // in the same 1GB region as the RAM, in which case we don't need
+ // to prime a separate set of L2 page tables, nor add them to the
+ // L1 tables
+ //
+ // if we're going to re-use the TTB0_L2_RAM tables, get their
+ // address into x24, which is used later on to write the PTEs
+ //
+ cmp x25, x23
+ csel x24, x22, x24, EQ
+ b.eq nol2setup
+
+ //
+ // Peripherals are in a separate 1GB region, and so have their own
+ // set of L2 tables - clean out the tables and add them to the L1
+ // table
+ //
+ mov x0, x24
+ mov x1, #512 << 3
+ bl ZeroBlock
+
+ orr x1, x24, #TT_S1_ATTR_PAGE
+ str x1, [x21, x25, lsl #3]
+
+ //
+ // there's only going to be a single 2MB region for GICD (in
+ // x4) - get this in terms of an offset into the L2 page tables
+ //
+ // with larger systems, it is possible that the GIC redistributor
+ // registers require extra 2MB pages, in which case extra code
+ // would be required here
+ //
+nol2setup:
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry for this, so no loop as we have for RAM, above
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // we have CS3_PERIPHERALS that include the UART controller
+ //
+ // Again, the code is making assumptions - this time that the CS3_PERIPHERALS
+ // region uses the same 1GB portion of the address space as the GICD,
+ // and thus shares the same set of L2 page tables
+ //
+ // Get CS3_PERIPHERALS address into x4 and calculate the offset into the
+ // L2 tables
+ //
+ ldr x4, =__cs3_peripherals
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry again - write it
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // issue a barrier to ensure all table entry writes are complete
+ //
+ dsb ish
+
+ //
+ // Enable the MMU. Caches will be enabled later, after scatterloading.
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // The Arm Architecture Reference Manual for Armv8-A states:
+ //
+ // Instruction accesses to Non-cacheable Normal memory can be held in instruction caches.
+ // Correspondingly, the sequence for ensuring that modifications to instructions are available
+ // for execution must include invalidation of the modified locations from the instruction cache,
+ // even if the instructions are held in Normal Non-cacheable memory.
+ // This includes cases where the instruction cache is disabled.
+ //
+
+ dsb ish // ensure all previous stores have completed before invalidating
+ ic ialluis // I cache invalidate all inner shareable to PoU (which includes secondary cores)
+ dsb ish // ensure completion on inner shareable domain (which includes secondary cores)
+ isb
+
+ // Scatter-loading is complete, so enable the caches here so that the C library's mutex initialization later will work
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ msr SCTLR_EL1, x1
+ isb
+
+ // Zero the bss
+ ldr x0, =__bss_start__ // Start of block
+ mov x1, #0 // Fill value
+ ldr x2, =__bss_end__ // End of block
+ sub x2, x2, x0 // Length of block
+ bl memset
+
+ // Set up the standard file handles
+ bl initialise_monitor_handles
+
+ // Set up _fini and fini_array to be called at exit
+ ldr x0, =__libc_fini_array
+ bl atexit
+
+ // Call preinit_array, _init and init_array
+ bl __libc_init_array
+
+ // Set argc = 1, argv[0] = "" and then call main
+ .pushsection .data
+ .align 3
+argv:
+ .dword arg0
+ .dword 0
+arg0:
+ .byte 0
+ .popsection
+
+ mov x0, #1
+ ldr x1, =argv
+ bl main
+
+ b exit // Will not return
+
+// ------------------------------------------------------------
+// EL1 - secondary CPU init code
+//
+// This code is run on CPUs 1, 2, 3 etc....
+// ------------------------------------------------------------
+
+ .global el1_secondary
+ .type el1_secondary, "function"
+el1_secondary:
+
+ //
+ // the primary CPU is going to use SGI 15 as a wakeup event
+ // to let us know when it is OK to proceed, so prepare for
+ // receiving that interrupt
+ //
+ // NS interrupt priorities run from 0 to 15, with 15 being
+ // too low a priority to ever raise an interrupt, so let's
+ // use 14
+ //
+ mov w0, w20
+ mov w1, #15
+ mov w2, #14 << 4 // we're in the NS world, so 4 bits of priority
+ // in an 8-bit field: 8 - 4 = 4-bit shift
+ bl SetPrivateIntPriority
+
+ mov w0, w20
+ mov w1, #15
+ bl EnablePrivateInt
+
+ //
+ // set the priority mask as low as possible; although, being in the
+ // NS World, we can't set bit[7] of the priority, we still
+ // write all 8 bits of the priority to an ICC register
+ //
+ mov x0, #31 << 3
+ msr ICC_PMR_EL1, x0
+
+ //
+ // set global enable and wait for our interrupt to arrive
+ //
+ mov x0, #1
+ msr ICC_IGRPEN1_EL1, x0
+ isb
+
+loop_wfi:
+ dsb SY // Clear all pending data accesses
+ wfi // Go to sleep
+
+ //
+ // something woke us from our wait, was it the required interrupt?
+ //
+ mov w0, w20
+ mov w1, #15
+ bl GetPrivateIntPending
+ cbz w0, loop_wfi
+
+ //
+ // it was - there's no need to actually take the interrupt,
+ // so just clear it
+ //
+ mov w0, w20
+ mov w1, #15
+ bl ClearPrivateIntPending
+
+ //
+ // Enable the MMU and caches
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to thread start
+ //
+ //B MainApp
+
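
To make the memory-attribute setup in el1_entry_aarch64 easier to check, here is a small sketch (not part of the port's sources; MAIR_ATTR is an illustrative helper) that composes the same MAIR_EL1 value the mov/movk pair writes:

    #include <stdint.h>

    /* Attribute index n occupies byte n of MAIR_EL1. */
    #define MAIR_ATTR(idx, val)   ((uint64_t)(val) << ((idx) * 8))

    static uint64_t example_mair_el1(void)
    {
        return MAIR_ATTR(0, 0x44)    /* Normal, Inner/Outer Non-Cacheable         */
             | MAIR_ATTR(1, 0xFF)    /* Normal, Inner/Outer WriteBack RW-Allocate */
             | MAIR_ATTR(2, 0x04);   /* Device-nGnRE                              */
        /* = 0x4ff44, i.e. the 0xff44 plus the (4 << 16) added by the movk. */
    }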
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/timer_interrupts.c b/ports/cortex_a34/gnu/example_build/sample_threadx/timer_interrupts.c
new file mode 100644
index 00000000..8f522217
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/timer_interrupts.c
@@ -0,0 +1,152 @@
+/* Bare-metal example for Armv8-A FVP Base model */
+
+/* Timer and interrupts */
+
+/* Copyright (c) 2016-2018 Arm Limited (or its affiliates). All rights reserved. */
+/* Use, modification and redistribution of this file is subject to your possession of a */
+/* valid End User License Agreement for the Arm Product of which these examples are part of */
+/* and your compliance with all applicable terms and conditions of such licence agreement. */
+
+#include <stdio.h>
+
+#include "GICv3.h"
+#include "GICv3_gicc.h"
+#include "sp804_timer.h"
+
+void _tx_timer_interrupt(void);
+
+// LED Base address
+#define LED_BASE (volatile unsigned int *)0x1C010008
+
+
+void nudge_leds(void) // Move LEDs along
+{
+ static int state = 1;
+ static int value = 1;
+
+ if (state)
+ {
+ int max = (1 << 7);
+ value <<= 1;
+ if (value == max)
+ state = 0;
+ }
+ else
+ {
+ value >>= 1;
+ if (value == 1)
+ state = 1;
+ }
+
+ *LED_BASE = value; // Update LEDs hardware
+}
+
+
+// Initialize Timer 0 and Interrupt Controller
+void init_timer(void)
+{
+ // Enable interrupts
+ __asm("MSR DAIFClr, #0xF");
+ setICC_IGRPEN1_EL1(igrpEnable);
+
+ // Configure the SP804 timer to generate an interrupt
+ setTimerBaseAddress(0x1C110000);
+ initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+ startTimer();
+
+ // The SP804 timer generates SPI INTID 34. Enable
+ // this ID, and route it to core 0.0.0.0 (this one!)
+ SetSPIRoute(34, 0, gicdirouter_ModeSpecific); // Route INTID 34 to 0.0.0.0 (this core)
+ SetSPIPriority(34, 0); // Set INTID 34 priority to 0
+ ConfigureSPI(34, gicdicfgr_Level); // Set INTID 34 as level-sensitive
+ EnableSPI(34); // Enable INTID 34
+}
+
+
+// --------------------------------------------------------
+
+void irqHandler(void)
+{
+ unsigned int ID;
+
+ ID = getICC_IAR1(); // readIntAck();
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("irqHandler() - Reserved INTID %d\n\n", ID);
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("irqHandler() - External timer interrupt\n\n");
+ nudge_leds();
+ clearTimerIrq();
+
+ /* Call ThreadX timer interrupt processing. */
+ _tx_timer_interrupt();
+
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("irqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
+
+// --------------------------------------------------------
+
+// Not actually used in this example, but provided for completeness
+
+void fiqHandler(void)
+{
+ unsigned int ID;
+ unsigned int aliased = 0;
+
+ ID = getICC_IAR0(); // readIntAck();
+ //printf("fiqHandler() - Read %d from IAR0\n", ID);
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("fiqHandler() - Reserved INTID %d\n\n", ID);
+ ID = getICC_IAR1(); // readAliasedIntAck();
+ //printf("fiqHandler() - Read %d from AIAR\n", ID);
+ aliased = 1;
+
+ // If still spurious then simply return
+ if ((1020 <= ID) && (ID <= 1023))
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("fiqHandler() - External timer interrupt\n\n");
+ clearTimerIrq();
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("fiqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ // NOTE: If the ID was read from the Aliased IAR, then
+ // the aliased EOI register must be used
+ if (aliased == 0)
+ setICC_EOIR0(ID); // writeEOI(ID);
+ else
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/use_model_semihosting.ds b/ports/cortex_a34/gnu/example_build/sample_threadx/use_model_semihosting.ds
new file mode 100644
index 00000000..6fde52b2
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/use_model_semihosting.ds
@@ -0,0 +1 @@
+set semihosting enabled off
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.S
new file mode 100644
index 00000000..f8db3bfe
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.S
@@ -0,0 +1,179 @@
+// ------------------------------------------------------------
+// Armv8-A AArch64 - Common helper functions
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global EnableCachesEL1
+ .global DisableCachesEL1
+ .global InvalidateUDCaches
+ .global GetMIDR
+ .global GetMPIDR
+ .global GetAffinity
+ .global GetCPUID
+
+// ------------------------------------------------------------
+
+//
+// void EnableCachesEL1(void)
+//
+// enable Instruction and Data caches
+//
+ .type EnableCachesEL1, "function"
+ .cfi_startproc
+EnableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_ELx_I
+ orr x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+ .type DisableCachesEL1, "function"
+ .cfi_startproc
+DisableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_ELx_I
+ bic x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// void InvalidateUDCaches(void)
+//
+// Invalidate data and unified caches
+//
+ .type InvalidateUDCaches, "function"
+ .cfi_startproc
+InvalidateUDCaches:
+ // From the Armv8-A Architecture Reference Manual
+
+ dmb ish // ensure all prior inner-shareable accesses have been observed
+
+ mrs x0, CLIDR_EL1
+ and w3, w0, #0x07000000 // get 2 x level of coherence
+ lsr w3, w3, #23
+ cbz w3, finished
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+loop_level:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt next_level // no data or unified cache at this level
+ msr CSSELR_EL1, x10 // select this cache level
+ isb // synchronize change of csselr
+ mrs x1, CCSIDR_EL1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop_way:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop_set:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc isw, x11 // do data cache invalidate by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop_set
+ subs x9, x9, x16 // decrement way number
+ b.ge loop_way
+next_level:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop_level
+ dsb sy // ensure completion of previous cache maintenance operation
+ isb
+finished:
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// ID Register functions
+//
+
+ .type GetMIDR, "function"
+ .cfi_startproc
+GetMIDR:
+
+ mrs x0, MIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetMPIDR, "function"
+ .cfi_startproc
+GetMPIDR:
+
+ mrs x0, MPIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetAffinity, "function"
+ .cfi_startproc
+GetAffinity:
+
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #32, #8
+ bfi w0, w1, #24, #8
+ ret
+ .cfi_endproc
+
+
+ .type GetCPUID, "function"
+ .cfi_startproc
+GetCPUID:
+
+ mrs x0, MIDR_EL1
+ ubfx x0, x0, #4, #12 // extract PartNum
+ cmp x0, #0xD0D // Cortex-A77
+ b.eq DynamIQ
+ cmp x0, #0xD0B // Cortex-A76
+ b.eq DynamIQ
+ cmp x0, #0xD0A // Cortex-A75
+ b.eq DynamIQ
+ cmp x0, #0xD05 // Cortex-A55
+ b.eq DynamIQ
+ b Others
+DynamIQ:
+ mrs x0, MPIDR_EL1
+ ubfx x0, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ ret
+
+Others:
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
+ ubfx x2, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ add x0, x1, x2, LSL #2
+ ret
+ .cfi_endproc
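
As a cross-check on the CPU-numbering logic above, a hedged C rendering of GetCPUID (illustrative only; is_dynamiq_part stands in for the MIDR_EL1 part-number comparisons done in the assembly):

    #include <stdint.h>

    static unsigned int example_cpu_id(uint64_t mpidr, int is_dynamiq_part)
    {
        unsigned int aff0 = (unsigned int)(mpidr >> 0) & 0xFF;
        unsigned int aff1 = (unsigned int)(mpidr >> 8) & 0xFF;

        if (is_dynamiq_part)
        {
            /* Cortex-A55/A75/A76/A77: Aff1 is the core number. */
            return aff1;
        }

        /* Other parts: assume up to four cores per cluster. */
        return aff0 + (aff1 << 2);
    }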
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.h b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.h
new file mode 100644
index 00000000..b09079a4
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_aarch64.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Armv8-A AArch64 common helper functions
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+
+#ifndef V8_AARCH64_H
+#define V8_AARCH64_H
+
+/*
+ * Parameters for data barriers
+ */
+#define OSHLD 1
+#define OSHST 2
+#define OSH 3
+#define NSHLD 5
+#define NSHST 6
+#define NSH 7
+#define ISHLD 9
+#define ISHST 10
+#define ISH 11
+#define LD 13
+#define ST 14
+#define SY 15
+
+/**********************************************************************/
+
+/*
+ * function prototypes
+ */
+
+/*
+ * void InvalidateUDCaches(void)
+ * invalidates all Unified and Data Caches
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * guarantees that all levels of cache will be invalidated before
+ * returning to caller
+ */
+void InvalidateUDCaches(void);
+
+/*
+ * unsigned long long EnableCachesEL1(void)
+ * enables I- and D- caches at EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * New value of SCTLR_EL1
+ *
+ * Side Effects
+ * context will be synchronised before returning to caller
+ */
+unsigned long long EnableCachesEL1(void);
+
+/*
+ * unsigned long long GetMIDR(void)
+ * returns the contents of MIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MIDR_EL1
+ */
+unsigned long long GetMIDR(void);
+
+/*
+ * unsigned long long GetMPIDR(void)
+ * returns the contents of MPIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MPIDR_EL1
+ */
+unsigned long long GetMPIDR(void);
+
+/*
+ * unsigned int GetCPUID(void)
+ * returns a linear CPU number derived from the MPIDR_EL1 affinity fields
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * CPU number (Aff1 on DynamIQ parts, otherwise Aff0 + 4 * Aff1)
+ */
+unsigned int GetCPUID(void);
+
+#endif
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_mmu.h
new file mode 100644
index 00000000..ee8834fa
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_mmu.h
@@ -0,0 +1,128 @@
+//
+// Defines for v8 Memory Model
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_MMU_H
+#define V8_MMU_H
+
+//
+// Translation Control Register fields
+//
+// RGN field encodings
+//
+#define TCR_RGN_NC 0b00
+#define TCR_RGN_WBWA 0b01
+#define TCR_RGN_WT 0b10
+#define TCR_RGN_WBRA 0b11
+
+//
+// Shareability encodings
+//
+#define TCR_SHARE_NONE 0b00
+#define TCR_SHARE_OUTER 0b10
+#define TCR_SHARE_INNER 0b11
+
+//
+// Granule size encodings
+//
+#define TCR_GRANULE_4K 0b00
+#define TCR_GRANULE_64K 0b01
+#define TCR_GRANULE_16K 0b10
+
+//
+// Physical Address sizes
+//
+#define TCR_SIZE_4G 0b000
+#define TCR_SIZE_64G 0b001
+#define TCR_SIZE_1T 0b010
+#define TCR_SIZE_4T 0b011
+#define TCR_SIZE_16T 0b100
+#define TCR_SIZE_256T 0b101
+
+//
+// Translation Control Register fields
+//
+#define TCR_EL1_T0SZ_SHIFT 0
+#define TCR_EL1_EPD0 (1 << 7)
+#define TCR_EL1_IRGN0_SHIFT 8
+#define TCR_EL1_ORGN0_SHIFT 10
+#define TCR_EL1_SH0_SHIFT 12
+#define TCR_EL1_TG0_SHIFT 14
+
+#define TCR_EL1_T1SZ_SHIFT 16
+#define TCR_EL1_A1 (1 << 22)
+#define TCR_EL1_EPD1 (1 << 23)
+#define TCR_EL1_IRGN1_SHIFT 24
+#define TCR_EL1_ORGN1_SHIFT 26
+#define TCR_EL1_SH1_SHIFT 28
+#define TCR_EL1_TG1_SHIFT 30
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_EL1_AS (1 << 36)
+#define TCR_EL1_TBI0 (1 << 37)
+#define TCR_EL1_TBI1 (1 << 38)
+
+//
+// Stage 1 Translation Table descriptor fields
+//
+#define TT_S1_ATTR_FAULT (0b00 << 0)
+#define TT_S1_ATTR_BLOCK (0b01 << 0) // Level 1/2
+#define TT_S1_ATTR_TABLE (0b11 << 0) // Level 0/1/2
+#define TT_S1_ATTR_PAGE (0b11 << 0) // Level 3
+
+#define TT_S1_ATTR_MATTR_LSB 2
+
+#define TT_S1_ATTR_NS (1 << 5)
+
+#define TT_S1_ATTR_AP_RW_PL1 (0b00 << 6)
+#define TT_S1_ATTR_AP_RW_ANY (0b01 << 6)
+#define TT_S1_ATTR_AP_RO_PL1 (0b10 << 6)
+#define TT_S1_ATTR_AP_RO_ANY (0b11 << 6)
+
+#define TT_S1_ATTR_SH_NONE (0b00 << 8)
+#define TT_S1_ATTR_SH_OUTER (0b10 << 8)
+#define TT_S1_ATTR_SH_INNER (0b11 << 8)
+
+#define TT_S1_ATTR_AF (1 << 10)
+#define TT_S1_ATTR_nG (1 << 11)
+
+// OA bits [15:12] - If Armv8.2-LPA is implemented, bits[15:12] are bits[51:48]
+// and bits[47:16] are bits[47:16] of the output address for a page of memory
+
+#define TT_S1_ATTR_nT (1 << 16) // Present if Armv8.4-TTRem is implemented, otherwise RES0
+
+#define TT_S1_ATTR_DBM (1 << 51) // Present if Armv8.1-TTHM is implemented, otherwise RES0
+
+#define TT_S1_ATTR_CONTIG (1 << 52)
+#define TT_S1_ATTR_PXN (1 << 53)
+#define TT_S1_ATTR_UXN (1 << 54)
+
+// PBHA bits[62:59] - If Armv8.2-TTPBHA is implemented, hardware can use these bits
+// for IMPLEMENTATIONDEFINED purposes, otherwise IGNORED
+
+#define TT_S1_MAIR_DEV_nGnRnE 0b00000000
+#define TT_S1_MAIR_DEV_nGnRE 0b00000100
+#define TT_S1_MAIR_DEV_nGRE 0b00001000
+#define TT_S1_MAIR_DEV_GRE 0b00001100
+
+//
+// Inner and Outer Normal memory attributes use the same bit patterns
+// Outer attributes just need to be shifted up
+//
+#define TT_S1_MAIR_OUTER_SHIFT 4
+
+#define TT_S1_MAIR_WT_TRANS_RA 0b0010
+
+#define TT_S1_MAIR_WB_TRANS_RA 0b0110
+#define TT_S1_MAIR_WB_TRANS_RWA 0b0111
+
+#define TT_S1_MAIR_WT_RA 0b1010
+
+#define TT_S1_MAIR_WB_RA 0b1110
+#define TT_S1_MAIR_WB_RWA 0b1111
+
+#endif // V8_MMU_H
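
These field definitions are what the TCR_EL1 constant 0x802520 in startup.S encodes. As a worked example (a sketch only, not part of the port's sources), the same value can be rebuilt from the macros above:

    #include <stdint.h>
    #include "v8_mmu.h"

    static uint64_t example_tcr_el1(void)
    {
        return ((uint64_t)32              << TCR_EL1_T0SZ_SHIFT)   /* 32-bit VA space     */
             | ((uint64_t)TCR_RGN_WBWA    << TCR_EL1_IRGN0_SHIFT)  /* Inner WBWA          */
             | ((uint64_t)TCR_RGN_WBWA    << TCR_EL1_ORGN0_SHIFT)  /* Outer WBWA          */
             | ((uint64_t)TCR_SHARE_OUTER << TCR_EL1_SH0_SHIFT)    /* Outer shareable     */
             | ((uint64_t)TCR_GRANULE_4K  << TCR_EL1_TG0_SHIFT)    /* 4K granule          */
             | TCR_EL1_EPD1                                        /* disable TTBR1 walks */
             | ((uint64_t)TCR_SIZE_4G     << TCR_EL1_IPS_SHIFT);   /* 32-bit PA space     */
        /* = 0x0000000000802520 */
    }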
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/v8_system.h b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_system.h
new file mode 100644
index 00000000..ff96deff
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_system.h
@@ -0,0 +1,115 @@
+//
+// Defines for v8 System Registers
+//
+// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_SYSTEM_H
+#define V8_SYSTEM_H
+
+//
+// AArch64 SPSR
+//
+#define AARCH64_SPSR_EL3h 0b1101
+#define AARCH64_SPSR_EL3t 0b1100
+#define AARCH64_SPSR_EL2h 0b1001
+#define AARCH64_SPSR_EL2t 0b1000
+#define AARCH64_SPSR_EL1h 0b0101
+#define AARCH64_SPSR_EL1t 0b0100
+#define AARCH64_SPSR_EL0t 0b0000
+#define AARCH64_SPSR_RW (1 << 4)
+#define AARCH64_SPSR_F (1 << 6)
+#define AARCH64_SPSR_I (1 << 7)
+#define AARCH64_SPSR_A (1 << 8)
+#define AARCH64_SPSR_D (1 << 9)
+#define AARCH64_SPSR_IL (1 << 20)
+#define AARCH64_SPSR_SS (1 << 21)
+#define AARCH64_SPSR_V (1 << 28)
+#define AARCH64_SPSR_C (1 << 29)
+#define AARCH64_SPSR_Z (1 << 30)
+#define AARCH64_SPSR_N (1 << 31)
+
+//
+// Multiprocessor Affinity Register
+//
+#define MPIDR_EL1_AFF3_LSB 32
+#define MPIDR_EL1_U (1 << 30)
+#define MPIDR_EL1_MT (1 << 24)
+#define MPIDR_EL1_AFF2_LSB 16
+#define MPIDR_EL1_AFF1_LSB 8
+#define MPIDR_EL1_AFF0_LSB 0
+#define MPIDR_EL1_AFF_WIDTH 8
+
+//
+// Data Cache Zero ID Register
+//
+#define DCZID_EL0_BS_LSB 0
+#define DCZID_EL0_BS_WIDTH 4
+#define DCZID_EL0_DZP_LSB 5
+#define DCZID_EL0_DZP (1 << 5)
+
+//
+// System Control Register
+//
+#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_EL1_E0E (1 << 24)
+#define SCTLR_ELx_WXN (1 << 19)
+#define SCTLR_EL1_nTWE (1 << 18)
+#define SCTLR_EL1_nTWI (1 << 16)
+#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_EL1_UMA (1 << 9)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
+#define SCTLR_EL1_THEE (1 << 6)
+#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M (1 << 0)
+
+//
+// Architectural Feature Access Control Register
+//
+#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_FPEN (3 << 20)
+
+//
+// Architectural Feature Trap Register
+//
+#define CPTR_ELx_TCPAC (1 << 31)
+#define CPTR_ELx_TTA (1 << 20)
+#define CPTR_ELx_TFP (1 << 10)
+
+//
+// Secure Configuration Register
+//
+#define SCR_EL3_TWE (1 << 13)
+#define SCR_EL3_TWI (1 << 12)
+#define SCR_EL3_ST (1 << 11)
+#define SCR_EL3_RW (1 << 10)
+#define SCR_EL3_SIF (1 << 9)
+#define SCR_EL3_HCE (1 << 8)
+#define SCR_EL3_SMD (1 << 7)
+#define SCR_EL3_EA (1 << 3)
+#define SCR_EL3_FIQ (1 << 2)
+#define SCR_EL3_IRQ (1 << 1)
+#define SCR_EL3_NS (1 << 0)
+
+//
+// Hypervisor Configuration Register
+//
+#define HCR_EL2_ID (1 << 33)
+#define HCR_EL2_CD (1 << 32)
+#define HCR_EL2_RW (1 << 31)
+#define HCR_EL2_TRVM (1 << 30)
+#define HCR_EL2_HVC (1 << 29)
+#define HCR_EL2_TDZ (1 << 28)
+
+#endif // V8_SYSTEM_H
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/v8_utils.S b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_utils.S
new file mode 100644
index 00000000..f0fcef26
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/v8_utils.S
@@ -0,0 +1,69 @@
+//
+// Simple utility routines for baremetal v8 code
+//
+// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+//
+// void *ZeroBlock(void *blockPtr, unsigned int nBytes)
+//
+// Zero fill a block of memory
+// Fill memory pages or similar structures with zeros.
+// The byte count must be a multiple of the block fill size (16 bytes)
+//
+// Inputs:
+// blockPtr - base address of block to fill
+// nBytes - block size, in bytes
+//
+// Returns:
+// pointer to just filled block, NULL if nBytes is
+// incompatible with block fill size
+//
+ .global ZeroBlock
+ .type ZeroBlock, "function"
+ .cfi_startproc
+ZeroBlock:
+
+ //
+ // we fill data 16 bytes at a time: check that the
+ // block size is a multiple of that
+ //
+ ubfx x2, x1, #0, #4
+ cbnz x2, incompatible
+
+ //
+ // we already have one register full of zeros, get another
+ //
+ mov x3, x2
+
+ //
+ // OK, set temporary pointer and away we go
+ //
+ add x0, x0, x1
+
+loop0:
+ subs x1, x1, #16
+ stp x2, x3, [x0, #-16]!
+ b.ne loop0
+
+ //
+ // that's all - x0 will be back to its start value
+ //
+ ret
+
+ //
+ // parameters are incompatible with block size - return
+ // an indication that this is so
+ //
+incompatible:
+ mov x0,#0
+ ret
+ .cfi_endproc
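
For context, a hedged C-level sketch of the way startup.S calls this routine (illustrative; the prototype matches the comment above and __ttb0_l1 comes from the linker script in this patch):

    #include <stdint.h>

    void *ZeroBlock(void *blockPtr, unsigned int nBytes);

    extern uint64_t __ttb0_l1[];   /* level 1 translation table, from the linker script */

    static void example_clear_l1_table(void)
    {
        /* nBytes must be a multiple of 16; 4 descriptors * 8 bytes = 32 is fine. */
        ZeroBlock(__ttb0_l1, 4 * sizeof(uint64_t));
    }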
diff --git a/ports/cortex_a34/gnu/example_build/sample_threadx/vectors.S b/ports/cortex_a34/gnu/example_build/sample_threadx/vectors.S
new file mode 100644
index 00000000..9e60e001
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/sample_threadx/vectors.S
@@ -0,0 +1,252 @@
+// ------------------------------------------------------------
+// Armv8-A Vector tables
+//
+// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+ .global c0sync1
+ .global irqHandler
+ .global fiqHandler
+ .global irqFirstLevelHandler
+ .global fiqFirstLevelHandler
+
+ .section EL1VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el1_vectors:
+c0sync1: B c0sync1
+
+ .balign 0x80
+c0irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr1: B c0serr1
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync1: B cxsync1
+
+ .balign 0x80
+cxirq1: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr1: B cxserr1
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync1: B l64sync1
+
+ .balign 0x80
+l64irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr1: B l64serr1
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync1: B l32sync1
+
+ .balign 0x80
+l32irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr1: B l32serr1
+
+//----------------------------------------------------------------
+
+ .section EL2VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el2_vectors:
+c0sync2: B c0sync2
+
+ .balign 0x80
+c0irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr2: B c0serr2
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync2: B cxsync2
+
+ .balign 0x80
+cxirq2: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr2: B cxserr2
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync2: B l64sync2
+
+ .balign 0x80
+l64irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr2: B l64serr2
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync2: B l32sync2
+
+ .balign 0x80
+l32irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr2: B l32serr2
+
+//----------------------------------------------------------------
+
+ .section EL3VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el3_vectors:
+c0sync3: B c0sync3
+
+ .balign 0x80
+c0irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr3: B c0serr3
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync3: B cxsync3
+
+ .balign 0x80
+cxirq3: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr3: B cxserr3
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync3: B l64sync3
+
+ .balign 0x80
+l64irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr3: B l64serr3
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync3: B l32sync3
+
+ .balign 0x80
+l32irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr3: B l32serr3
+
+
+ .section InterruptHandlers, "ax"
+ .balign 4
+
+ .type irqFirstLevelHandler, "function"
+irqFirstLevelHandler:
+ MSR SPSel, #0
+ STP x29, x30, [sp, #-16]!
+ BL _tx_thread_context_save
+ BL irqHandler
+ B _tx_thread_context_restore
+
+ .type fiqFirstLevelHandler, "function"
+fiqFirstLevelHandler:
+ STP x29, x30, [sp, #-16]!
+ STP x18, x19, [sp, #-16]!
+ STP x16, x17, [sp, #-16]!
+ STP x14, x15, [sp, #-16]!
+ STP x12, x13, [sp, #-16]!
+ STP x10, x11, [sp, #-16]!
+ STP x8, x9, [sp, #-16]!
+ STP x6, x7, [sp, #-16]!
+ STP x4, x5, [sp, #-16]!
+ STP x2, x3, [sp, #-16]!
+ STP x0, x1, [sp, #-16]!
+
+ BL fiqHandler
+
+ LDP x0, x1, [sp], #16
+ LDP x2, x3, [sp], #16
+ LDP x4, x5, [sp], #16
+ LDP x6, x7, [sp], #16
+ LDP x8, x9, [sp], #16
+ LDP x10, x11, [sp], #16
+ LDP x12, x13, [sp], #16
+ LDP x14, x15, [sp], #16
+ LDP x16, x17, [sp], #16
+ LDP x18, x19, [sp], #16
+ LDP x29, x30, [sp], #16
+ ERET
diff --git a/ports/cortex_a34/gnu/example_build/tx/.cproject b/ports/cortex_a34/gnu/example_build/tx/.cproject
new file mode 100644
index 00000000..e5ff05f4
--- /dev/null
+++ b/ports/cortex_a34/gnu/example_build/tx/.cproject
@@ -0,0 +1,234 @@
diff --git a/ports/arc_em/metaware/test_sandbox/tx/.project b/ports/cortex_a34/gnu/example_build/tx/.project
similarity index 100%
rename from ports/arc_em/metaware/test_sandbox/tx/.project
rename to ports/cortex_a34/gnu/example_build/tx/.project
diff --git a/ports/cortex_a34/gnu/inc/tx_port.h b/ports/cortex_a34/gnu/inc/tx_port.h
new file mode 100644
index 00000000..33bccbf1
--- /dev/null
+++ b/ports/cortex_a34/gnu/inc/tx_port.h
@@ -0,0 +1,379 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s), updated */
+/* macro definition, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef int LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef short SHORT;
+typedef unsigned short USHORT;
+#define ULONG64_DEFINED
+
+/* Override the alignment type to use 64-bit alignment and storage for pointers. */
+
+#define ALIGN_TYPE_DEFINED
+typedef unsigned long long ALIGN_TYPE;
+
+
+/* Override the free block marker for byte pools to be a 64-bit constant. */
+
+#define TX_BYTE_BLOCK_FREE ((ALIGN_TYPE) 0xFFFFEEEEFFFFEEEE)
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_MISRA_ENABLE
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
+#define TX_INLINE_INITIALIZATION
+#endif
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifndef TX_MISRA_ENABLE
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
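+
+/* Illustrative sketch: an application may supply its own extension before the library and
+   application are built, typically in tx_user.h. The field name below is hypothetical; it
+   becomes an additional member of every TX_THREAD control block.
+
+#define TX_THREAD_USER_EXTENSION ULONG tx_thread_demo_tag;
+*/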
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* The ARMv8-A architecture provides the CLZ and RBIT instructions, so the macro for
+ calculating the lowest set bit is redefined to use the compiler's count-trailing-zeros
+ intrinsic unless in-lining is disabled. */
+
+#ifndef TX_DISABLE_INLINE
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) b = (UINT) __builtin_ctz((unsigned int) m);
+
+#endif
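+
+/* Worked example: for m = 0x60 (binary 0110 0000) the lowest set bit is bit 5, so
+   __builtin_ctz(0x60) returns 5 and the macro stores b = 5. The kernel uses this to locate
+   the lowest set bit in its priority bit maps. */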
+
+
+/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
+ can figure out what thread timeout to process. */
+
+#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define the thread timeout setup logic in _tx_thread_create. */
+
+#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
+ (t) -> tx_thread_timer.tx_timer_internal_timeout_param = 0; \
+ (t) -> tx_thread_timer.tx_timer_internal_thread_timeout_ptr = (VOID *) (t);
+
+
+/* Define the thread timeout pointer setup in _tx_thread_timeout. */
+
+#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread to the value that was
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifndef TX_DISABLE_INLINE
+
+/* Define macros, with in-line assembly for performance. */
+
+__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
+{
+
+unsigned long long daif_value;
+
+ __asm__ volatile (" MRS %0, DAIF ": "=r" (daif_value) );
+ __asm__ volatile (" MSR DAIFSet, 0x3" : : : "memory" );
+ return((unsigned int) daif_value);
+}
+
+__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int daif_value)
+{
+
+unsigned long long temp;
+
+ temp = (unsigned long long) daif_value;
+ __asm__ volatile (" MSR DAIF,%0": : "r" (temp): "memory" );
+}
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+#define TX_DISABLE interrupt_save = __disable_interrupts();
+#define TX_RESTORE __restore_interrupts(interrupt_save);
+
+#else
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+#endif
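+
+/* Illustrative usage sketch (hypothetical function and counter): the save area, disable,
+   and restore macros are used together to guard a short critical section.
+
+static ULONG demo_counter;
+
+void demo_increment(void)
+{
+TX_INTERRUPT_SAVE_AREA
+
+    TX_DISABLE                                       // lock out interrupts, remember posture
+    demo_counter++;                                  // protected update
+    TX_RESTORE                                       // restore the prior posture
+}
+*/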
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
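+
+/* Illustrative sketch (hypothetical thread entry): when the library is built with
+   ENABLE_ARM_FP, a thread that uses the FP/SIMD registers enables FP context preservation
+   for itself before doing floating-point work.
+
+static VOID demo_fp_thread_entry(ULONG input)
+{
+double scaled;
+
+    TX_PARAMETER_NOT_USED(input);
+    tx_thread_fp_enable();                           // preserve this thread's FP context
+    scaled = (double) tx_time_get() * 0.5;           // floating-point work
+    (void) scaled;
+    tx_thread_fp_disable();                          // stop preserving FP context
+}
+*/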
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
diff --git a/ports/cortex_a34/gnu/src/tx_initialize_low_level.S b/ports/cortex_a34/gnu/src/tx_initialize_low_level.S
new file mode 100644
index 00000000..42800e0d
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_initialize_low_level.S
@@ -0,0 +1,95 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* Scott Larson, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* status Completion status */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_initialize_low_level(VOID)
+// {
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level, @function
+_tx_initialize_low_level:
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+
+ /* Save the system stack pointer. */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
+
+ LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
+ MOV x1, sp // Pickup SP
+ BIC x1, x1, #0xF // Ensure 16-byte alignment
+ STR x1, [x0] // Store system stack
+
+ /* Save the first available memory address. */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit;
+
+ LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
+ LDR x1, =__top_of_ram // Pickup unused memory address
+ LDR x1, [x1] //
+ STR x1, [x0] // Store unused memory address
+
+ /* Done, return to caller. */
+
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_context_restore.S b/ports/cortex_a34/gnu/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..994c404d
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_context_restore.S
@@ -0,0 +1,287 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_restore(VOID)
+// {
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore, @function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ // if (--_tx_thread_system_state)
+ // {
+
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ SUB w2, w2, #1 // Decrement the counter
+ STR w2, [x3, #0] // Store the counter
+ CMP w2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+ // else if ((_tx_thread_current_ptr) && ((_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable)))
+ // {
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup actual current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR w2, [x3, #0] // Pickup actual preempt disable flag
+ CMP w2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR x2, [x3, #0] // Pickup actual execute thread pointer
+ CMP x0, x2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+ // else
+ // {
+__tx_thread_preempt_restore:
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ STP x20, x21, [sp, #-16]! // Save x20, x21
+ STP x22, x23, [sp, #-16]! // Save x22, x23
+ STP x24, x25, [sp, #-16]! // Save x24, x25
+ STP x26, x27, [sp, #-16]! // Save x26, x27
+ STP x28, x29, [sp, #-16]! // Save x28, x29
+#ifdef ENABLE_ARM_FP
+ LDR w3, [x0, #248] // Pickup FP enable flag
+ CMP w3, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q0, q1, [sp, #-32]! // Save q0, q1
+ STP q2, q3, [sp, #-32]! // Save q2, q3
+ STP q4, q5, [sp, #-32]! // Save q4, q5
+ STP q6, q7, [sp, #-32]! // Save q6, q7
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ STP q16, q17, [sp, #-32]! // Save q16, q17
+ STP q18, q19, [sp, #-32]! // Save q18, q19
+ STP q20, q21, [sp, #-32]! // Save q20, q21
+ STP q22, q23, [sp, #-32]! // Save q22, q23
+ STP q24, q25, [sp, #-32]! // Save q24, q25
+ STP q26, q27, [sp, #-32]! // Save q26, q27
+ STP q28, q29, [sp, #-32]! // Save q28, q29
+ STP q30, q31, [sp, #-32]! // Save q30, q31
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+ STP x4, x5, [sp, #-16]! // Save x4 (saved SPSR), x5 (saved ELR)
+
+ MOV x3, sp // Move sp into x3
+ STR x3, [x0, #8] // Save stack pointer in thread control
+ // block
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+
+ /* Save the remaining time-slice and disable it. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w2, [x0, #36] // Save thread's time-slice
+ MOV w2, #0 // Clear value
+ STR w2, [x3, #0] // Disable global time-slice flag
+
+ // }
+__tx_thread_dont_save_ts:
+
+
+ /* Clear the current task pointer. */
+ /* _tx_thread_current_ptr = TX_NULL; */
+
+ MOV x0, #0 // NULL value
+ STR x0, [x1, #0] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ // _tx_thread_schedule();
+
+ // }
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
+#ifdef EL1
+ MSR ELR_EL1, x1 // Setup point of interrupt
+// MOV x1, #0x4 // Setup EL1 return
+// MSR spsr_el1, x1 // Move into SPSR
+#else
+#ifdef EL2
+ MSR ELR_EL2, x1 // Setup point of interrupt
+// MOV x1, #0x8 // Setup EL2 return
+// MSR spsr_el2, x1 // Move into SPSR
+#else
+ MSR ELR_EL3, x1 // Setup point of interrupt
+// MOV x1, #0xC // Setup EL3 return
+// MSR spsr_el3, x1 // Move into SPSR
+#endif
+#endif
+ ERET // Return to scheduler
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_context_save.S b/ports/cortex_a34/gnu/src/tx_thread_context_save.S
new file mode 100644
index 00000000..859a1e44
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_context_save.S
@@ -0,0 +1,216 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_save(VOID)
+// {
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save, @function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+ out, x29 (frame pointer), x30 (link register) are saved, we are at the configured exception level (EL1, EL2, or EL3),
+ and all other registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+ // if (_tx_thread_system_state++)
+ // {
+
+ STP x0, x1, [sp, #-16]! // Save x0, x1
+ STP x2, x3, [sp, #-16]! // Save x2, x3
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ CMP w2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD w2, w2, #1 // Increment the nested interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x0, SPSR_EL1 // Pickup SPSR
+ MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x0, SPSR_EL2 // Pickup SPSR
+ MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x0, SPSR_EL3 // Pickup SPSR
+ MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x0, x1, [sp, #-16]! // Save SPSR, ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ /* Return to the ISR. */
+
+ RET // Return to ISR
+
+__tx_thread_not_nested_save:
+ // }
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ // else if (_tx_thread_current_ptr)
+ // {
+
+ ADD w2, w2, #1 // Increment the interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x4, SPSR_EL1 // Pickup SPSR
+ MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x4, SPSR_EL2 // Pickup SPSR
+ MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x4, SPSR_EL3 // Pickup SPSR
+ MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x4, x5, [sp, #-16]! // Save SPSR, ELR
+
+ /* Save the current stack pointer in the thread's control block. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+
+ MOV x4, sp //
+ STR x4, [x0, #8] // Save thread stack pointer
+
+ /* Switch to the system stack. */
+ // sp = _tx_thread_system_stack_ptr;
+
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ RET // Return to caller
+
+ // }
+ // else
+ // {
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ ADD sp, sp, #48 // Recover saved registers
+ RET // Continue IRQ processing
+
+ // }
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_fp_disable.c b/ports/cortex_a34/gnu/src/tx_thread_fp_disable.c
new file mode 100644
index 00000000..3e5d7e21
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_fp_disable.c
@@ -0,0 +1,97 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_disable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_FALSE;
+ }
+ }
+}
+
diff --git a/ports/cortex_a34/gnu/src/tx_thread_fp_enable.c b/ports/cortex_a34/gnu/src/tx_thread_fp_enable.c
new file mode 100644
index 00000000..4e69205c
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_fp_enable.c
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function enables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_enable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now setup the FP enable flag in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_TRUE;
+ }
+ }
+}
+
diff --git a/ports/cortex_a34/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a34/gnu/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..6a5a7741
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_interrupt_control.S
@@ -0,0 +1,81 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control, @function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x1, DAIF // Pickup current interrupt posture
+
+ /* Apply the new interrupt posture. */
+
+ MSR DAIF, x0 // Set new interrupt posture
+ MOV x0, x1 // Setup return value
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a34/gnu/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..d0062ef8
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,79 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_disable(void)
+// {
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable, @function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x0, DAIF // Pickup current interrupt lockout posture
+
+ /* Mask interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a34/gnu/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..1b6261ba
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,77 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore, @function
+_tx_thread_interrupt_restore:
+
+ /* Restore the old interrupt posture. */
+
+ MSR DAIF, x0 // Setup the old posture
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_schedule.S b/ports/cortex_a34/gnu/src/tx_thread_schedule.S
new file mode 100644
index 00000000..9a7a7262
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_schedule.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_schedule(VOID)
+// {
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule, @function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+ MSR DAIFClr, 0x3 // Enable interrupts
+
+ /* Wait for a thread to execute. */
+ // do
+ // {
+
+ LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+#ifdef TX_ENABLE_WFI
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BNE _tx_thread_schedule_thread //
+ WFI //
+ B __tx_thread_schedule_loop // Keep looking for a thread
+_tx_thread_schedule_thread:
+#else
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+#endif
+
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Setup the current thread pointer. */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR x0, [x1, #0] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
+
+ LDR w2, [x0, #4] // Pickup run counter
+ LDR w3, [x0, #36] // Pickup time-slice for this thread
+ ADD w2, w2, #1 // Increment thread run-counter
+ STR w2, [x0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ // variable
+ LDR x4, [x0, #8] // Switch stack pointers
+ MOV sp, x4 //
+ STR w3, [x2, #0] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV x19, x0 // Save x0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV x0, x19 // Restore x0
+#endif
+
+ /* Switch to the thread's stack. */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ CMP x5, #0 // Check for synchronous context switch (ELR_EL1 = NULL)
+ BEQ _tx_solicited_return
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_interrupt_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q30, q31, [sp], #32 // Recover q30, q31
+ LDP q28, q29, [sp], #32 // Recover q28, q29
+ LDP q26, q27, [sp], #32 // Recover q26, q27
+ LDP q24, q25, [sp], #32 // Recover q24, q25
+ LDP q22, q23, [sp], #32 // Recover q22, q23
+ LDP q20, q21, [sp], #32 // Recover q20, q21
+ LDP q18, q19, [sp], #32 // Recover q18, q19
+ LDP q16, q17, [sp], #32 // Recover q16, q17
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+ LDP q6, q7, [sp], #32 // Recover q6, q7
+ LDP q4, q5, [sp], #32 // Recover q4, q5
+ LDP q2, q3, [sp], #32 // Recover q2, q3
+ LDP q0, q1, [sp], #32 // Recover q0, q1
+_skip_interrupt_fp_restore:
+#endif
+ LDP x28, x29, [sp], #16 // Recover x28
+ LDP x26, x27, [sp], #16 // Recover x26, x27
+ LDP x24, x25, [sp], #16 // Recover x24, x25
+ LDP x22, x23, [sp], #16 // Recover x22, x23
+ LDP x20, x21, [sp], #16 // Recover x20, x21
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+_tx_solicited_return:
+
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_solicited_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+_skip_solicited_fp_restore:
+#endif
+ LDP x27, x28, [sp], #16 // Recover x27, x28
+ LDP x25, x26, [sp], #16 // Recover x25, x26
+ LDP x23, x24, [sp], #16 // Recover x23, x24
+ LDP x21, x22, [sp], #16 // Recover x21, x22
+ LDP x19, x20, [sp], #16 // Recover x19, x20
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ MSR DAIF, x4 // Recover DAIF
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_stack_build.S b/ports/cortex_a34/gnu/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..5b7e945a
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_stack_build.S
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build, @function
+_tx_thread_stack_build:
+
+
+ /* Build an interrupt frame. On this ARMv8-A port it should look like this:
+
+ Stack Top: SPSR Initial SPSR
+ ELR Point of interrupt
+ x28 Initial value for x28
+ not used Not used
+ x26 Initial value for x26
+ x27 Initial value for x27
+ x24 Initial value for x24
+ x25 Initial value for x25
+ x22 Initial value for x22
+ x23 Initial value for x23
+ x20 Initial value for x20
+ x21 Initial value for x21
+ x18 Initial value for x18
+ x19 Initial value for x19
+ x16 Initial value for x16
+ x17 Initial value for x17
+ x14 Initial value for x14
+ x15 Initial value for x15
+ x12 Initial value for x12
+ x13 Initial value for x13
+ x10 Initial value for x10
+ x11 Initial value for x11
+ x8 Initial value for x8
+ x9 Initial value for x9
+ x6 Initial value for x6
+ x7 Initial value for x7
+ x4 Initial value for x4
+ x5 Initial value for x5
+ x2 Initial value for x2
+ x3 Initial value for x3
+ x0 Initial value for x0
+ x1 Initial value for x1
+ x29 Initial value for x29 (frame pointer)
+ x30 Initial value for x30 (link register)
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR x4, [x0, #24] // Pickup end of stack area
+ BIC x4, x4, #0xF // Ensure 16-byte alignment
+
+ /* Actually build the stack frame. */
+
+ MOV x2, #0 // Build clear value
+ MOV x3, #0 //
+
+ STP x2, x3, [x4, #-16]! // Set backtrace to 0
+ STP x2, x3, [x4, #-16]! // Set initial x29, x30
+ STP x2, x3, [x4, #-16]! // Set initial x0, x1
+ STP x2, x3, [x4, #-16]! // Set initial x2, x3
+ STP x2, x3, [x4, #-16]! // Set initial x4, x5
+ STP x2, x3, [x4, #-16]! // Set initial x6, x7
+ STP x2, x3, [x4, #-16]! // Set initial x8, x9
+ STP x2, x3, [x4, #-16]! // Set initial x10, x11
+ STP x2, x3, [x4, #-16]! // Set initial x12, x13
+ STP x2, x3, [x4, #-16]! // Set initial x14, x15
+ STP x2, x3, [x4, #-16]! // Set initial x16, x17
+ STP x2, x3, [x4, #-16]! // Set initial x18, x19
+ STP x2, x3, [x4, #-16]! // Set initial x20, x21
+ STP x2, x3, [x4, #-16]! // Set initial x22, x23
+ STP x2, x3, [x4, #-16]! // Set initial x24, x25
+ STP x2, x3, [x4, #-16]! // Set initial x26, x27
+ STP x2, x3, [x4, #-16]! // Set initial x28
+#ifdef EL1
+ MOV x2, #0x4 // Build initial SPSR (EL1)
+#else
+#ifdef EL2
+ MOV x2, #0x8 // Build initial SPSR (EL2)
+#else
+ MOV x2, #0xC // Build initial SPSR (EL3)
+#endif
+#endif
+ MOV x3, x1 // Build initial ELR
+ STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
+
+ /* Setup stack pointer. */
+ // thread_ptr -> tx_thread_stack_ptr = x4;
+
+ STR x4, [x0, #8] // Save stack pointer in thread's control block
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_thread_system_return.S b/ports/cortex_a34/gnu/src/tx_thread_system_return.S
new file mode 100644
index 00000000..7d42b63d
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_thread_system_return.S
@@ -0,0 +1,151 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get clobbered by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_system_return(VOID)
+// {
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return, @function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ MRS x0, DAIF // Pickup DAIF
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+ STP x19, x20, [sp, #-16]! // Save x19, x20
+ STP x21, x22, [sp, #-16]! // Save x21, x22
+ STP x23, x24, [sp, #-16]! // Save x23, x24
+ STP x25, x26, [sp, #-16]! // Save x25, x26
+ STP x27, x28, [sp, #-16]! // Save x27, x28
+ LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR x6, [x5, #0] // Pickup current thread pointer
+
+#ifdef ENABLE_ARM_FP
+ LDR w7, [x6, #248] // Pickup FP enable flag
+ CMP w7, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+
+ MOV x1, #0 // Clear x1
+ STP x0, x1, [sp, #-16]! // Save DAIF and a cleared ELR value (marks a solicited frame)
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ MOV x19, x5 // Save x5
+ MOV x20, x6 // Save x6
+ BL _tx_execution_thread_exit // Call the thread exit function
+ MOV x5, x19 // Restore x5
+ MOV x6, x20 // Restore x6
+#endif
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR w1, [x2, #0] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
+
+ MOV x4, sp //
+ STR x4, [x6, #8] // Save thread stack pointer
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+ /* Determine if the time-slice is active. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ MOV x4, #0 // Build clear value
+ CMP w1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save the current remaining time-slice. */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w4, [x2, #0] // Clear time-slice
+ STR w1, [x6, #36] // Store current time-slice
+
+ // }
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+ // _tx_thread_current_ptr = TX_NULL;
+
+ STR x4, [x5, #0] // Clear current thread pointer
+
+ B _tx_thread_schedule // Jump to scheduler!
+
+// }
diff --git a/ports/cortex_a34/gnu/src/tx_timer_interrupt.S b/ports/cortex_a34/gnu/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..5810b5c2
--- /dev/null
+++ b/ports/cortex_a34/gnu/src/tx_timer_interrupt.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_timer_interrupt(VOID)
+// {
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt, @function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+ // _tx_timer_system_clock++;
+
+ LDR x1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR w0, [x1, #0] // Pickup system clock
+ ADD w0, w0, #1 // Increment system clock
+ STR w0, [x1, #0] // Store new system clock
+
+ /* Test for time-slice expiration. */
+ /* if (_tx_timer_time_slice)
+ { */
+
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+ /* _tx_timer_time_slice--; */
+
+ SUB w2, w2, #1 // Decrement the time-slice
+ STR w2, [x3, #0] // Store new time-slice value
+
+ /* Check for expiration. */
+ /* if (_tx_timer_time_slice == 0) */
+
+ CMP w2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+ /* _tx_timer_expired_time_slice = TX_TRUE; */
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV w0, #1 // Build expired value
+ STR w0, [x3, #0] // Set time-slice expiration flag
+
+ /* } */
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+ // if (*_tx_timer_current_ptr)
+ // {
+
+ LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
+ LDR x0, [x1, #0] // Pickup current timer
+ LDR x2, [x0, #0] // Pickup timer list entry
+ CMP x2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+ // _tx_timer_expired = TX_TRUE;
+
+ LDR x3, =_tx_timer_expired // Pickup expiration flag address
+ MOV w2, #1 // Build expired value
+ STR w2, [x3, #0] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+ // }
+ // else
+ // {
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ // _tx_timer_current_ptr++;
+
+ ADD x0, x0, #8 // Move to next timer
+
+ /* Check for wrap-around. */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
+
+ LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
+ LDR x2, [x3, #0] // Pickup list end
+ CMP x0, x2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wrap-around logic
+
+ /* Wrap to beginning of list. */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
+
+ LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
+ LDR x0, [x3, #0] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR x0, [x1, #0] // Store new current timer pointer
+ // }
+
+__tx_timer_done:
+
+
+ /* See if anything has expired. */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
+ LDR w2, [x3, #0] // Pickup time-slice expired flag
+ CMP w2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR x1, =_tx_timer_expired // Pickup addr of other expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+
+ /* Did a timer expire? */
+ // if (_tx_timer_expired)
+ // {
+
+ LDR x1, =_tx_timer_expired // Pickup addr of expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ // _tx_timer_expiration_process();
+
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+ // }
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+ // if (_tx_timer_expired_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR w2, [x3, #0] // Pickup the actual flag
+ CMP w2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+ // _tx_thread_time_slice();
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+ // }
+
+__tx_timer_not_ts_expiration:
+
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ // }
+
+__tx_timer_nothing_expired:
+
+ RET // Return to caller
+
+// }
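
The // comments in the new file already give the algorithm in C; collected in one place, the interrupt body corresponds roughly to this standalone sketch (variable and helper names stand in for the kernel globals and functions the assembly references, and the empty stubs exist only so the sketch compiles).

    /* Illustrative C sketch of tx_timer_interrupt.S above; not the portable kernel source. */
    static unsigned int    system_clock;          /* _tx_timer_system_clock            */
    static unsigned int    time_slice;            /* _tx_timer_time_slice              */
    static unsigned int    expired_time_slice;    /* _tx_timer_expired_time_slice      */
    static unsigned int    expired;               /* _tx_timer_expired                 */
    static void          **current_timer;         /* _tx_timer_current_ptr             */
    static void          **timer_list_start;      /* _tx_timer_list_start              */
    static void          **timer_list_end;        /* _tx_timer_list_end                */

    static void expiration_process(void) {}       /* _tx_timer_expiration_process stub */
    static void thread_time_slice(void)  {}       /* _tx_thread_time_slice stub        */

    static void timer_interrupt_sketch(void)
    {
        /* Increment the system clock. */
        system_clock++;

        /* Decrement an active time-slice and flag its expiration at zero. */
        if (time_slice != 0)
        {
            time_slice--;
            if (time_slice == 0)
                expired_time_slice = 1;
        }

        /* A non-NULL entry in the current timer-list slot means a timer expired;
           otherwise advance the slot, wrapping at the end of the list. */
        if (*current_timer != 0)
        {
            expired = 1;
        }
        else
        {
            current_timer++;                      /* ADD x0, x0, #8 above */
            if (current_timer == timer_list_end)
                current_timer = timer_list_start;
        }

        /* Hand anything that expired to the heavier processing routines. */
        if (expired_time_slice || expired)
        {
            if (expired)
                expiration_process();
            if (expired_time_slice)
                thread_time_slice();
        }
    }
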
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3.h b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3.h
index 23bc7fd8..dfe37586 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#ifndef GICV3_h
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_aliases.h
new file mode 100644
index 00000000..826ba973
--- /dev/null
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_aliases.h
@@ -0,0 +1,113 @@
+//
+// Aliases for GICv3 registers
+//
+// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef GICV3_ALIASES_H
+#define GICV3_ALIASES_H
+
+#ifndef __clang__
+
+/*
+ * Mapping of MSR and MRS to physical and virtual CPU interface registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-5
+ */
+#define ICC_AP0R0_EL1 S3_0_C12_C8_4
+#define ICC_AP0R1_EL1 S3_0_C12_C8_5
+#define ICC_AP0R2_EL1 S3_0_C12_C8_6
+#define ICC_AP0R3_EL1 S3_0_C12_C8_7
+
+#define ICC_AP1R0_EL1 S3_0_C12_C9_0
+#define ICC_AP1R1_EL1 S3_0_C12_C9_1
+#define ICC_AP1R2_EL1 S3_0_C12_C9_2
+#define ICC_AP1R3_EL1 S3_0_C12_C9_3
+
+#define ICC_ASGI1R_EL1 S3_0_C12_C11_6
+
+#define ICC_BPR0_EL1 S3_0_C12_C8_3
+#define ICC_BPR1_EL1 S3_0_C12_C12_3
+
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+
+#define ICC_DIR_EL1 S3_0_C12_C11_1
+
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+
+/*
+ * Mapping of MSR and MRS to virtual interface control registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-6
+ */
+#define ICH_AP0R0_EL2 S3_4_C12_C8_0
+#define ICH_AP0R1_EL2 S3_4_C12_C8_1
+#define ICH_AP0R2_EL2 S3_4_C12_C8_2
+#define ICH_AP0R3_EL2 S3_4_C12_C8_3
+
+#define ICH_AP1R0_EL2 S3_4_C12_C9_0
+#define ICH_AP1R1_EL2 S3_4_C12_C9_1
+#define ICH_AP1R2_EL2 S3_4_C12_C9_2
+#define ICH_AP1R3_EL2 S3_4_C12_C9_3
+
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+
+#define ICH_ELRSR_EL2 S3_4_C12_C11_5
+
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define ICH_LR0_EL2 S3_4_C12_C12_0
+#define ICH_LR1_EL2 S3_4_C12_C12_1
+#define ICH_LR2_EL2 S3_4_C12_C12_2
+#define ICH_LR3_EL2 S3_4_C12_C12_3
+#define ICH_LR4_EL2 S3_4_C12_C12_4
+#define ICH_LR5_EL2 S3_4_C12_C12_5
+#define ICH_LR6_EL2 S3_4_C12_C12_6
+#define ICH_LR7_EL2 S3_4_C12_C12_7
+#define ICH_LR8_EL2 S3_4_C12_C13_0
+#define ICH_LR9_EL2 S3_4_C12_C13_1
+#define ICH_LR10_EL2 S3_4_C12_C13_2
+#define ICH_LR11_EL2 S3_4_C12_C13_3
+#define ICH_LR12_EL2 S3_4_C12_C13_4
+#define ICH_LR13_EL2 S3_4_C12_C13_5
+#define ICH_LR14_EL2 S3_4_C12_C13_6
+#define ICH_LR15_EL2 S3_4_C12_C13_7
+
+#endif /* not __clang__ */
+
+#endif /* GICV3_ALIASES_H */
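
Each alias above is the generic system-register form S<op0>_<op1>_C<CRn>_C<CRm>_<op2>, which assemblers accept even when they do not recognize the architectural GIC name; the #ifndef __clang__ guard suggests armclang's integrated assembler already knows the names and needs no aliases. A hedged usage sketch, borrowing the two-level stringify helpers that GICv3_gicc.h defines below (the helper itself is invented for illustration):

    #include <stdint.h>
    #include "GICv3_aliases.h"

    /* Two-level stringify, as in GICv3_gicc.h. */
    #define stringify_no_expansion(x) #x
    #define stringify(x) stringify_no_expansion(x)

    /* Read the running-priority register.  With GCC the token expands to
       S3_0_C12_C11_3; with Clang it stays ICC_RPR_EL1, which the integrated
       assembler accepts by name. */
    static inline uint64_t read_icc_rpr(void)
    {
        uint64_t value;
        asm("mrs %0, " stringify(ICC_RPR_EL1) : "=r" (value));
        return value;
    }
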
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicc.h
index 8e6f0acc..998d92b5 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicc.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicc.h
@@ -3,12 +3,17 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#ifndef GICV3_gicc_h
#define GICV3_gicc_h
+#include "GICv3_aliases.h"
+
+#define stringify_no_expansion(x) #x
+#define stringify(x) stringify_no_expansion(x)
+
/**********************************************************************/
typedef enum
@@ -21,42 +26,42 @@ typedef enum
static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
{
- asm("msr ICC_SRE_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline uint64_t getICC_SRE_EL1(void)
{
uint64_t retc;
- asm("mrs %0, ICC_SRE_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_SRE_EL1)"\n" : "=r" (retc));
return retc;
}
static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
{
- asm("msr ICC_SRE_EL2, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_SRE_EL2)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline uint64_t getICC_SRE_EL2(void)
{
uint64_t retc;
- asm("mrs %0, ICC_SRE_EL2\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_SRE_EL2)"\n" : "=r" (retc));
return retc;
}
static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
{
- asm("msr ICC_SRE_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_SRE_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline uint64_t getICC_SRE_EL3(void)
{
uint64_t retc;
- asm("mrs %0, ICC_SRE_EL3\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_SRE_EL3)"\n" : "=r" (retc));
return retc;
}
@@ -72,17 +77,17 @@ typedef enum
static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
{
- asm("msr ICC_IGRPEN0_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_IGRPEN0_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
{
- asm("msr ICC_IGRPEN1_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_IGRPEN1_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
{
- asm("msr ICC_IGRPEN1_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_IGRPEN1_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
}
/**********************************************************************/
@@ -102,28 +107,28 @@ typedef enum
static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
{
- asm("msr ICC_CTLR_EL1, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_CTLR_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline uint64_t getICC_CTLR_EL1(void)
{
uint64_t retc;
- asm("mrs %0, ICC_CTLR_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_CTLR_EL1)"\n" : "=r" (retc));
return retc;
}
static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
{
- asm("msr ICC_CTLR_EL3, %0\n; isb" :: "r" ((uint64_t)mode));
+ asm("msr "stringify(ICC_CTLR_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
}
static inline uint64_t getICC_CTLR_EL3(void)
{
uint64_t retc;
- asm("mrs %0, ICC_CTLR_EL3\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_CTLR_EL3)"\n" : "=r" (retc));
return retc;
}
@@ -134,7 +139,7 @@ static inline uint64_t getICC_IAR0(void)
{
uint64_t retc;
- asm("mrs %0, ICC_IAR0_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_IAR0_EL1)"\n" : "=r" (retc));
return retc;
}
@@ -143,46 +148,46 @@ static inline uint64_t getICC_IAR1(void)
{
uint64_t retc;
- asm("mrs %0, ICC_IAR1_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_IAR1_EL1)"\n" : "=r" (retc));
return retc;
}
static inline void setICC_EOIR0(uint32_t interrupt)
{
- asm("msr ICC_EOIR0_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+ asm("msr "stringify(ICC_EOIR0_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
}
static inline void setICC_EOIR1(uint32_t interrupt)
{
- asm("msr ICC_EOIR1_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+ asm("msr "stringify(ICC_EOIR1_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
}
static inline void setICC_DIR(uint32_t interrupt)
{
- asm("msr ICC_DIR_EL1, %0\n; isb" :: "r" ((uint64_t)interrupt));
+ asm("msr "stringify(ICC_DIR_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
}
static inline void setICC_PMR(uint32_t priority)
{
- asm("msr ICC_PMR_EL1, %0\n; isb" :: "r" ((uint64_t)priority));
+ asm("msr "stringify(ICC_PMR_EL1)", %0\n; isb" :: "r" ((uint64_t)priority));
}
static inline void setICC_BPR0(uint32_t binarypoint)
{
- asm("msr ICC_BPR0_EL1, %0\n; isb" :: "r" ((uint64_t)binarypoint));
+ asm("msr "stringify(ICC_BPR0_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
}
static inline void setICC_BPR1(uint32_t binarypoint)
{
- asm("msr ICC_BPR1_EL1, %0\n; isb" :: "r" ((uint64_t)binarypoint));
+ asm("msr "stringify(ICC_BPR1_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
}
static inline uint64_t getICC_BPR0(void)
{
uint64_t retc;
- asm("mrs %0, ICC_BPR0_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_BPR0_EL1)"\n" : "=r" (retc));
return retc;
}
@@ -191,7 +196,7 @@ static inline uint64_t getICC_BPR1(void)
{
uint64_t retc;
- asm("mrs %0, ICC_BPR1_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_BPR1_EL1)"\n" : "=r" (retc));
return retc;
}
@@ -200,7 +205,7 @@ static inline uint64_t getICC_RPR(void)
{
uint64_t retc;
- asm("mrs %0, ICC_RPR_EL1\n" : "=r" (retc));
+ asm("mrs %0, "stringify(ICC_RPR_EL1)"\n" : "=r" (retc));
return retc;
}
@@ -221,7 +226,7 @@ static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
((uint64_t)aff1 << 16) | irm | targetlist | \
((uint64_t)(intid & 0x0f) << 24));
- asm("msr ICC_SGI0R_EL1, %0\n; isb" :: "r" (packedbits));
+ asm("msr "stringify(ICC_SGI0R_EL1)", %0\n; isb" :: "r" (packedbits));
}
static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
@@ -232,7 +237,7 @@ static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
((uint64_t)aff1 << 16) | irm | targetlist | \
((uint64_t)(intid & 0x0f) << 24));
- asm("msr ICC_SGI1R_EL1, %0\n; isb" :: "r" (packedbits));
+ asm("msr "stringify(ICC_SGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
}
static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
@@ -243,7 +248,7 @@ static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
((uint64_t)aff1 << 16) | irm | targetlist | \
((uint64_t)(intid & 0x0f) << 24));
- asm("msr ICC_ASGI1R_EL1, %0\n; isb" :: "r" (packedbits));
+ asm("msr "stringify(ICC_ASGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
}
#endif /* ndef GICV3_gicc_h */
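
The recurring edit in this file wraps every GIC register name in stringify() so that, when GICv3_aliases.h defines the name as a macro, the alias is expanded before being pasted into the asm string. The two macro levels matter; a minimal sketch of the expansion, using the value GICv3_aliases.h assigns on the non-Clang path:

    #define stringify_no_expansion(x) #x
    #define stringify(x)              stringify_no_expansion(x)

    #define ICC_SRE_EL1 S3_0_C12_C12_5      /* as in GICv3_aliases.h (non-Clang path) */

    /* stringify_no_expansion(ICC_SRE_EL1) -> "ICC_SRE_EL1"     (the '#' operator
                                               stringizes its argument unexpanded)  */
    /* stringify(ICC_SRE_EL1)              -> "S3_0_C12_C12_5"  (argument expanded
                                               first, then stringized)              */

    /* So the patched accessor
           asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
       reaches the assembler as
           msr S3_0_C12_C12_5, x<n> ; isb
       under GCC, and as
           msr ICC_SRE_EL1, x<n> ; isb
       under Clang, where the alias header leaves the name undefined. */

The single #define of the alias is enough; no accessor body changes are needed beyond the string splice.
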
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicd.c
index 3bfb4a93..2cf1553b 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicd.c
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicd.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#include
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicr.c
index 7b437b18..d91aeb27 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicr.c
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/GICv3_gicr.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#include "GICv3.h"
@@ -293,7 +293,7 @@ void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
uint32_t groupmod;
/*
- * get each bit of group config duplicated over all 32 bits
+ * get each bit of group config duplicated over all 32-bits
*/
groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
group = (uint32_t)(((int32_t)group << nbits) >> 31);
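
The shift pair in this hunk is the usual idiom for smearing one configuration bit across a whole 32-bit word (the definition of nbits lies outside the hunk shown). A standalone sketch of the idiom, assuming two's-complement conversion and an arithmetic right shift for signed values — both implementation-defined in ISO C, but what AC6 and GCC provide:

    #include <stdint.h>

    /* Replicate bit 'k' (0..31) of 'x' into every bit position of the result:
       move the chosen bit up to bit 31, then let the arithmetic shift smear the
       sign bit back down.  (Hypothetical helper, for illustration only.) */
    static inline uint32_t replicate_bit(uint32_t x, unsigned int k)
    {
        return (uint32_t)(((int32_t)(x << (31u - k))) >> 31);
    }

    /* replicate_bit(0x1u, 0) == 0xFFFFFFFFu,  replicate_bit(0x2u, 0) == 0x00000000u,
       replicate_bit(0x2u, 1) == 0xFFFFFFFFu */
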
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a35/ac6/example_build/sample_threadx/MP_Mutexes.S
index e7f95aa7..c787c3f5 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/MP_Mutexes.S
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/MP_Mutexes.S
@@ -4,7 +4,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a35/ac6/example_build/sample_threadx/PPM_AEM.h
index 52c9a0fe..f7501eeb 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/PPM_AEM.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/PPM_AEM.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.c
index 8898ff39..17cceb01 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.c
@@ -1,14 +1,24 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
-#include
-#define DEMO_STACK_SIZE 2048
-#define DEMO_BYTE_POOL_SIZE 64000
-#define DEMO_BLOCK_POOL_SIZE 100
-#define DEMO_QUEUE_SIZE 100
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
/* Define the ThreadX object control blocks... */
@@ -21,6 +31,7 @@ TX_THREAD thread_4;
TX_THREAD thread_5;
TX_THREAD thread_6;
TX_THREAD thread_7;
+TX_TIMER timer_0;
TX_QUEUE queue_0;
TX_SEMAPHORE semaphore_0;
TX_MUTEX mutex_0;
@@ -43,8 +54,6 @@ ULONG thread_6_counter;
ULONG thread_7_counter;
-UCHAR memory_pool[DEMO_BYTE_POOL_SIZE];
-
/* Define thread prototypes. */
void thread_0_entry(ULONG thread_input);
@@ -54,18 +63,26 @@ void thread_3_and_4_entry(ULONG thread_input);
void thread_5_entry(ULONG thread_input);
void thread_6_and_7_entry(ULONG thread_input);
-void init_timer();
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
/* Define main entry point. */
-int main()
+int main(void)
{
- /* Initialize timer for ThreadX. */
- init_timer();
+ /* Initialize timer. */
+ init_timer();
- /* Enter the ThreadX kernel. */
+ /* Enter ThreadX. */
tx_kernel_enter();
+
+ return 0;
}
@@ -74,55 +91,56 @@ int main()
void tx_application_define(void *first_unused_memory)
{
-UCHAR *pointer = TX_NULL;
+CHAR *pointer = TX_NULL;
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
/* Create a byte memory pool from which to allocate the thread stacks. */
- tx_byte_pool_create(&byte_pool_0, "byte pool 0", memory_pool, DEMO_BYTE_POOL_SIZE);
-
- /* Put system definition stuff in here, e.g. thread creates and other assorted
- create information. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
/* Allocate the stack for thread 0. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
-
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -130,23 +148,23 @@ UCHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -178,7 +196,6 @@ UCHAR *pointer = TX_NULL;
}
-
/* Define the test threads. */
void thread_0_entry(ULONG thread_input)
@@ -239,7 +256,6 @@ void thread_2_entry(ULONG thread_input)
ULONG received_message;
UINT status;
-
/* This thread retrieves messages placed on the queue by thread 1. */
while(1)
{
@@ -250,11 +266,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -313,7 +329,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -366,7 +382,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
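
For brevity the demo ignores the return codes from the pool-creation, allocation, and thread-creation calls; production code normally checks them. A minimal, hypothetical sketch of the same static-pool pattern the patched demo now uses (all worker_* names are invented):

    #include "tx_api.h"

    #define WORKER_STACK_SIZE   1024

    static TX_BYTE_POOL worker_pool;                 /* hypothetical pool          */
    static TX_THREAD    worker_thread;               /* hypothetical thread        */
    static UCHAR        worker_pool_memory[4096];    /* static backing store, as
                                                        the patched demo now uses  */
    static ULONG        worker_counter;

    static void worker_entry(ULONG input)
    {
        while (1)
        {
            worker_counter += input;                 /* placeholder work */
            tx_thread_sleep(10);
        }
    }

    static UINT create_worker(void)
    {
        CHAR *stack;
        UINT  status;

        status = tx_byte_pool_create(&worker_pool, "worker pool",
                                     worker_pool_memory, sizeof(worker_pool_memory));
        if (status != TX_SUCCESS)
            return status;

        status = tx_byte_allocate(&worker_pool, (VOID **) &stack,
                                  WORKER_STACK_SIZE, TX_NO_WAIT);
        if (status != TX_SUCCESS)
            return status;

        return tx_thread_create(&worker_thread, "worker", worker_entry, 1,
                                stack, WORKER_STACK_SIZE,
                                16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);
    }

create_worker() would be called from tx_application_define(), in the same place the demo creates its threads.
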
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.launch
index f0856def..4aff966d 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.launch
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/sample_threadx.launch
@@ (hunks omitted: the XML markup of this launch configuration was stripped during text extraction, leaving only bare +/- line markers) @@
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.c
index 4dc009b2..c2ce6faa 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.c
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.c
@@ -3,7 +3,7 @@
//
// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.h
index 777062cc..4d423904 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/sp804_timer.h
@@ -4,7 +4,7 @@
//
// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a35/ac6/example_build/sample_threadx/startup.S
index de100e56..3952a200 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/startup.S
@@ -7,13 +7,13 @@
//
// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#include "v8_mmu.h"
#include "v8_system.h"
-
+#include "GICv3_aliases.h"
.section StartUp, "ax"
.balign 4
@@ -328,7 +328,7 @@ el1_entry_aarch64:
//
// Cortex-A processors automatically invalidate their caches on reset
// (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
- // It is therefore not necessary for software to invalidate the caches
+ // It is therefore not necessary for software to invalidate the caches
// on startup, however, this is done here in case of a warm reset.
bl InvalidateUDCaches
tlbi VMALLE1
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_aarch64.S
index f8db3bfe..45445a98 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_aarch64.S
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_aarch64.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_mmu.h
index ee8834fa..bce62b54 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_mmu.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_mmu.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_system.h b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_system.h
index ff96deff..a62d2a33 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_system.h
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_system.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_utils.S b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_utils.S
index f0fcef26..888892a0 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/v8_utils.S
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/v8_utils.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/ac6/example_build/sample_threadx/vectors.S b/ports/cortex_a35/ac6/example_build/sample_threadx/vectors.S
index 9e60e001..7784f98e 100644
--- a/ports/cortex_a35/ac6/example_build/sample_threadx/vectors.S
+++ b/ports/cortex_a35/ac6/example_build/sample_threadx/vectors.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/ac6/inc/tx_port.h b/ports/cortex_a35/ac6/inc/tx_port.h
index a6bc8a6a..33bccbf1 100644
--- a/ports/cortex_a35/ac6/inc/tx_port.h
+++ b/ports/cortex_a35/ac6/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,36 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A35/AC6 */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
-/* resulting in version 6.1.6 */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -63,7 +63,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +76,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -85,9 +85,10 @@ typedef int INT;
typedef unsigned int UINT;
typedef int LONG;
typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
-
+#define ULONG64_DEFINED
/* Override the alignment type to use 64-bit alignment and storage for pointers. */
@@ -123,19 +124,19 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16 bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -143,8 +144,15 @@ typedef unsigned long long ALIGN_TYPE;
*/
+#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -172,35 +180,41 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_FIQ_NESTING_ENABLED 0
#endif
-#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
#define TX_INLINE_INITIALIZATION
+#endif
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
+#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
+#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -214,11 +228,11 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -226,8 +240,8 @@ typedef unsigned long long ALIGN_TYPE;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -254,8 +268,8 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#ifndef TX_DISABLE_INLINE
@@ -267,7 +281,7 @@ typedef unsigned long long ALIGN_TYPE;
/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
can figure out what thread timeout to process. */
-
+
#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
@@ -283,9 +297,9 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -331,18 +345,6 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define FP extension for the Cortex-A5x. Each is assumed to be called in the context of the executing
- thread. */
-
-#ifndef TX_SOURCE_CODE
-#define tx_thread_fp_enable _tx_thread_fp_enable
-#define tx_thread_fp_disable _tx_thread_fp_disable
-#endif
-
-VOID tx_thread_fp_enable(VOID);
-VOID tx_thread_fp_disable(VOID);
-
-
/* Define the interrupt lockout macros for each ThreadX object. */
#define TX_BLOCK_POOL_DISABLE TX_DISABLE
@@ -353,18 +355,25 @@ VOID tx_thread_fp_disable(VOID);
#define TX_SEMAPHORE_DISABLE TX_DISABLE
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A35/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
#else
extern CHAR _tx_version_id[];
#endif
#endif
-
-
-
-
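
The block moved above re-exports the FP save/restore controls for application use. A hedged usage sketch (the thread and its data are invented): a thread that only occasionally uses floating point can bracket that work with the two calls, so the port preserves the FP/NEON registers for it only while needed.

    #include "tx_api.h"

    static double filter_state;                     /* hypothetical FP state */

    static void dsp_thread_entry(ULONG input)
    {
        while (1)
        {
            /* Ask the port to preserve this thread's FP/NEON context. */
            tx_thread_fp_enable();

            /* All floating-point work happens while FP preservation is on. */
            filter_state = (filter_state * 0.75) + ((double) input * 0.25);

            /* FP no longer needed; stop saving FP context on switches. */
            tx_thread_fp_disable();

            tx_thread_sleep(100);
        }
    }
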
diff --git a/ports/cortex_a35/ac6/src/tx_initialize_low_level.S b/ports/cortex_a35/ac6/src/tx_initialize_low_level.S
index f7843ed3..d0b541f1 100644
--- a/ports/cortex_a35/ac6/src/tx_initialize_low_level.S
+++ b/ports/cortex_a35/ac6/src/tx_initialize_low_level.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Initialize */
/** */
@@ -21,63 +21,53 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_initialize.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_initialize_low_level Cortex-A35/AC6 */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for any low-level processor */
-/* initialization, including setting up interrupt vectors, setting */
-/* up a periodic timer interrupt source, saving the system stack */
-/* pointer for use in ISR processing later, and finding the first */
-/* available RAM memory address for tx_application_define. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_initialize_low_level(VOID)
-{ */
+// VOID _tx_initialize_low_level(VOID)
+// {
.global _tx_initialize_low_level
.type _tx_initialize_low_level, @function
_tx_initialize_low_level:
@@ -86,15 +76,16 @@ _tx_initialize_low_level:
/* Save the system stack pointer. */
- /* _tx_thread_system_stack_ptr = (VOID_PTR) (sp); */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
MOV x1, sp // Pickup SP
+ SUB x1, x1, #15 //
BIC x1, x1, #0xF // Get 16-bit alignment
STR x1, [x0] // Store system stack
/* Save the first available memory address. */
- /* _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit; */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit;
LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
LDR x1, =zi_limit // Pickup unused memory address
@@ -104,7 +95,7 @@ _tx_initialize_low_level:
/* Done, return to caller. */
RET // Return to caller
-/* } */
+// }
zi_limit:
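
The new SUB #15 ahead of the existing BIC changes the saved system stack base from "round SP down to 16 bytes" to "round (SP - 15) down to 16 bytes". The added line carries no comment, but the net effect is an aligned address that is always strictly below the incoming SP (between 15 and 30 bytes below it). A small worked sketch of the arithmetic:

    #include <stdint.h>

    /* What "SUB x1, x1, #15 ; BIC x1, x1, #0xF" computes for the saved base. */
    static inline uint64_t system_stack_base(uint64_t entry_sp)
    {
        return (entry_sp - 15u) & ~(uint64_t) 0xF;
    }

    /* system_stack_base(0x1000) == 0x0FF0   (aligned SP: 16 bytes below)   */
    /* system_stack_base(0x100F) == 0x1000   (closest case: 15 bytes below) */
    /* system_stack_base(0x100E) == 0x0FF0   (farthest case: 30 bytes below) */
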
diff --git a/ports/cortex_a35/ac6/src/tx_thread_context_restore.S b/ports/cortex_a35/ac6/src/tx_thread_context_restore.S
index 72e32baf..994c404d 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_context_restore.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_context_restore.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,66 +21,50 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_restore Cortex-A35/AC6 */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function restores the interrupt context if it is processing a */
-/* nested interrupt. If not, it returns to the interrupt thread if no */
-/* preemption is necessary. Otherwise, if preemption is necessary or */
-/* if no thread was running, the function returns to the scheduler. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling routine */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs Interrupt Service Routines */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_context_restore(VOID)
-{ */
+// VOID _tx_thread_context_restore(VOID)
+// {
.global _tx_thread_context_restore
.type _tx_thread_context_restore, @function
_tx_thread_context_restore:
@@ -97,19 +81,19 @@ _tx_thread_context_restore:
#endif
/* Determine if interrupts are nested. */
- /* if (--_tx_thread_system_state)
- { */
+ // if (--_tx_thread_system_state)
+ // {
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, #0] // Pickup system state
SUB w2, w2, #1 // Decrement the counter
- STR w2, [x3, #0] // Store the counter
+ STR w2, [x3, #0] // Store the counter
CMP w2, #0 // Was this the first interrupt?
BEQ __tx_thread_not_nested_restore // If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL
@@ -138,13 +122,13 @@ _tx_thread_context_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* } */
+ // }
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
- /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
- || (_tx_thread_preempt_disable))
- { */
+ // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable))
+ // {
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, #0] // Pickup actual current thread pointer
@@ -166,7 +150,7 @@ __tx_thread_no_preempt_restore:
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
- /* sp = _tx_thread_current_ptr -> tx_thread_stack_ptr; */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -199,9 +183,9 @@ __tx_thread_no_preempt_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_thread_preempt_restore:
LDR x4, [x0, #8] // Switch to thread stack pointer
@@ -249,22 +233,22 @@ _skip_fp_save:
/* Save the remaining time-slice and disable it. */
- /* if (_tx_timer_time_slice)
- { */
+ // if (_tx_timer_time_slice)
+ // {
LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
LDR w2, [x3, #0] // Pickup time-slice
CMP w2, #0 // Is it active?
BEQ __tx_thread_dont_save_ts // No, don't save it
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
STR w2, [x3, #0] // Disable global time-slice flag
- /* } */
+ // }
__tx_thread_dont_save_ts:
@@ -275,9 +259,9 @@ __tx_thread_dont_save_ts:
STR x0, [x1, #0] // Clear current thread pointer
/* Return to the scheduler. */
- /* _tx_thread_schedule(); */
+ // _tx_thread_schedule();
- /* } */
+ // }
__tx_thread_idle_system_restore:
@@ -300,6 +284,4 @@ __tx_thread_idle_system_restore:
#endif
#endif
ERET // Return to scheduler
-/* } */
-
-
+// }
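
Spread across the hunks above, the restore path makes three decisions: unwind a nested interrupt in place, resume the interrupted thread when no preemption is needed, or finish saving its context and drop into the scheduler. Paraphrasing the C comments embedded in the assembly, a compact sketch of that decision flow (the empty stubs exist only so the sketch compiles):

    /* Illustrative decision flow for _tx_thread_context_restore; not the kernel source. */
    typedef struct { unsigned int tx_thread_time_slice; } THREAD_SKETCH;

    static unsigned int    system_state;        /* _tx_thread_system_state    */
    static unsigned int    preempt_disable;     /* _tx_thread_preempt_disable */
    static unsigned int    time_slice;          /* _tx_timer_time_slice       */
    static THREAD_SKETCH  *current_thread;      /* _tx_thread_current_ptr     */
    static THREAD_SKETCH  *execute_thread;      /* _tx_thread_execute_ptr     */

    static void return_to_point_of_interrupt(void) {}   /* pop registers + ERET     */
    static void restore_interrupted_thread(void)   {}   /* reload thread SP + ERET  */
    static void schedule(void)                     {}   /* _tx_thread_schedule       */

    static void context_restore_sketch(void)
    {
        /* Nested interrupt: just unwind this level. */
        if (--system_state != 0)
        {
            return_to_point_of_interrupt();
            return;
        }

        /* Interrupted thread is still the one to run, or preemption is disabled. */
        if ((current_thread != 0) &&
            ((current_thread == execute_thread) || (preempt_disable != 0)))
        {
            restore_interrupted_thread();
            return;
        }

        /* Preempted (or interrupted the idle loop): finish the context save,
           park any remaining time-slice, clear the current pointer, and let
           control fall back into the scheduler. */
        if (current_thread != 0)
        {
            if (time_slice != 0)
            {
                current_thread->tx_thread_time_slice = time_slice;
                time_slice = 0;
            }
            current_thread = 0;
        }

        schedule();
    }
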
diff --git a/ports/cortex_a35/ac6/src/tx_thread_context_save.S b/ports/cortex_a35/ac6/src/tx_thread_context_save.S
index f36a8c7a..859a1e44 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_context_save.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_context_save.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,73 +21,60 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_save Cortex-A35/AC6 */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function saves the context of an executing thread in the */
-/* beginning of interrupt processing. The function also ensures that */
-/* the system stack is used upon return to the calling ISR. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_context_save(VOID)
-{ */
+// VOID _tx_thread_context_save(VOID)
+// {
.global _tx_thread_context_save
.type _tx_thread_context_save, @function
_tx_thread_context_save:
/* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
- out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
+ out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
and all other registers are intact. */
/* Check for a nested interrupt condition. */
- /* if (_tx_thread_system_state++)
- { */
+ // if (_tx_thread_system_state++)
+ // {
STP x0, x1, [sp, #-16]! // Save x0, x1
STP x2, x3, [sp, #-16]! // Save x2, x3
@@ -140,18 +127,18 @@ _tx_thread_context_save:
RET // Return to ISR
__tx_thread_not_nested_save:
- /* } */
+ // }
/* Otherwise, not nested, check to see if a thread was running. */
- /* else if (_tx_thread_current_ptr)
- { */
+ // else if (_tx_thread_current_ptr)
+ // {
ADD w2, w2, #1 // Increment the interrupt counter
STR w2, [x3, #0] // Store it back in the variable
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, #0] // Pickup current thread pointer
CMP x0, #0 // Is it NULL?
- BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
// scheduling loop - nothing needs saving!
/* Save minimal context of interrupted thread. */
@@ -179,19 +166,19 @@ __tx_thread_not_nested_save:
STP x4, x5, [sp, #-16]! // Save SPSR, ELR
/* Save the current stack pointer in the thread's control block. */
- /* _tx_thread_current_ptr -> tx_thread_stack_ptr = sp; */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
MOV x4, sp //
STR x4, [x0, #8] // Save thread stack pointer
/* Switch to the system stack. */
- /* sp = _tx_thread_system_stack_ptr; */
+ // sp = _tx_thread_system_stack_ptr;
- LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
LDR x4, [x3, #0] // Pickup system stack pointer
MOV sp, x4 // Setup system stack pointer
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the ISR enter function to indicate an ISR is executing. */
@@ -200,17 +187,17 @@ __tx_thread_not_nested_save:
LDP x29, x30, [sp], #16 // Recover x29, x30
#endif
- RET // Return to caller
+ RET // Return to caller
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
- /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
processing. */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
@@ -223,9 +210,7 @@ __tx_thread_idle_system_save:
#endif
ADD sp, sp, #48 // Recover saved registers
- RET // Continue IRQ processing
-
- /* }
-} */
-
+ RET // Continue IRQ processing
+ // }
+// }
diff --git a/ports/cortex_a35/ac6/src/tx_thread_fp_disable.c b/ports/cortex_a35/ac6/src/tx_thread_fp_disable.c
index 2b7a0aac..3e5d7e21 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_fp_disable.c
+++ b/ports/cortex_a35/ac6/src/tx_thread_fp_disable.c
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -29,41 +29,43 @@
#include "tx_thread.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fp_disable Cortex-A35/AC6 */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function disables the FP for the currently executing thread. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_thread_fp_disable(VOID)
@@ -82,14 +84,14 @@ ULONG system_state;
/* Make sure it is not NULL. */
if (thread_ptr != TX_NULL)
{
-
+
/* Thread is running... make sure the call is from the thread context. */
if (system_state == 0)
{
-
+
/* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
thread_ptr -> tx_thread_fp_enable = TX_FALSE;
}
}
-}
+}
diff --git a/ports/cortex_a35/ac6/src/tx_thread_fp_enable.c b/ports/cortex_a35/ac6/src/tx_thread_fp_enable.c
index 431d5598..4e69205c 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_fp_enable.c
+++ b/ports/cortex_a35/ac6/src/tx_thread_fp_enable.c
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -28,42 +28,43 @@
#include "tx_api.h"
#include "tx_thread.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fp_enable Cortex-A35/AC6 */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function enabled the FP for the currently executing thread. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function enables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_thread_fp_enable(VOID)
@@ -82,14 +83,14 @@ ULONG system_state;
/* Make sure it is not NULL. */
if (thread_ptr != TX_NULL)
{
-
+
/* Thread is running... make sure the call is from the thread context. */
if (system_state == 0)
{
-
+
/* Yes, now setup the FP enable flag in the TX_THREAD structure. */
thread_ptr -> tx_thread_fp_enable = TX_TRUE;
}
}
-}
+}
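
For reference, a minimal usage sketch of the FP services defined above, assuming the application declares the port-specific prototypes itself (they are not part of the generic API header): a thread enables FP saving before touching floating-point registers and disables it once FP work is finished.

    /* Hypothetical thread entry using the ARMv8-A FP services above.
       The extern declarations are assumptions; check this port's headers. */
    #include "tx_api.h"

    extern VOID _tx_thread_fp_enable(VOID);
    extern VOID _tx_thread_fp_disable(VOID);

    void fp_thread_entry(ULONG input)
    {
    double scaled;

        _tx_thread_fp_enable();           /* Save/restore FP context for this thread. */

        scaled = (double)input * 0.5;     /* FP use is now safe across preemption. */
        (void)scaled;

        _tx_thread_fp_disable();          /* No further FP use: stop saving FP context. */
    }
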
diff --git a/ports/cortex_a35/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a35/ac6/src/tx_thread_interrupt_control.S
index b177e05b..6a5a7741 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_interrupt_control.S
@@ -12,66 +12,59 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/*#define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_control Cortex-A35/AC6 */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for changing the interrupt lockout */
-/* posture of the system. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_control(UINT new_posture)
-{ */
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control, @function
_tx_thread_interrupt_control:
@@ -85,5 +78,4 @@ _tx_thread_interrupt_control:
MSR DAIF, x0 // Set new interrupt posture
MOV x0, x1 // Setup return value
RET // Return to caller
-/* } */
-
+// }
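
This routine backs the tx_interrupt_control service (tx_api.h maps the service name onto it), so the usual application-level pattern is save, modify, restore. A brief sketch, using the TX_INT_DISABLE constant from this port's tx_port.h:

    /* Save/restore interrupt posture around a short critical section. */
    #include "tx_api.h"

    void critical_increment(volatile ULONG *shared_counter)
    {
    UINT old_posture;

        old_posture = tx_interrupt_control(TX_INT_DISABLE);   /* Mask IRQ and FIQ. */

        (*shared_counter)++;                                   /* Keep this section short. */

        tx_interrupt_control(old_posture);                     /* Restore prior posture. */
    }
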
diff --git a/ports/cortex_a35/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a35/ac6/src/tx_thread_interrupt_disable.S
index 11846ef0..d0062ef8 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_interrupt_disable.S
@@ -12,65 +12,58 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_disable Cortex-A35/AC6 */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for disabling interrupts */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_disable(void)
-{ */
+// UINT _tx_thread_interrupt_disable(void)
+// {
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable, @function
_tx_thread_interrupt_disable:
@@ -83,5 +76,4 @@ _tx_thread_interrupt_disable:
MSR DAIFSet, 0x3 // Lockout interrupts
RET // Return to caller
-/* } */
-
+// }
diff --git a/ports/cortex_a35/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a35/ac6/src/tx_thread_interrupt_restore.S
index 8c8bb1b7..1b6261ba 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_interrupt_restore.S
@@ -12,66 +12,59 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_restore Cortex-A35/AC6 */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function is responsible for restoring interrupts to the state */
/* returned by a previous _tx_thread_interrupt_disable call. */
-/* */
-/* INPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_restore(UINT old_posture)
-{ */
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore, @function
_tx_thread_interrupt_restore:
@@ -81,5 +74,4 @@ _tx_thread_interrupt_restore:
MSR DAIF, x0 // Setup the old posture
RET // Return to caller
-/* } */
-
+// }
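
These two routines form the lower-level disable/restore pair that the kernel's interrupt lockout macros are built on. A direct-call sketch, with the prototypes written from the function headers above (treat them as assumptions and prefer the port's own declarations):

    /* Read two shared words under a consistent interrupt posture. */
    #include "tx_api.h"

    extern UINT _tx_thread_interrupt_disable(VOID);
    extern UINT _tx_thread_interrupt_restore(UINT old_posture);

    void read_pair(volatile ULONG *a, volatile ULONG *b, ULONG *out_a, ULONG *out_b)
    {
    UINT old_posture;

        old_posture = _tx_thread_interrupt_disable();   /* MSR DAIFSet, 0x3 */
        *out_a = *a;                                    /* Both reads see the same state. */
        *out_b = *b;
        _tx_thread_interrupt_restore(old_posture);      /* MSR DAIF, old posture */
    }
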
diff --git a/ports/cortex_a35/ac6/src/tx_thread_schedule.S b/ports/cortex_a35/ac6/src/tx_thread_schedule.S
index fb1411fa..9a7a7262 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_schedule.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,67 +21,54 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_schedule Cortex-A35/AC6 */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function waits for a thread control block pointer to appear in */
-/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-/* in the variable, the corresponding thread is resumed. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_schedule(VOID)
-{ */
+// VOID _tx_thread_schedule(VOID)
+// {
.global _tx_thread_schedule
.type _tx_thread_schedule, @function
_tx_thread_schedule:
@@ -91,17 +78,17 @@ _tx_thread_schedule:
MSR DAIFClr, 0x3 // Enable interrupts
/* Wait for a thread to execute. */
- /* do
- { */
-
+ // do
+ // {
+
LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
#ifdef TX_ENABLE_WFI
__tx_thread_schedule_loop:
LDR x0, [x1, #0] // Pickup next thread to execute
CMP x0, #0 // Is it NULL?
- BNE _tx_thread_schedule_thread //
- WFI //
+ BNE _tx_thread_schedule_thread //
+ WFI //
B __tx_thread_schedule_loop // Keep looking for a thread
_tx_thread_schedule_thread:
#else
@@ -111,22 +98,22 @@ __tx_thread_schedule_loop:
BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
#endif
- /* }
- while(_tx_thread_execute_ptr == TX_NULL); */
-
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
/* Yes! We have a thread to execute. Lockout interrupts and
transfer control to it. */
MSR DAIFSet, 0x3 // Lockout interrupts
/* Setup the current thread pointer. */
- /* _tx_thread_current_ptr = _tx_thread_execute_ptr; */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
- LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
STR x0, [x1, #0] // Setup current thread pointer
/* Increment the run count for this thread. */
- /* _tx_thread_current_ptr -> tx_thread_run_count++; */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
LDR w2, [x0, #4] // Pickup run counter
LDR w3, [x0, #36] // Pickup time-slice for this thread
@@ -134,9 +121,9 @@ __tx_thread_schedule_loop:
STR w2, [x0, #4] // Store the new run counter
/* Setup time-slice, if present. */
- /* _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice; */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
- LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
// variable
LDR x4, [x0, #8] // Switch stack pointers
MOV sp, x4 //
@@ -152,7 +139,7 @@ __tx_thread_schedule_loop:
#endif
/* Switch to the thread's stack. */
- /* sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr; */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
/* Determine if an interrupt frame or a synchronous task suspension frame
is present. */
@@ -237,7 +224,5 @@ _skip_solicited_fp_restore:
LDP x19, x20, [sp], #16 // Recover x19, x20
LDP x29, x30, [sp], #16 // Recover x29, x30
MSR DAIF, x4 // Recover DAIF
- RET // Return to caller
-/* } */
-
-
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a35/ac6/src/tx_thread_stack_build.S b/ports/cortex_a35/ac6/src/tx_thread_stack_build.S
index 06007fca..5b7e945a 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_stack_build.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_stack_build.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,69 +21,59 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_stack_build Cortex-A35/AC6 */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
-/* function pointer. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread control blk */
-/* function_ptr Pointer to return function */
-/* */
-/* OUTPUT */
-/* */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* CALLS */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
+/* */
+/* CALLED BY */
+/* */
/* _tx_thread_create Create thread service */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-{ */
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
.global _tx_thread_stack_build
.type _tx_thread_stack_build, @function
_tx_thread_stack_build:
-
- /* Build a fake interrupt frame. The form of the fake interrupt stack
- on the Cortex-A5x should look like the following after it is built:
-
+
+ /* Build an interrupt frame. On Cortex-A35 it should look like this:
+
Stack Top: SSPR Initial SSPR
ELR Point of interrupt
x28 Initial value for x28
@@ -129,7 +119,7 @@ _tx_thread_stack_build:
MOV x2, #0 // Build clear value
MOV x3, #0 //
-
+
STP x2, x3, [x4, #-16]! // Set backtrace to 0
STP x2, x3, [x4, #-16]! // Set initial x29, x30
STP x2, x3, [x4, #-16]! // Set initial x0, x1
@@ -160,11 +150,9 @@ _tx_thread_stack_build:
STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
/* Setup stack pointer. */
- /* thread_ptr -> tx_thread_stack_ptr = x2; */
+ // thread_ptr -> tx_thread_stack_ptr = x2;
STR x4, [x0, #8] // Save stack pointer in thread's
RET // Return to caller
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/ac6/src/tx_thread_system_return.S b/ports/cortex_a35/ac6/src/tx_thread_system_return.S
index 5e338bb1..7d42b63d 100644
--- a/ports/cortex_a35/ac6/src/tx_thread_system_return.S
+++ b/ports/cortex_a35/ac6/src/tx_thread_system_return.S
@@ -12,80 +12,65 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_system_return Cortex-A35/AC6 */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is target processor specific. It is used to transfer */
-/* control from a thread back to the ThreadX system. Only a */
-/* minimal context is saved since the compiler assumes temp registers */
-/* are going to get slicked by a function call anyway. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling loop */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX components */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_system_return(VOID)
-{ */
+// VOID _tx_thread_system_return(VOID)
+// {
.global _tx_thread_system_return
.type _tx_thread_system_return, @function
_tx_thread_system_return:
-;
-; /* Save minimal context on the stack. */
-;
+
+ /* Save minimal context on the stack. */
+
MRS x0, DAIF // Pickup DAIF
MSR DAIFSet, 0x3 // Lockout interrupts
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
@@ -129,8 +114,8 @@ _skip_fp_save:
LDR w1, [x2, #0] // Pickup current time slice
/* Save current stack and switch to system stack. */
- /* _tx_thread_current_ptr -> tx_thread_stack_ptr = sp; */
- /* sp = _tx_thread_system_stack_ptr; */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
MOV x4, sp //
STR x4, [x6, #8] // Save thread stack pointer
@@ -139,30 +124,28 @@ _skip_fp_save:
MOV sp, x4 // Setup system stack pointer
/* Determine if the time-slice is active. */
- /* if (_tx_timer_time_slice)
- { */
+ // if (_tx_timer_time_slice)
+ // {
MOV x4, #0 // Build clear value
CMP w1, #0 // Is a time-slice active?
BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
/* Save the current remaining time-slice. */
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w4, [x2, #0] // Clear time-slice
STR w1, [x6, #36] // Store current time-slice
- /* } */
+ // }
__tx_thread_dont_save_ts:
/* Clear the current thread pointer. */
- /* _tx_thread_current_ptr = TX_NULL; */
+ // _tx_thread_current_ptr = TX_NULL;
STR x4, [x5, #0] // Clear current thread pointer
B _tx_thread_schedule // Jump to scheduler!
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/ac6/src/tx_timer_interrupt.S b/ports/cortex_a35/ac6/src/tx_timer_interrupt.S
index a81edb23..5810b5c2 100644
--- a/ports/cortex_a35/ac6/src/tx_timer_interrupt.S
+++ b/ports/cortex_a35/ac6/src/tx_timer_interrupt.S
@@ -12,71 +12,61 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_timer.h"
-#include "tx_thread.h"
-*/
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_timer_interrupt Cortex-A35/AC6 */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function processes the hardware timer interrupt. This */
-/* processing includes incrementing the system clock and checking for */
-/* time slice and/or timer expiration. If either is found, the */
-/* interrupt context save/restore functions are called along with the */
-/* expiration functions. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_timer_expiration_process Timer expiration processing */
-/* _tx_thread_time_slice Time slice interrupted thread */
-/* */
-/* CALLED BY */
-/* */
-/* interrupt vector */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* */
/**************************************************************************/
-/* VOID _tx_timer_interrupt(VOID)
-{ */
+// VOID _tx_timer_interrupt(VOID)
+// {
.global _tx_timer_interrupt
.type _tx_timer_interrupt, @function
_tx_timer_interrupt:
@@ -86,7 +76,7 @@ _tx_timer_interrupt:
for use. */
/* Increment the system clock. */
- /* _tx_timer_system_clock++; */
+ // _tx_timer_system_clock++;
LDR x1, =_tx_timer_system_clock // Pickup address of system clock
LDR w0, [x1, #0] // Pickup system clock
@@ -97,7 +87,7 @@ _tx_timer_interrupt:
/* if (_tx_timer_time_slice)
{ */
- LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
LDR w2, [x3, #0] // Pickup time-slice
CMP w2, #0 // Is it non-active?
BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
@@ -126,8 +116,8 @@ _tx_timer_interrupt:
__tx_timer_no_time_slice:
/* Test for timer expiration. */
- /* if (*_tx_timer_current_ptr)
- { */
+ // if (*_tx_timer_current_ptr)
+ // {
LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
LDR x0, [x1, #0] // Pickup current timer
@@ -136,25 +126,25 @@ __tx_timer_no_time_slice:
BEQ __tx_timer_no_timer // No, just increment the timer
/* Set expiration flag. */
- /* _tx_timer_expired = TX_TRUE; */
+ // _tx_timer_expired = TX_TRUE;
LDR x3, =_tx_timer_expired // Pickup expiration flag address
MOV w2, #1 // Build expired value
STR w2, [x3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
- /* _tx_timer_current_ptr++; */
+ // _tx_timer_current_ptr++;
ADD x0, x0, #8 // Move to next timer
/* Check for wrap-around. */
- /* if (_tx_timer_current_ptr == _tx_timer_list_end) */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
LDR x2, [x3, #0] // Pickup list end
@@ -162,7 +152,7 @@ __tx_timer_no_timer:
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
- /* _tx_timer_current_ptr = _tx_timer_list_start; */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
LDR x0, [x3, #0] // Set current pointer to list start
@@ -170,14 +160,14 @@ __tx_timer_no_timer:
__tx_timer_skip_wrap:
STR x0, [x1, #0] // Store new current timer pointer
- /* } */
+ // }
__tx_timer_done:
/* See if anything has expired. */
- /* if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
- { */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ // {
LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR w2, [x3, #0] // Pickup time-slice expired flag
@@ -194,8 +184,8 @@ __tx_something_expired:
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
/* Did a timer expire? */
- /* if (_tx_timer_expired)
- { */
+ // if (_tx_timer_expired)
+ // {
LDR x1, =_tx_timer_expired // Pickup addr of expired flag
LDR w0, [x1, #0] // Pickup timer expired flag
@@ -203,38 +193,36 @@ __tx_something_expired:
BEQ __tx_timer_dont_activate // If not set, skip timer activation
/* Process timer expiration. */
- /* _tx_timer_expiration_process(); */
+ // _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
- /* } */
+ // }
__tx_timer_dont_activate:
/* Did time slice expire? */
- /* if (_tx_timer_expired_time_slice)
- { */
+ // if (_tx_timer_expired_time_slice)
+ // {
- LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR w2, [x3, #0] // Pickup the actual flag
CMP w2, #0 // See if the flag is set
BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
/* Time slice interrupted thread. */
- /* _tx_thread_time_slice(); */
+ // _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
- /* } */
+ // }
__tx_timer_not_ts_expiration:
LDP x29, x30, [sp], #16 // Recover x29, x30
- /* } */
+ // }
__tx_timer_nothing_expired:
RET // Return to caller
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/.cproject b/ports/cortex_a35/gnu/example_build/sample_threadx/.cproject
index 8a0cf9ac..1c32cb32 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/.cproject
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/.cproject
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3.h b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3.h
index 23bc7fd8..dfe37586 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#ifndef GICV3_h
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_aliases.h
index 0928d14c..826ba973 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_aliases.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_aliases.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicc.h
index 2b8a2d3e..998d92b5 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicc.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicc.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#ifndef GICV3_gicc_h
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicd.c
index 2cf9e843..464ecced 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicd.c
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicd.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#include
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicr.c
index b0d22c40..26b5af8a 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicr.c
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/GICv3_gicr.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
* Use, modification and redistribution of this file is subject to your possession of a
- * valid End User License Agreement for the Arm Product of which these examples are part of
+ * valid End User License Agreement for the Arm Product of which these examples are part of
* and your compliance with all applicable terms and conditions of such licence agreement.
*/
#include "GICv3.h"
@@ -293,7 +293,7 @@ void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
uint32_t groupmod;
/*
- * get each bit of group config duplicated over all 32 bits
+ * get each bit of group config duplicated over all 32-bits
*/
groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
group = (uint32_t)(((int32_t)group << nbits) >> 31);
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a35/gnu/example_build/sample_threadx/MP_Mutexes.S
index e7f95aa7..c787c3f5 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/MP_Mutexes.S
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/MP_Mutexes.S
@@ -4,7 +4,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a35/gnu/example_build/sample_threadx/PPM_AEM.h
index 52c9a0fe..f7501eeb 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/PPM_AEM.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/PPM_AEM.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a35/gnu/example_build/sample_threadx/sample_threadx.c
index 8898ff39..17cceb01 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/sample_threadx.c
@@ -1,14 +1,24 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
-#include
-#define DEMO_STACK_SIZE 2048
-#define DEMO_BYTE_POOL_SIZE 64000
-#define DEMO_BLOCK_POOL_SIZE 100
-#define DEMO_QUEUE_SIZE 100
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
/* Define the ThreadX object control blocks... */
@@ -21,6 +31,7 @@ TX_THREAD thread_4;
TX_THREAD thread_5;
TX_THREAD thread_6;
TX_THREAD thread_7;
+TX_TIMER timer_0;
TX_QUEUE queue_0;
TX_SEMAPHORE semaphore_0;
TX_MUTEX mutex_0;
@@ -43,8 +54,6 @@ ULONG thread_6_counter;
ULONG thread_7_counter;
-UCHAR memory_pool[DEMO_BYTE_POOL_SIZE];
-
/* Define thread prototypes. */
void thread_0_entry(ULONG thread_input);
@@ -54,18 +63,26 @@ void thread_3_and_4_entry(ULONG thread_input);
void thread_5_entry(ULONG thread_input);
void thread_6_and_7_entry(ULONG thread_input);
-void init_timer();
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
/* Define main entry point. */
-int main()
+int main(void)
{
- /* Initialize timer for ThreadX. */
- init_timer();
+ /* Initialize timer. */
+ init_timer();
- /* Enter the ThreadX kernel. */
+ /* Enter ThreadX. */
tx_kernel_enter();
+
+ return 0;
}
@@ -74,55 +91,56 @@ int main()
void tx_application_define(void *first_unused_memory)
{
-UCHAR *pointer = TX_NULL;
+CHAR *pointer = TX_NULL;
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
/* Create a byte memory pool from which to allocate the thread stacks. */
- tx_byte_pool_create(&byte_pool_0, "byte pool 0", memory_pool, DEMO_BYTE_POOL_SIZE);
-
- /* Put system definition stuff in here, e.g. thread creates and other assorted
- create information. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
/* Allocate the stack for thread 0. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
-
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -130,23 +148,23 @@ UCHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -178,7 +196,6 @@ UCHAR *pointer = TX_NULL;
}
-
/* Define the test threads. */
void thread_0_entry(ULONG thread_input)
@@ -239,7 +256,6 @@ void thread_2_entry(ULONG thread_input)
ULONG received_message;
UINT status;
-
/* This thread retrieves messages placed on the queue by thread 1. */
while(1)
{
@@ -250,11 +266,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -313,7 +329,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -366,7 +382,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
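
The reworked sample only arms the event trace when TX_ENABLE_EVENT_TRACE is defined at build time. A stripped-down sketch of that pattern outside the demo; the buffer size and the registry-entry count (32) are arbitrary example values:

    /* Minimal event-trace bring-up mirroring the pattern added to the sample. */
    #include "tx_api.h"

    #ifdef TX_ENABLE_EVENT_TRACE
    static UCHAR event_buffer[65536];
    #endif

    void application_trace_init(void)
    {
    #ifdef TX_ENABLE_EVENT_TRACE
        /* Route kernel events into the RAM buffer; a host tool such as TraceX reads it out later. */
        tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
    #endif
    }
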
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.c
index 4dc009b2..c2ce6faa 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.c
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.c
@@ -3,7 +3,7 @@
//
// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.h
index 777062cc..4d423904 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/sp804_timer.h
@@ -4,7 +4,7 @@
//
// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/startup.S b/ports/cortex_a35/gnu/example_build/sample_threadx/startup.S
index 67dd8a6a..b71b45f8 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/startup.S
@@ -7,7 +7,7 @@
//
// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_aarch64.S
index f8db3bfe..45445a98 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_aarch64.S
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_aarch64.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_mmu.h
index ee8834fa..bce62b54 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_mmu.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_mmu.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_system.h b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_system.h
index ff96deff..a62d2a33 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_system.h
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_system.h
@@ -3,7 +3,7 @@
//
// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_utils.S b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_utils.S
index f0fcef26..888892a0 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/v8_utils.S
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/v8_utils.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
diff --git a/ports/cortex_a35/gnu/example_build/sample_threadx/vectors.S b/ports/cortex_a35/gnu/example_build/sample_threadx/vectors.S
index 9e60e001..7784f98e 100644
--- a/ports/cortex_a35/gnu/example_build/sample_threadx/vectors.S
+++ b/ports/cortex_a35/gnu/example_build/sample_threadx/vectors.S
@@ -3,7 +3,7 @@
//
// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
diff --git a/ports/cortex_a35/gnu/example_build/tx/.cproject b/ports/cortex_a35/gnu/example_build/tx/.cproject
index 01370489..01bcd509 100644
--- a/ports/cortex_a35/gnu/example_build/tx/.cproject
+++ b/ports/cortex_a35/gnu/example_build/tx/.cproject
diff --git a/ports/cortex_a35/gnu/inc/tx_port.h b/ports/cortex_a35/gnu/inc/tx_port.h
index a5d4ba1d..33bccbf1 100644
--- a/ports/cortex_a35/gnu/inc/tx_port.h
+++ b/ports/cortex_a35/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,36 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A35/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
-/* resulting in version 6.1.6 */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -63,7 +63,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +76,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -85,9 +85,10 @@ typedef int INT;
typedef unsigned int UINT;
typedef int LONG;
typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
-
+#define ULONG64_DEFINED
/* Override the alignment type to use 64-bit alignment and storage for pointers. */
@@ -123,19 +124,19 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -143,8 +144,15 @@ typedef unsigned long long ALIGN_TYPE;
*/
+#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -172,35 +180,41 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_FIQ_NESTING_ENABLED 0
#endif
-#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
#define TX_INLINE_INITIALIZATION
+#endif
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
+#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
+#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -214,11 +228,11 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -226,8 +240,8 @@ typedef unsigned long long ALIGN_TYPE;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -254,8 +268,8 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#ifndef TX_DISABLE_INLINE
@@ -267,7 +281,7 @@ typedef unsigned long long ALIGN_TYPE;
/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
can figure out what thread timeout to process. */
-
+
#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
@@ -283,9 +297,9 @@ typedef unsigned long long ALIGN_TYPE;
#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -331,18 +345,6 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define FP extension for the Cortex-A5x. Each is assumed to be called in the context of the executing
- thread. */
-
-#ifndef TX_SOURCE_CODE
-#define tx_thread_fp_enable _tx_thread_fp_enable
-#define tx_thread_fp_disable _tx_thread_fp_disable
-#endif
-
-VOID tx_thread_fp_enable(VOID);
-VOID tx_thread_fp_disable(VOID);
-
-
/* Define the interrupt lockout macros for each ThreadX object. */
#define TX_BLOCK_POOL_DISABLE TX_DISABLE
@@ -353,18 +355,25 @@ VOID tx_thread_fp_disable(VOID);
#define TX_SEMAPHORE_DISABLE TX_DISABLE
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A35/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
#else
extern CHAR _tx_version_id[];
#endif
#endif
-
-
-
-
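
Beyond the comment and whitespace cleanup, this tx_port.h hunk set adds a 64-bit ULONG64 type, gates trace, inline-initialization, and stack-fill behavior on TX_MISRA_ENABLE, and wraps TX_PORT_SPECIFIC_BUILD_OPTIONS in parentheses. The parentheses are the easy change to overlook; a minimal standalone sketch (the FLAG_* names are made up, only the precedence point carries over to the three TX_*_ENABLED flags OR'ed together in the real macro) shows why an unparenthesized OR of option bits misbehaves inside a larger expression:

/* Minimal sketch, not part of the patch: why parenthesizing an OR of option
   bits matters.  FLAG_A/FLAG_B and both OPTIONS_* macros are invented here. */
#include <stdio.h>

#define FLAG_A                  0x01u
#define FLAG_B                  0x02u

#define OPTIONS_UNPARENTHESIZED FLAG_A | FLAG_B     /* old style */
#define OPTIONS_PARENTHESIZED   (FLAG_A | FLAG_B)   /* new style */

int main(void)
{
    /* '&' binds tighter than '|', so the unparenthesized macro expands to
       FLAG_A | (FLAG_B & 0x02u) here, not (FLAG_A | FLAG_B) & 0x02u.       */
    unsigned int wrong = OPTIONS_UNPARENTHESIZED & 0x02u;   /* 0x03 */
    unsigned int right = OPTIONS_PARENTHESIZED   & 0x02u;   /* 0x02 */

    printf("%#x %#x\n", wrong, right);
    return 0;
}
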
diff --git a/ports/cortex_a35/gnu/src/tx_initialize_low_level.S b/ports/cortex_a35/gnu/src/tx_initialize_low_level.S
index 6dca4867..bf04784e 100644
--- a/ports/cortex_a35/gnu/src/tx_initialize_low_level.S
+++ b/ports/cortex_a35/gnu/src/tx_initialize_low_level.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Initialize */
/** */
@@ -21,63 +21,53 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_initialize.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_initialize_low_level Cortex-A35/GNU */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for any low-level processor */
-/* initialization, including setting up interrupt vectors, setting */
-/* up a periodic timer interrupt source, saving the system stack */
-/* pointer for use in ISR processing later, and finding the first */
-/* available RAM memory address for tx_application_define. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_initialize_low_level(VOID)
-{ */
+// VOID _tx_initialize_low_level(VOID)
+// {
.global _tx_initialize_low_level
.type _tx_initialize_low_level, @function
_tx_initialize_low_level:
@@ -86,15 +76,16 @@ _tx_initialize_low_level:
/* Save the system stack pointer. */
- /* _tx_thread_system_stack_ptr = (VOID_PTR) (sp); */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
MOV x1, sp // Pickup SP
+ SUB x1, x1, #15 //
    BIC     x1, x1, #0xF                        // Get 16-byte alignment
STR x1, [x0] // Store system stack
/* Save the first available memory address. */
- /* _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit; */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit;
LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
LDR x1, =__top_of_ram // Pickup unused memory address
@@ -104,5 +95,4 @@ _tx_initialize_low_level:
/* Done, return to caller. */
RET // Return to caller
-/* } */
-
+// }
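
The functional change in this hunk is the SUB that now precedes the existing BIC when the system stack pointer is saved. A small host-side sketch of just that arithmetic (the sample addresses are invented) shows what the pair produces: a value that is always 16-byte aligned and at least 15 bytes below the incoming sp, instead of sp merely rounded down:

/* Host-side sketch of the SUB #15 / BIC #0xF arithmetic above; the sample
   stack addresses are made up and only the math is meaningful here.        */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t align_down_with_headroom(uint64_t sp)
{
    return (sp - 15u) & ~(uint64_t)0xF;   /* SUB x1, x1, #15 ; BIC x1, x1, #0xF */
}

int main(void)
{
    uint64_t samples[] = { 0x80001000u, 0x8000100Fu, 0x80001001u };
    unsigned i;

    for (i = 0; i < 3; i++)
    {
        uint64_t r = align_down_with_headroom(samples[i]);
        assert((r & 0xF) == 0);            /* always 16-byte aligned          */
        assert(samples[i] - r >= 15);      /* always at least 15 bytes below  */
        printf("sp=%#llx -> %#llx\n",
               (unsigned long long)samples[i], (unsigned long long)r);
    }
    return 0;
}
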
diff --git a/ports/cortex_a35/gnu/src/tx_thread_context_restore.S b/ports/cortex_a35/gnu/src/tx_thread_context_restore.S
index 63d95ddc..994c404d 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_context_restore.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,66 +21,50 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_restore Cortex-A35/GNU */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function restores the interrupt context if it is processing a */
-/* nested interrupt. If not, it returns to the interrupt thread if no */
-/* preemption is necessary. Otherwise, if preemption is necessary or */
-/* if no thread was running, the function returns to the scheduler. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling routine */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs Interrupt Service Routines */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_context_restore(VOID)
-{ */
+// VOID _tx_thread_context_restore(VOID)
+// {
.global _tx_thread_context_restore
.type _tx_thread_context_restore, @function
_tx_thread_context_restore:
@@ -97,19 +81,19 @@ _tx_thread_context_restore:
#endif
/* Determine if interrupts are nested. */
- /* if (--_tx_thread_system_state)
- { */
+ // if (--_tx_thread_system_state)
+ // {
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, #0] // Pickup system state
SUB w2, w2, #1 // Decrement the counter
- STR w2, [x3, #0] // Store the counter
+ STR w2, [x3, #0] // Store the counter
CMP w2, #0 // Was this the first interrupt?
BEQ __tx_thread_not_nested_restore // If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL
@@ -138,13 +122,13 @@ _tx_thread_context_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* } */
+ // }
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
- /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
- || (_tx_thread_preempt_disable))
- { */
+ // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable))
+ // {
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, #0] // Pickup actual current thread pointer
@@ -166,7 +150,7 @@ __tx_thread_no_preempt_restore:
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
- /* sp = _tx_thread_current_ptr -> tx_thread_stack_ptr; */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -199,9 +183,9 @@ __tx_thread_no_preempt_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_thread_preempt_restore:
LDR x4, [x0, #8] // Switch to thread stack pointer
@@ -249,22 +233,22 @@ _skip_fp_save:
/* Save the remaining time-slice and disable it. */
- /* if (_tx_timer_time_slice)
- { */
+ // if (_tx_timer_time_slice)
+ // {
LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
LDR w2, [x3, #0] // Pickup time-slice
CMP w2, #0 // Is it active?
BEQ __tx_thread_dont_save_ts // No, don't save it
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
STR w2, [x3, #0] // Disable global time-slice flag
- /* } */
+ // }
__tx_thread_dont_save_ts:
@@ -275,9 +259,9 @@ __tx_thread_dont_save_ts:
STR x0, [x1, #0] // Clear current thread pointer
/* Return to the scheduler. */
- /* _tx_thread_schedule(); */
+ // _tx_thread_schedule();
- /* } */
+ // }
__tx_thread_idle_system_restore:
@@ -300,6 +284,4 @@ __tx_thread_idle_system_restore:
#endif
#endif
ERET // Return to scheduler
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_thread_context_save.S b/ports/cortex_a35/gnu/src/tx_thread_context_save.S
index 3f7ffc31..859a1e44 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_context_save.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,73 +21,60 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_save Cortex-A35/GNU */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function saves the context of an executing thread in the */
-/* beginning of interrupt processing. The function also ensures that */
-/* the system stack is used upon return to the calling ISR. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_context_save(VOID)
-{ */
+// VOID _tx_thread_context_save(VOID)
+// {
.global _tx_thread_context_save
.type _tx_thread_context_save, @function
_tx_thread_context_save:
/* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
- out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
+ out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
and all other registers are intact. */
/* Check for a nested interrupt condition. */
- /* if (_tx_thread_system_state++)
- { */
+ // if (_tx_thread_system_state++)
+ // {
STP x0, x1, [sp, #-16]! // Save x0, x1
STP x2, x3, [sp, #-16]! // Save x2, x3
@@ -140,18 +127,18 @@ _tx_thread_context_save:
RET // Return to ISR
__tx_thread_not_nested_save:
- /* } */
+ // }
/* Otherwise, not nested, check to see if a thread was running. */
- /* else if (_tx_thread_current_ptr)
- { */
+ // else if (_tx_thread_current_ptr)
+ // {
ADD w2, w2, #1 // Increment the interrupt counter
STR w2, [x3, #0] // Store it back in the variable
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, #0] // Pickup current thread pointer
CMP x0, #0 // Is it NULL?
- BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
// scheduling loop - nothing needs saving!
/* Save minimal context of interrupted thread. */
@@ -179,19 +166,19 @@ __tx_thread_not_nested_save:
STP x4, x5, [sp, #-16]! // Save SPSR, ELR
/* Save the current stack pointer in the thread's control block. */
- /* _tx_thread_current_ptr -> tx_thread_stack_ptr = sp; */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
MOV x4, sp //
STR x4, [x0, #8] // Save thread stack pointer
/* Switch to the system stack. */
- /* sp = _tx_thread_system_stack_ptr; */
+ // sp = _tx_thread_system_stack_ptr;
- LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
LDR x4, [x3, #0] // Pickup system stack pointer
MOV sp, x4 // Setup system stack pointer
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the ISR enter function to indicate an ISR is executing. */
@@ -200,17 +187,17 @@ __tx_thread_not_nested_save:
LDP x29, x30, [sp], #16 // Recover x29, x30
#endif
- RET // Return to caller
+ RET // Return to caller
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
- /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
processing. */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
@@ -223,9 +210,7 @@ __tx_thread_idle_system_save:
#endif
ADD sp, sp, #48 // Recover saved registers
- RET // Continue IRQ processing
-
- /* }
-} */
-
+ RET // Continue IRQ processing
+ // }
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_thread_fp_disable.c b/ports/cortex_a35/gnu/src/tx_thread_fp_disable.c
index 0f59986e..3e5d7e21 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_fp_disable.c
+++ b/ports/cortex_a35/gnu/src/tx_thread_fp_disable.c
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -29,41 +29,43 @@
#include "tx_thread.h"
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fp_disable Cortex-A35/GNU */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function disables the FP for the currently executing thread. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_thread_fp_disable(VOID)
@@ -82,14 +84,14 @@ ULONG system_state;
/* Make sure it is not NULL. */
if (thread_ptr != TX_NULL)
{
-
+
/* Thread is running... make sure the call is from the thread context. */
if (system_state == 0)
{
-
+
/* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
thread_ptr -> tx_thread_fp_enable = TX_FALSE;
}
}
-}
+}
diff --git a/ports/cortex_a35/gnu/src/tx_thread_fp_enable.c b/ports/cortex_a35/gnu/src/tx_thread_fp_enable.c
index 4a4751ab..4e69205c 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_fp_enable.c
+++ b/ports/cortex_a35/gnu/src/tx_thread_fp_enable.c
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -28,42 +28,43 @@
#include "tx_api.h"
#include "tx_thread.h"
-
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fp_enable Cortex-A35/GNU */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function enabled the FP for the currently executing thread. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function enables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
VOID _tx_thread_fp_enable(VOID)
@@ -82,14 +83,14 @@ ULONG system_state;
/* Make sure it is not NULL. */
if (thread_ptr != TX_NULL)
{
-
+
/* Thread is running... make sure the call is from the thread context. */
if (system_state == 0)
{
-
+
/* Yes, now setup the FP enable flag in the TX_THREAD structure. */
thread_ptr -> tx_thread_fp_enable = TX_TRUE;
}
}
-}
+}
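
As the guard in the function above shows, the FP services only take effect when called from thread context (system state zero) with a valid current thread. A minimal usage sketch follows, assuming a hypothetical application thread fp_worker_entry that wants its FP/NEON registers preserved across preemption; tx_thread_fp_enable and tx_thread_fp_disable are the port services declared in tx_port.h above, everything else is made up for illustration:

/* Hypothetical application thread using the port FP services. */
#include "tx_api.h"

VOID fp_worker_entry(ULONG input)
{
ULONG   i;
double  accumulator = 0.0;

    /* Have the scheduler save/restore the FP/NEON registers for this thread. */
    tx_thread_fp_enable();

    for (i = 0; i < input; i++)
    {
        accumulator += (double)i * 0.5;    /* FP state that must survive preemption */
    }

    /* FP work finished; drop the extra context save/restore from now on. */
    tx_thread_fp_disable();

    (void)accumulator;
}

Enabling FP for a thread makes the context switch save and restore the FP registers for that thread, so disabling it again once the FP work is done avoids the extra save/restore cost for the rest of the thread's life.
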
diff --git a/ports/cortex_a35/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a35/gnu/src/tx_thread_interrupt_control.S
index f387c449..6a5a7741 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_interrupt_control.S
@@ -12,66 +12,59 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/*#define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_control Cortex-A35/GNU */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for changing the interrupt lockout */
-/* posture of the system. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_control(UINT new_posture)
-{ */
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control, @function
_tx_thread_interrupt_control:
@@ -85,5 +78,4 @@ _tx_thread_interrupt_control:
MSR DAIF, x0 // Set new interrupt posture
MOV x0, x1 // Setup return value
RET // Return to caller
-/* } */
-
+// }
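
_tx_thread_interrupt_control is the port-level worker behind the tx_interrupt_control service. A short application-level sketch of how it is typically used (the function name and shared counter are made up; TX_INT_DISABLE and TX_INT_ENABLE are the port constants defined in tx_port.h above, 0xC0/0x00, masking IRQ and FIQ):

/* Sketch of an application critical section built on tx_interrupt_control. */
#include "tx_api.h"

volatile ULONG  shared_counter;         /* hypothetical data also touched by an ISR */

VOID bump_shared_counter(VOID)
{
UINT    old_posture;

    /* Lock out IRQ and FIQ, remembering the caller's interrupt posture. */
    old_posture = tx_interrupt_control(TX_INT_DISABLE);

    shared_counter++;                   /* keep the critical section short */

    /* Put the interrupt posture back the way it was. */
    tx_interrupt_control(old_posture);
}
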
diff --git a/ports/cortex_a35/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a35/gnu/src/tx_thread_interrupt_disable.S
index 0e31281c..d0062ef8 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_interrupt_disable.S
@@ -12,65 +12,58 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_disable Cortex-A35/GNU */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for disabling interrupts */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_disable(void)
-{ */
+// UINT _tx_thread_interrupt_disable(void)
+// {
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable, @function
_tx_thread_interrupt_disable:
@@ -83,5 +76,4 @@ _tx_thread_interrupt_disable:
MSR DAIFSet, 0x3 // Lockout interrupts
RET // Return to caller
-/* } */
-
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a35/gnu/src/tx_thread_interrupt_restore.S
index 7edae01a..1b6261ba 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_interrupt_restore.S
@@ -12,66 +12,59 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
- .text
- .align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_restore Cortex-A35/GNU */
-/* 6.1 */
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function is responsible for restoring interrupts to the state */
/* returned by a previous _tx_thread_interrupt_disable call. */
-/* */
-/* INPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* UINT _tx_thread_interrupt_restore(UINT old_posture)
-{ */
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore, @function
_tx_thread_interrupt_restore:
@@ -81,5 +74,4 @@ _tx_thread_interrupt_restore:
MSR DAIF, x0 // Setup the old posture
RET // Return to caller
-/* } */
-
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_thread_schedule.S b/ports/cortex_a35/gnu/src/tx_thread_schedule.S
index b59e9250..9a7a7262 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_schedule.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,67 +21,54 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_schedule Cortex-A35/GNU */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function waits for a thread control block pointer to appear in */
-/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-/* in the variable, the corresponding thread is resumed. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_schedule(VOID)
-{ */
+// VOID _tx_thread_schedule(VOID)
+// {
.global _tx_thread_schedule
.type _tx_thread_schedule, @function
_tx_thread_schedule:
@@ -91,17 +78,17 @@ _tx_thread_schedule:
MSR DAIFClr, 0x3 // Enable interrupts
/* Wait for a thread to execute. */
- /* do
- { */
-
+ // do
+ // {
+
LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
#ifdef TX_ENABLE_WFI
__tx_thread_schedule_loop:
LDR x0, [x1, #0] // Pickup next thread to execute
CMP x0, #0 // Is it NULL?
- BNE _tx_thread_schedule_thread //
- WFI //
+ BNE _tx_thread_schedule_thread //
+ WFI //
B __tx_thread_schedule_loop // Keep looking for a thread
_tx_thread_schedule_thread:
#else
@@ -111,22 +98,22 @@ __tx_thread_schedule_loop:
BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
#endif
- /* }
- while(_tx_thread_execute_ptr == TX_NULL); */
-
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
/* Yes! We have a thread to execute. Lockout interrupts and
transfer control to it. */
MSR DAIFSet, 0x3 // Lockout interrupts
/* Setup the current thread pointer. */
- /* _tx_thread_current_ptr = _tx_thread_execute_ptr; */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
- LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
STR x0, [x1, #0] // Setup current thread pointer
/* Increment the run count for this thread. */
- /* _tx_thread_current_ptr -> tx_thread_run_count++; */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
LDR w2, [x0, #4] // Pickup run counter
LDR w3, [x0, #36] // Pickup time-slice for this thread
@@ -134,9 +121,9 @@ __tx_thread_schedule_loop:
STR w2, [x0, #4] // Store the new run counter
/* Setup time-slice, if present. */
- /* _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice; */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
- LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
// variable
LDR x4, [x0, #8] // Switch stack pointers
MOV sp, x4 //
@@ -152,7 +139,7 @@ __tx_thread_schedule_loop:
#endif
/* Switch to the thread's stack. */
- /* sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr; */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
/* Determine if an interrupt frame or a synchronous task suspension frame
is present. */
@@ -237,7 +224,5 @@ _skip_solicited_fp_restore:
LDP x19, x20, [sp], #16 // Recover x19, x20
LDP x29, x30, [sp], #16 // Recover x29, x30
MSR DAIF, x4 // Recover DAIF
- RET // Return to caller
-/* } */
-
-
+ RET // Return to caller
+// }
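
The inline comments in this hunk already spell out the scheduler's control flow; collected in one place (extern declarations added here for the sketch, and the register/stack restore elided) the dispatch path reads roughly as follows:

/* Reading aid only: a C sketch of the flow the comments in _tx_thread_schedule
   describe.  The externs are declared here for the sketch; the actual context
   restore and ERET are omitted.                                               */
#include "tx_api.h"

extern TX_THREAD * volatile _tx_thread_execute_ptr;   /* set by the kernel      */
extern TX_THREAD *          _tx_thread_current_ptr;   /* thread being dispatched */
extern ULONG                _tx_timer_time_slice;     /* active time-slice      */

void _tx_thread_schedule_sketch(void)
{
    /* Interrupts are enabled here (MSR DAIFClr, 0x3); with TX_ENABLE_WFI the
       core parks in WFI between polls instead of spinning.                    */
    do
    {
    } while (_tx_thread_execute_ptr == TX_NULL);

    /* Interrupts are locked out again (MSR DAIFSet, 0x3) before dispatch.     */
    _tx_thread_current_ptr = _tx_thread_execute_ptr;
    _tx_thread_current_ptr -> tx_thread_run_count++;
    _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;

    /* sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;  then the interrupt
       or solicited frame is restored and control returns into the thread.     */
}
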
diff --git a/ports/cortex_a35/gnu/src/tx_thread_stack_build.S b/ports/cortex_a35/gnu/src/tx_thread_stack_build.S
index d052a9e0..5b7e945a 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_stack_build.S
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -21,69 +21,59 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-*/
-
-
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_stack_build Cortex-A35/GNU */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
-/* function pointer. */
-/* */
-/* INPUT */
-/* */
-/* thread_ptr Pointer to thread control blk */
-/* function_ptr Pointer to return function */
-/* */
-/* OUTPUT */
-/* */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* CALLS */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
+/* */
+/* CALLED BY */
+/* */
/* _tx_thread_create Create thread service */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-{ */
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
.global _tx_thread_stack_build
.type _tx_thread_stack_build, @function
_tx_thread_stack_build:
-
- /* Build a fake interrupt frame. The form of the fake interrupt stack
- on the Cortex-A5x should look like the following after it is built:
-
+
+ /* Build an interrupt frame. On Cortex-A35 it should look like this:
+
Stack Top: SSPR Initial SSPR
ELR Point of interrupt
x28 Initial value for x28
@@ -129,7 +119,7 @@ _tx_thread_stack_build:
MOV x2, #0 // Build clear value
MOV x3, #0 //
-
+
STP x2, x3, [x4, #-16]! // Set backtrace to 0
STP x2, x3, [x4, #-16]! // Set initial x29, x30
STP x2, x3, [x4, #-16]! // Set initial x0, x1
@@ -160,11 +150,9 @@ _tx_thread_stack_build:
STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
/* Setup stack pointer. */
- /* thread_ptr -> tx_thread_stack_ptr = x2; */
+ // thread_ptr -> tx_thread_stack_ptr = x2;
STR x4, [x0, #8] // Save stack pointer in thread's
RET // Return to caller
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_thread_system_return.S b/ports/cortex_a35/gnu/src/tx_thread_system_return.S
index 24802f60..7d42b63d 100644
--- a/ports/cortex_a35/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a35/gnu/src/tx_thread_system_return.S
@@ -12,80 +12,65 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
-/* .set ENABLE_ARM_FP,1 */
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_system_return Cortex-A35/GNU */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is target processor specific. It is used to transfer */
-/* control from a thread back to the ThreadX system. Only a */
-/* minimal context is saved since the compiler assumes temp registers */
-/* are going to get slicked by a function call anyway. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling loop */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX components */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_system_return(VOID)
-{ */
+// VOID _tx_thread_system_return(VOID)
+// {
.global _tx_thread_system_return
.type _tx_thread_system_return, @function
_tx_thread_system_return:
-;
-; /* Save minimal context on the stack. */
-;
+
+ /* Save minimal context on the stack. */
+
MRS x0, DAIF // Pickup DAIF
MSR DAIFSet, 0x3 // Lockout interrupts
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
@@ -129,8 +114,8 @@ _skip_fp_save:
LDR w1, [x2, #0] // Pickup current time slice
/* Save current stack and switch to system stack. */
- /* _tx_thread_current_ptr -> tx_thread_stack_ptr = sp; */
- /* sp = _tx_thread_system_stack_ptr; */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
MOV x4, sp //
STR x4, [x6, #8] // Save thread stack pointer
@@ -139,30 +124,28 @@ _skip_fp_save:
MOV sp, x4 // Setup system stack pointer
/* Determine if the time-slice is active. */
- /* if (_tx_timer_time_slice)
- { */
+ // if (_tx_timer_time_slice)
+ // {
MOV x4, #0 // Build clear value
CMP w1, #0 // Is a time-slice active?
BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
/* Save the current remaining time-slice. */
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w4, [x2, #0] // Clear time-slice
STR w1, [x6, #36] // Store current time-slice
- /* } */
+ // }
__tx_thread_dont_save_ts:
/* Clear the current thread pointer. */
- /* _tx_thread_current_ptr = TX_NULL; */
+ // _tx_thread_current_ptr = TX_NULL;
STR x4, [x5, #0] // Clear current thread pointer
B _tx_thread_schedule // Jump to scheduler!
-/* } */
-
-
+// }
diff --git a/ports/cortex_a35/gnu/src/tx_timer_interrupt.S b/ports/cortex_a35/gnu/src/tx_timer_interrupt.S
index a2808e78..5810b5c2 100644
--- a/ports/cortex_a35/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a35/gnu/src/tx_timer_interrupt.S
@@ -12,71 +12,61 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_timer.h"
-#include "tx_thread.h"
-*/
.text
.align 3
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_timer_interrupt Cortex-A35/GNU */
-/* 6.1 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function processes the hardware timer interrupt. This */
-/* processing includes incrementing the system clock and checking for */
-/* time slice and/or timer expiration. If either is found, the */
-/* interrupt context save/restore functions are called along with the */
-/* expiration functions. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_timer_expiration_process Timer expiration processing */
-/* _tx_thread_time_slice Time slice interrupted thread */
-/* */
-/* CALLED BY */
-/* */
-/* interrupt vector */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* */
/**************************************************************************/
-/* VOID _tx_timer_interrupt(VOID)
-{ */
+// VOID _tx_timer_interrupt(VOID)
+// {
.global _tx_timer_interrupt
.type _tx_timer_interrupt, @function
_tx_timer_interrupt:
@@ -86,7 +76,7 @@ _tx_timer_interrupt:
for use. */
/* Increment the system clock. */
- /* _tx_timer_system_clock++; */
+ // _tx_timer_system_clock++;
LDR x1, =_tx_timer_system_clock // Pickup address of system clock
LDR w0, [x1, #0] // Pickup system clock
@@ -97,7 +87,7 @@ _tx_timer_interrupt:
/* if (_tx_timer_time_slice)
{ */
- LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
LDR w2, [x3, #0] // Pickup time-slice
CMP w2, #0 // Is it non-active?
BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
@@ -126,8 +116,8 @@ _tx_timer_interrupt:
__tx_timer_no_time_slice:
/* Test for timer expiration. */
- /* if (*_tx_timer_current_ptr)
- { */
+ // if (*_tx_timer_current_ptr)
+ // {
LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
LDR x0, [x1, #0] // Pickup current timer
@@ -136,25 +126,25 @@ __tx_timer_no_time_slice:
BEQ __tx_timer_no_timer // No, just increment the timer
/* Set expiration flag. */
- /* _tx_timer_expired = TX_TRUE; */
+ // _tx_timer_expired = TX_TRUE;
LDR x3, =_tx_timer_expired // Pickup expiration flag address
MOV w2, #1 // Build expired value
STR w2, [x3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
- /* _tx_timer_current_ptr++; */
+ // _tx_timer_current_ptr++;
ADD x0, x0, #8 // Move to next timer
/* Check for wrap-around. */
- /* if (_tx_timer_current_ptr == _tx_timer_list_end) */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
LDR x2, [x3, #0] // Pickup list end
@@ -162,7 +152,7 @@ __tx_timer_no_timer:
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
- /* _tx_timer_current_ptr = _tx_timer_list_start; */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
LDR x0, [x3, #0] // Set current pointer to list start
@@ -170,14 +160,14 @@ __tx_timer_no_timer:
__tx_timer_skip_wrap:
STR x0, [x1, #0] // Store new current timer pointer
- /* } */
+ // }
__tx_timer_done:
/* See if anything has expired. */
- /* if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
- { */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
LDR w2, [x3, #0] // Pickup time-slice expired flag
@@ -194,8 +184,8 @@ __tx_something_expired:
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
/* Did a timer expire? */
- /* if (_tx_timer_expired)
- { */
+ // if (_tx_timer_expired)
+ // {
LDR x1, =_tx_timer_expired // Pickup addr of expired flag
LDR w0, [x1, #0] // Pickup timer expired flag
@@ -203,38 +193,36 @@ __tx_something_expired:
BEQ __tx_timer_dont_activate // If not set, skip timer activation
/* Process timer expiration. */
- /* _tx_timer_expiration_process(); */
+ // _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
- /* } */
+ // }
__tx_timer_dont_activate:
/* Did time slice expire? */
- /* if (_tx_timer_expired_time_slice)
- { */
+ // if (_tx_timer_expired_time_slice)
+ // {
- LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
LDR w2, [x3, #0] // Pickup the actual flag
CMP w2, #0 // See if the flag is set
BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
/* Time slice interrupted thread. */
- /* _tx_thread_time_slice(); */
+ // _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
- /* } */
+    // }
__tx_timer_not_ts_expiration:
LDP x29, x30, [sp], #16 // Recover x29, x30
- /* } */
+ // }
__tx_timer_nothing_expired:
RET // Return to caller
-/* } */
-
-
+// }
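
Likewise for the timer interrupt, the converted // comments describe the complete decision tree. A compile-only C rendition, using the kernel variables the comments name (declared here, slightly simplified, for the sketch; the time-slice countdown itself falls outside the hunk shown):

/* Reading aid only: the decision tree the comments above describe, as C. */
#include "tx_api.h"

extern ULONG                _tx_timer_system_clock;
extern ULONG                _tx_timer_time_slice;
extern UINT                 _tx_timer_expired;
extern UINT                 _tx_timer_expired_time_slice;
extern TX_TIMER_INTERNAL  **_tx_timer_current_ptr;
extern TX_TIMER_INTERNAL  **_tx_timer_list_start;
extern TX_TIMER_INTERNAL  **_tx_timer_list_end;
VOID _tx_timer_expiration_process(VOID);
VOID _tx_thread_time_slice(VOID);

void _tx_timer_interrupt_sketch(void)
{
    _tx_timer_system_clock++;                          /* increment the system clock   */

    if (_tx_timer_time_slice)
    {
        /* Count down the active time-slice and, on reaching zero, set
           _tx_timer_expired_time_slice (that part is outside this hunk).      */
    }

    if (*_tx_timer_current_ptr)                        /* timer at the current slot?   */
    {
        _tx_timer_expired = TX_TRUE;                   /* flag it for processing below */
    }
    else
    {
        _tx_timer_current_ptr++;                       /* advance the current pointer  */
        if (_tx_timer_current_ptr == _tx_timer_list_end)
        {
            _tx_timer_current_ptr = _tx_timer_list_start;  /* wrap to the list start   */
        }
    }

    if (_tx_timer_expired_time_slice || _tx_timer_expired)
    {
        if (_tx_timer_expired)
        {
            _tx_timer_expiration_process();            /* timer expiration processing  */
        }
        if (_tx_timer_expired_time_slice)
        {
            _tx_thread_time_slice();                   /* time-slice interrupted thread */
        }
    }
}
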
diff --git a/ports/cortex_a5/green/example_build/azure_rtos_workspace.gpj b/ports/cortex_a5/ghs/example_build/azure_rtos_workspace.gpj
similarity index 100%
rename from ports/cortex_a5/green/example_build/azure_rtos_workspace.gpj
rename to ports/cortex_a5/ghs/example_build/azure_rtos_workspace.gpj
diff --git a/ports/cortex_a5/green/example_build/reset.arm b/ports/cortex_a5/ghs/example_build/reset.arm
similarity index 100%
rename from ports/cortex_a5/green/example_build/reset.arm
rename to ports/cortex_a5/ghs/example_build/reset.arm
diff --git a/ports/cortex_a7/green/example_build/sample_threadx.c b/ports/cortex_a5/ghs/example_build/sample_threadx.c
similarity index 93%
rename from ports/cortex_a7/green/example_build/sample_threadx.c
rename to ports/cortex_a5/ghs/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a7/green/example_build/sample_threadx.c
+++ b/ports/cortex_a5/ghs/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a5/green/example_build/sample_threadx.con b/ports/cortex_a5/ghs/example_build/sample_threadx.con
similarity index 87%
rename from ports/cortex_a5/green/example_build/sample_threadx.con
rename to ports/cortex_a5/ghs/example_build/sample_threadx.con
index 7d427f10..8784cc88 100644
--- a/ports/cortex_a5/green/example_build/sample_threadx.con
+++ b/ports/cortex_a5/ghs/example_build/sample_threadx.con
@@ -1,4 +1,4 @@
-target_connection.00000000.title="Simulator connection for ThreadX"
+target_connection.00000000.title="Simulator"
target_connection.00000000.type="Custom"
target_connection.00000000.short_type="Custom"
target_connection.00000000.args="simarm -cpu=cortexa5 -fpu -rom"
diff --git a/ports/cortex_a5/green/example_build/sample_threadx.gpj b/ports/cortex_a5/ghs/example_build/sample_threadx.gpj
similarity index 100%
rename from ports/cortex_a5/green/example_build/sample_threadx.gpj
rename to ports/cortex_a5/ghs/example_build/sample_threadx.gpj
diff --git a/ports/cortex_a8/green/example_build/sample_threadx.ld b/ports/cortex_a5/ghs/example_build/sample_threadx.ld
similarity index 81%
rename from ports/cortex_a8/green/example_build/sample_threadx.ld
rename to ports/cortex_a5/ghs/example_build/sample_threadx.ld
index 8d1ab4df..a93f11ea 100644
--- a/ports/cortex_a8/green/example_build/sample_threadx.ld
+++ b/ports/cortex_a5/ghs/example_build/sample_threadx.ld
@@ -19,25 +19,25 @@
-sec
{
- .reset 0x000000 :
- .picbase 0x1000 :
- .text :
+ .reset 0x000000 :
+ .picbase 0x1000 :
+ .text :
.comment :
.intercall :
.interfunc :
- .syscall :
+ .syscall :
.fixaddr :
.fixtype :
- .rodata :
+ .rodata :
.romdata ROM(.data) :
.romsdata ROM(.sdata) :
- .secinfo :
- .pidbase align(16) :
- .sdabase :
- .sbss :
- .sdata :
- .data :
- .bss :
+ .secinfo :
+ .pidbase align(16) :
+ .sdabase :
+ .sbss :
+ .sdata :
+ .data :
+ .bss :
.heap align(16) pad(0x10000) :
.stack align(16) pad(0x1000) :
.free_mem align(16) pad(0x10000) :
diff --git a/ports/cortex_a5/green/example_build/sample_threadx_el.gpj b/ports/cortex_a5/ghs/example_build/sample_threadx_el.gpj
similarity index 100%
rename from ports/cortex_a5/green/example_build/sample_threadx_el.gpj
rename to ports/cortex_a5/ghs/example_build/sample_threadx_el.gpj
diff --git a/ports/cortex_a5/green/example_build/sample_threadx_el.ld b/ports/cortex_a5/ghs/example_build/sample_threadx_el.ld
similarity index 82%
rename from ports/cortex_a5/green/example_build/sample_threadx_el.ld
rename to ports/cortex_a5/ghs/example_build/sample_threadx_el.ld
index 33c0f934..65d9de03 100644
--- a/ports/cortex_a5/green/example_build/sample_threadx_el.ld
+++ b/ports/cortex_a5/ghs/example_build/sample_threadx_el.ld
@@ -19,25 +19,25 @@
-sec
{
- .reset 0x000000 :
- .picbase 0x1000 :
- .text :
+ .reset 0x000000 :
+ .picbase 0x1000 :
+ .text :
.comment :
.intercall :
.interfunc :
- .syscall :
+ .syscall :
.fixaddr :
.fixtype :
- .rodata :
+ .rodata :
.romdata ROM(.data) :
.romsdata ROM(.sdata) :
- .secinfo :
- .pidbase align(16) :
- .sdabase :
- .sbss :
- .sdata :
- .data :
- .bss :
+ .secinfo :
+ .pidbase align(16) :
+ .sdabase :
+ .sbss :
+ .sdata :
+ .data :
+ .bss :
.heap align(16) pad(0x1000) :
.stack align(16) pad(0x1000) :
.eventlog align(16) pad(0x10000) :
diff --git a/ports/cortex_a8/green/example_build/tx.gpj b/ports/cortex_a5/ghs/example_build/tx.gpj
similarity index 72%
rename from ports/cortex_a8/green/example_build/tx.gpj
rename to ports/cortex_a5/ghs/example_build/tx.gpj
index afbd6bef..a8a7bbb8 100644
--- a/ports/cortex_a8/green/example_build/tx.gpj
+++ b/ports/cortex_a5/ghs/example_build/tx.gpj
@@ -1,7 +1,6 @@
#!gbuild
[Library]
-I../../../../common/inc
- -I../../../../ports_common_green/inc
-I../inc
..\..\..\..\common\inc\tx_api.h
..\..\..\..\common\inc\tx_block_pool.h
@@ -16,8 +15,11 @@
..\..\..\..\common\inc\tx_trace.h
..\..\..\..\common\inc\tx_user_sample.h
..\inc\tx_port.h
-..\..\..\..\ports_common_green\inc\tx_el.h
-..\..\..\..\ports_common_green\inc\tx_ghs.h
+..\inc\tx_el.h
+..\inc\tx_ghs.h
+..\src\tx_el.c
+..\src\tx_ghs.c
+..\src\tx_ghse.c
..\src\tx_thread_context_restore.arm
..\src\tx_thread_context_save.arm
..\src\tx_thread_fiq_context_restore.arm
@@ -218,66 +220,3 @@
..\..\..\..\common\src\txe_timer_deactivate.c
..\..\..\..\common\src\txe_timer_delete.c
..\..\..\..\common\src\txe_timer_info_get.c
-..\..\..\..\ports_common_green\src\tx_el.c
-..\..\..\..\ports_common_green\src\tx_ghs.c
-..\..\..\..\ports_common_green\src\tx_ghse.c
-..\..\..\..\ports_common_green\src\txr_block_allocate.c
-..\..\..\..\ports_common_green\src\txr_block_pool_create.c
-..\..\..\..\ports_common_green\src\txr_block_pool_delete.c
-..\..\..\..\ports_common_green\src\txr_block_pool_info_get.c
-..\..\..\..\ports_common_green\src\txr_block_pool_prioritize.c
-..\..\..\..\ports_common_green\src\txr_block_release.c
-..\..\..\..\ports_common_green\src\txr_byte_allocate.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_create.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_delete.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_info_get.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_prioritize.c
-..\..\..\..\ports_common_green\src\txr_byte_release.c
-..\..\..\..\ports_common_green\src\txr_event_flags_create.c
-..\..\..\..\ports_common_green\src\txr_event_flags_delete.c
-..\..\..\..\ports_common_green\src\txr_event_flags_get.c
-..\..\..\..\ports_common_green\src\txr_event_flags_info_get.c
-..\..\..\..\ports_common_green\src\txr_event_flags_set.c
-..\..\..\..\ports_common_green\src\txr_event_flags_set_notify.c
-..\..\..\..\ports_common_green\src\txr_ghs.c
-..\..\..\..\ports_common_green\src\txr_mutex_create.c
-..\..\..\..\ports_common_green\src\txr_mutex_delete.c
-..\..\..\..\ports_common_green\src\txr_mutex_get.c
-..\..\..\..\ports_common_green\src\txr_mutex_info_get.c
-..\..\..\..\ports_common_green\src\txr_mutex_prioritize.c
-..\..\..\..\ports_common_green\src\txr_mutex_put.c
-..\..\..\..\ports_common_green\src\txr_queue_create.c
-..\..\..\..\ports_common_green\src\txr_queue_delete.c
-..\..\..\..\ports_common_green\src\txr_queue_flush.c
-..\..\..\..\ports_common_green\src\txr_queue_front_send.c
-..\..\..\..\ports_common_green\src\txr_queue_info_get.c
-..\..\..\..\ports_common_green\src\txr_queue_prioritize.c
-..\..\..\..\ports_common_green\src\txr_queue_receive.c
-..\..\..\..\ports_common_green\src\txr_queue_send.c
-..\..\..\..\ports_common_green\src\txr_queue_send_notify.c
-..\..\..\..\ports_common_green\src\txr_semaphore_ceiling_put.c
-..\..\..\..\ports_common_green\src\txr_semaphore_create.c
-..\..\..\..\ports_common_green\src\txr_semaphore_delete.c
-..\..\..\..\ports_common_green\src\txr_semaphore_get.c
-..\..\..\..\ports_common_green\src\txr_semaphore_info_get.c
-..\..\..\..\ports_common_green\src\txr_semaphore_prioritize.c
-..\..\..\..\ports_common_green\src\txr_semaphore_put.c
-..\..\..\..\ports_common_green\src\txr_semaphore_put_notify.c
-..\..\..\..\ports_common_green\src\txr_thread_create.c
-..\..\..\..\ports_common_green\src\txr_thread_delete.c
-..\..\..\..\ports_common_green\src\txr_thread_entry_exit_notify.c
-..\..\..\..\ports_common_green\src\txr_thread_info_get.c
-..\..\..\..\ports_common_green\src\txr_thread_preemption_change.c
-..\..\..\..\ports_common_green\src\txr_thread_priority_change.c
-..\..\..\..\ports_common_green\src\txr_thread_reset.c
-..\..\..\..\ports_common_green\src\txr_thread_resume.c
-..\..\..\..\ports_common_green\src\txr_thread_suspend.c
-..\..\..\..\ports_common_green\src\txr_thread_terminate.c
-..\..\..\..\ports_common_green\src\txr_thread_time_slice_change.c
-..\..\..\..\ports_common_green\src\txr_thread_wait_abort.c
-..\..\..\..\ports_common_green\src\txr_timer_activate.c
-..\..\..\..\ports_common_green\src\txr_timer_change.c
-..\..\..\..\ports_common_green\src\txr_timer_create.c
-..\..\..\..\ports_common_green\src\txr_timer_deactivate.c
-..\..\..\..\ports_common_green\src\txr_timer_delete.c
-..\..\..\..\ports_common_green\src\txr_timer_info_get.c
diff --git a/ports/cortex_a7/green/example_build/tx_initialize_low_level.arm b/ports/cortex_a5/ghs/example_build/tx_initialize_low_level.arm
similarity index 94%
rename from ports/cortex_a7/green/example_build/tx_initialize_low_level.arm
rename to ports/cortex_a5/ghs/example_build/tx_initialize_low_level.arm
index e01e1206..60916fac 100644
--- a/ports/cortex_a7/green/example_build/tx_initialize_low_level.arm
+++ b/ports/cortex_a5/ghs/example_build/tx_initialize_low_level.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Initialize */
/** */
@@ -42,42 +42,42 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_initialize_low_level Cortex-A7/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level Cortex-A5/GHS */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for any low-level processor */
-/* initialization, including setting up interrupt vectors, setting */
-/* up a periodic timer interrupt source, saving the system stack */
-/* pointer for use in ISR processing later, and finding the first */
-/* available RAM memory address for tx_application_define. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@@ -96,7 +96,7 @@ _tx_initialize_low_level:
/* _tx_thread_system_stack_ptr = (VOID_PTR) (sp); */
LDR r1,=_tx_thread_system_stack_ptr # Pickup address of system stack ptr
- STR sp, [r1] # Save system stack
+ STR sp, [r1] # Save system stack
/* Pickup the first available memory address. */
@@ -146,8 +146,8 @@ _tx_initialize_low_level:
STR r0, [r2] # Save first free memory address
- /* Setup Timer for periodic interrupts. To generate timer interrupts with
- the Green Hills simulator, enter the following command in the target
+ /* Setup Timer for periodic interrupts. To generate timer interrupts with
+ the Green Hills simulator, enter the following command in the target
window: timer 9999 irq */
/* Done, return to caller. */
@@ -197,7 +197,7 @@ __tx_reserved_handler:
.size __tx_reserved_handler,.-__tx_reserved_handler
.globl __tx_irq_handler
- .globl __tx_irq_processing_return
+ .globl __tx_irq_processing_return
__tx_irq_handler:
/* Jump to context save to save system context. */
@@ -209,18 +209,18 @@ __tx_irq_handler:
__tx_irq_processing_return:
/* At this point execution is still in the IRQ mode. The CPSR, point of
- interrupt, and all C scratch registers are available for use. */
+ interrupt, and all C scratch registers are available for use. */
#ifdef TX_ENABLE_EVENT_LOGGING
MOV r0, 0 # Build interrupt code
BL _tx_el_interrupt # Call interrupt event logging
#endif
- /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
from IRQ mode with interrupts disabled. This routine switches to the
- system mode and returns with IRQ interrupts enabled.
-
- NOTE: It is very important to ensure all IRQ interrupts are cleared
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
@@ -235,7 +235,7 @@ __tx_irq_processing_return:
/* Application IRQ handlers can be called here! */
/* If interrupt nesting was started earlier, the end of interrupt nesting
- service must be called before returning to _tx_thread_context_restore.
+ service must be called before returning to _tx_thread_context_restore.
This routine returns in processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
@@ -272,12 +272,12 @@ __tx_fiq_processing_return:
MOV r0, 1 # Build interrupt code
BL _tx_el_interrupt # Call interrupt event logging
#endif
-
- /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
from FIQ mode with interrupts disabled. This routine switches to the
system mode and returns with FIQ interrupts enabled.
- NOTE: It is very important to ensure all FIQ interrupts are cleared
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
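The nesting comments in this low-level file prescribe a fixed ordering around the application's interrupt work. The outline below is a hedged C rendering of that ordering only: application_irq_dispatch() is a hypothetical placeholder, and the C prototypes for the _tx_thread_* routines (which the assembly reaches with BL) are assumptions for the sketch.

    /* Ordering implied by the comments above for a nested IRQ (outline only). */
    extern void _tx_thread_irq_nesting_start(void);
    extern void _tx_thread_irq_nesting_end(void);
    extern void application_irq_dispatch(void);   /* hypothetical placeholder  */

    void irq_service_outline(void)
    {
    #ifdef TX_ENABLE_IRQ_NESTING
        /* All pending IRQ sources must be cleared before nesting is enabled.  */
        _tx_thread_irq_nesting_start();
    #endif

        application_irq_dispatch();                /* Application IRQ handlers run here. */

    #ifdef TX_ENABLE_IRQ_NESTING
        /* Must be called before returning to _tx_thread_context_restore.      */
        _tx_thread_irq_nesting_end();
    #endif
    }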
diff --git a/ports/cortex_a5/green/example_build/txe.gpj b/ports/cortex_a5/ghs/example_build/txe.gpj
similarity index 72%
rename from ports/cortex_a5/green/example_build/txe.gpj
rename to ports/cortex_a5/ghs/example_build/txe.gpj
index 662bcd16..302c0cb2 100644
--- a/ports/cortex_a5/green/example_build/txe.gpj
+++ b/ports/cortex_a5/ghs/example_build/txe.gpj
@@ -2,7 +2,6 @@
[Library]
-DTX_ENABLE_EVENT_LOGGING
-I../../../../common/inc
- -I../../../../ports_common_green/inc
-I../inc
..\..\..\..\common\inc\tx_api.h
..\..\..\..\common\inc\tx_block_pool.h
@@ -17,8 +16,11 @@
..\..\..\..\common\inc\tx_trace.h
..\..\..\..\common\inc\tx_user_sample.h
..\inc\tx_port.h
-..\..\..\..\ports_common_green\inc\tx_el.h
-..\..\..\..\ports_common_green\inc\tx_ghs.h
+..\inc\tx_el.h
+..\inc\tx_ghs.h
+..\src\tx_el.c
+..\src\tx_ghs.c
+..\src\tx_ghse.c
..\src\tx_thread_context_restore.arm
..\src\tx_thread_context_save.arm
..\src\tx_thread_fiq_context_restore.arm
@@ -219,66 +221,3 @@
..\..\..\..\common\src\txe_timer_deactivate.c
..\..\..\..\common\src\txe_timer_delete.c
..\..\..\..\common\src\txe_timer_info_get.c
-..\..\..\..\ports_common_green\src\tx_el.c
-..\..\..\..\ports_common_green\src\tx_ghs.c
-..\..\..\..\ports_common_green\src\tx_ghse.c
-..\..\..\..\ports_common_green\src\txr_block_allocate.c
-..\..\..\..\ports_common_green\src\txr_block_pool_create.c
-..\..\..\..\ports_common_green\src\txr_block_pool_delete.c
-..\..\..\..\ports_common_green\src\txr_block_pool_info_get.c
-..\..\..\..\ports_common_green\src\txr_block_pool_prioritize.c
-..\..\..\..\ports_common_green\src\txr_block_release.c
-..\..\..\..\ports_common_green\src\txr_byte_allocate.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_create.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_delete.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_info_get.c
-..\..\..\..\ports_common_green\src\txr_byte_pool_prioritize.c
-..\..\..\..\ports_common_green\src\txr_byte_release.c
-..\..\..\..\ports_common_green\src\txr_event_flags_create.c
-..\..\..\..\ports_common_green\src\txr_event_flags_delete.c
-..\..\..\..\ports_common_green\src\txr_event_flags_get.c
-..\..\..\..\ports_common_green\src\txr_event_flags_info_get.c
-..\..\..\..\ports_common_green\src\txr_event_flags_set.c
-..\..\..\..\ports_common_green\src\txr_event_flags_set_notify.c
-..\..\..\..\ports_common_green\src\txr_ghs.c
-..\..\..\..\ports_common_green\src\txr_mutex_create.c
-..\..\..\..\ports_common_green\src\txr_mutex_delete.c
-..\..\..\..\ports_common_green\src\txr_mutex_get.c
-..\..\..\..\ports_common_green\src\txr_mutex_info_get.c
-..\..\..\..\ports_common_green\src\txr_mutex_prioritize.c
-..\..\..\..\ports_common_green\src\txr_mutex_put.c
-..\..\..\..\ports_common_green\src\txr_queue_create.c
-..\..\..\..\ports_common_green\src\txr_queue_delete.c
-..\..\..\..\ports_common_green\src\txr_queue_flush.c
-..\..\..\..\ports_common_green\src\txr_queue_front_send.c
-..\..\..\..\ports_common_green\src\txr_queue_info_get.c
-..\..\..\..\ports_common_green\src\txr_queue_prioritize.c
-..\..\..\..\ports_common_green\src\txr_queue_receive.c
-..\..\..\..\ports_common_green\src\txr_queue_send.c
-..\..\..\..\ports_common_green\src\txr_queue_send_notify.c
-..\..\..\..\ports_common_green\src\txr_semaphore_ceiling_put.c
-..\..\..\..\ports_common_green\src\txr_semaphore_create.c
-..\..\..\..\ports_common_green\src\txr_semaphore_delete.c
-..\..\..\..\ports_common_green\src\txr_semaphore_get.c
-..\..\..\..\ports_common_green\src\txr_semaphore_info_get.c
-..\..\..\..\ports_common_green\src\txr_semaphore_prioritize.c
-..\..\..\..\ports_common_green\src\txr_semaphore_put.c
-..\..\..\..\ports_common_green\src\txr_semaphore_put_notify.c
-..\..\..\..\ports_common_green\src\txr_thread_create.c
-..\..\..\..\ports_common_green\src\txr_thread_delete.c
-..\..\..\..\ports_common_green\src\txr_thread_entry_exit_notify.c
-..\..\..\..\ports_common_green\src\txr_thread_info_get.c
-..\..\..\..\ports_common_green\src\txr_thread_preemption_change.c
-..\..\..\..\ports_common_green\src\txr_thread_priority_change.c
-..\..\..\..\ports_common_green\src\txr_thread_reset.c
-..\..\..\..\ports_common_green\src\txr_thread_resume.c
-..\..\..\..\ports_common_green\src\txr_thread_suspend.c
-..\..\..\..\ports_common_green\src\txr_thread_terminate.c
-..\..\..\..\ports_common_green\src\txr_thread_time_slice_change.c
-..\..\..\..\ports_common_green\src\txr_thread_wait_abort.c
-..\..\..\..\ports_common_green\src\txr_timer_activate.c
-..\..\..\..\ports_common_green\src\txr_timer_change.c
-..\..\..\..\ports_common_green\src\txr_timer_create.c
-..\..\..\..\ports_common_green\src\txr_timer_deactivate.c
-..\..\..\..\ports_common_green\src\txr_timer_delete.c
-..\..\..\..\ports_common_green\src\txr_timer_info_get.c
diff --git a/ports/cortex_a5/ghs/inc/tx_el.h b/ports/cortex_a5/ghs/inc/tx_el.h
new file mode 100644
index 00000000..66cc0d7c
--- /dev/null
+++ b/ports/cortex_a5/ghs/inc/tx_el.h
@@ -0,0 +1,765 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX/GHS Event Log (EL) */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* COMPONENT DEFINITION RELEASE */
+/* */
+/* tx_el.h PORTABLE C/GHS */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file defines the ThreadX event log functions for the GHS MULTI */
+/* EventAnalyzer. It is assumed that tx_api.h and tx_port.h have */
+/* already been included. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_EL_H
+#define TX_EL_H
+
+
+/* Define Event Log specific data definitions. */
+
+#define TX_EL_VERSION_ID 2 /* Event log version ID */
+#define TX_EL_HEADER_SIZE 24 /* Event log header size */
+#define TX_EL_TNIS 16 /* Number of thread names supported */
+ /* If the application needs to */
+ /* track more thread names, just */
+ /* increase this number and re- */
+ /* build the ThreadX library. */
+#define TX_EL_TNI_ENTRY_SIZE 44 /* Thread name entries are 44 bytes */
+#define TX_EL_TNI_NAME_SIZE 34 /* Thread name size in TNI */
+#define TX_EL_NO_MORE_TNI_ROOM 1 /* Error return from thread register*/
+#define TX_EL_NAME_NOT_FOUND 2 /* Error return from un-register */
+#define TX_EL_EVENT_SIZE 32 /* Number of bytes in each event */
+#define TX_EL_VALID_ENTRY 1 /* Valid log entry */
+#define TX_EL_INVALID_ENTRY 0 /* Invalid log entry */
+
+
+/* Define necessary offsets. */
+
+#define TX_EL_TNI_VALID_OFFSET 34
+#define TX_EL_TNI_THREAD_ID_OFFSET 36
+#define TX_EL_TNI_THREAD_PRIORITY_OFF 40
+#define TX_EL_EVENT_TYPE_OFFSET 0
+#define TX_EL_EVENT_SUBTYPE_OFFSET 2
+#define TX_EL_EVENT_TIME_UPPER_OFFSET 4
+#define TX_EL_EVENT_TIME_LOWER_OFFSET 8
+#define TX_EL_EVENT_THREAD_OFFSET 12
+#define TX_EL_EVENT_INFO_1_OFFSET 16
+#define TX_EL_EVENT_INFO_2_OFFSET 20
+#define TX_EL_EVENT_INFO_3_OFFSET 24
+#define TX_EL_EVENT_INFO_4_OFFSET 28
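Read together, these offsets describe one 32-byte log record (TX_EL_EVENT_SIZE). The struct below is illustrative only and is not part of the header; it assumes the 16-bit type/sub-type fields and 32-bit ULONG fields that the offsets imply (ULONG coming from tx_api.h, which this file assumes is already included).

    /* Illustrative view of one event log entry, as implied by the offsets above. */
    typedef struct TX_EL_EVENT_SKETCH_STRUCT
    {
        unsigned short  tx_el_event_type;        /* Offset  0: TX_EL_THREADX_CALL, etc. */
        unsigned short  tx_el_event_subtype;     /* Offset  2: e.g. TX_EL_QUEUE_SEND    */
        ULONG           tx_el_event_time_upper;  /* Offset  4: upper half of timestamp  */
        ULONG           tx_el_event_time_lower;  /* Offset  8: lower half of timestamp  */
        ULONG           tx_el_event_thread;      /* Offset 12: current thread pointer   */
        ULONG           tx_el_event_info_1;      /* Offset 16                           */
        ULONG           tx_el_event_info_2;      /* Offset 20                           */
        ULONG           tx_el_event_info_3;      /* Offset 24                           */
        ULONG           tx_el_event_info_4;      /* Offset 28                           */
    } TX_EL_EVENT_SKETCH;                        /* 32 bytes total = TX_EL_EVENT_SIZE   */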
+
+
+/* Undefine constants that might have been defined previously by tx_api.h. */
+
+#undef TX_EL_INITIALIZE
+#undef TX_EL_THREAD_REGISTER
+#undef TX_EL_THREAD_UNREGISTER
+#undef TX_EL_THREAD_STATUS_CHANGE_INSERT
+#undef TX_EL_BYTE_ALLOCATE_INSERT
+#undef TX_EL_BYTE_POOL_CREATE_INSERT
+#undef TX_EL_BYTE_POOL_DELETE_INSERT
+#undef TX_EL_BYTE_RELEASE_INSERT
+#undef TX_EL_BLOCK_ALLOCATE_INSERT
+#undef TX_EL_BLOCK_POOL_CREATE_INSERT
+#undef TX_EL_BLOCK_POOL_DELETE_INSERT
+#undef TX_EL_BLOCK_RELEASE_INSERT
+#undef TX_EL_EVENT_FLAGS_CREATE_INSERT
+#undef TX_EL_EVENT_FLAGS_DELETE_INSERT
+#undef TX_EL_EVENT_FLAGS_GET_INSERT
+#undef TX_EL_EVENT_FLAGS_SET_INSERT
+#undef TX_EL_INTERRUPT_CONTROL_INSERT
+#undef TX_EL_QUEUE_CREATE_INSERT
+#undef TX_EL_QUEUE_DELETE_INSERT
+#undef TX_EL_QUEUE_FLUSH_INSERT
+#undef TX_EL_QUEUE_RECEIVE_INSERT
+#undef TX_EL_QUEUE_SEND_INSERT
+#undef TX_EL_SEMAPHORE_CREATE_INSERT
+#undef TX_EL_SEMAPHORE_DELETE_INSERT
+#undef TX_EL_SEMAPHORE_GET_INSERT
+#undef TX_EL_SEMAPHORE_PUT_INSERT
+#undef TX_EL_THREAD_CREATE_INSERT
+#undef TX_EL_THREAD_DELETE_INSERT
+#undef TX_EL_THREAD_IDENTIFY_INSERT
+#undef TX_EL_THREAD_PREEMPTION_CHANGE_INSERT
+#undef TX_EL_THREAD_PRIORITY_CHANGE_INSERT
+#undef TX_EL_THREAD_RELINQUISH_INSERT
+#undef TX_EL_THREAD_RESUME_INSERT
+#undef TX_EL_THREAD_SLEEP_INSERT
+#undef TX_EL_THREAD_SUSPEND_INSERT
+#undef TX_EL_THREAD_TERMINATE_INSERT
+#undef TX_EL_THREAD_TIME_SLICE_CHANGE_INSERT
+#undef TX_EL_TIME_GET_INSERT
+#undef TX_EL_TIME_SET_INSERT
+#undef TX_EL_TIMER_ACTIVATE_INSERT
+#undef TX_EL_TIMER_CHANGE_INSERT
+#undef TX_EL_TIMER_CREATE_INSERT
+#undef TX_EL_TIMER_DEACTIVATE_INSERT
+#undef TX_EL_TIMER_DELETE_INSERT
+#undef TX_EL_BLOCK_POOL_INFO_GET_INSERT
+#undef TX_EL_BLOCK_POOL_PRIORITIZE_INSERT
+#undef TX_EL_BYTE_POOL_INFO_GET_INSERT
+#undef TX_EL_BYTE_POOL_PRIORITIZE_INSERT
+#undef TX_EL_EVENT_FLAGS_INFO_GET_INSERT
+#undef TX_EL_MUTEX_CREATE_INSERT
+#undef TX_EL_MUTEX_DELETE_INSERT
+#undef TX_EL_MUTEX_GET_INSERT
+#undef TX_EL_MUTEX_INFO_GET_INSERT
+#undef TX_EL_MUTEX_PRIORITIZE_INSERT
+#undef TX_EL_MUTEX_PUT_INSERT
+#undef TX_EL_QUEUE_INFO_GET_INSERT
+#undef TX_EL_QUEUE_FRONT_SEND_INSERT
+#undef TX_EL_QUEUE_PRIORITIZE_INSERT
+#undef TX_EL_SEMAPHORE_INFO_GET_INSERT
+#undef TX_EL_SEMAPHORE_PRIORITIZE_INSERT
+#undef TX_EL_THREAD_INFO_GET_INSERT
+#undef TX_EL_THREAD_WAIT_ABORT_INSERT
+#undef TX_EL_TIMER_INFO_GET_INSERT
+#undef TX_EL_BLOCK_POOL_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_BYTE_POOL_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_EVENT_FLAGS_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_EVENT_FLAGS_SET_NOTIFY_INSERT
+#undef TX_EL_MUTEX_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_MUTEX_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_QUEUE_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_QUEUE_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_QUEUE_SEND_NOTIFY_INSERT
+#undef TX_EL_SEMAPHORE_CEILING_PUT_INSERT
+#undef TX_EL_SEMAPHORE_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_SEMAPHORE_PUT_NOTIFY_INSERT
+#undef TX_EL_THREAD_ENTRY_EXIT_NOTIFY_INSERT
+#undef TX_EL_THREAD_RESET_INSERT
+#undef TX_EL_THREAD_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_THREAD_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+#undef TX_EL_THREAD_STACK_ERROR_NOTIFY_INSERT
+#undef TX_EL_TIMER_PERFORMANCE_INFO_GET_INSERT
+#undef TX_EL_TIMER_PERFORMANCE_SYSTEM_INFO_GET_INSERT
+
+
+/* Define Event Types. */
+
+#define TX_EL_THREAD_CHANGE 1
+#define TX_EL_INTERRUPT 2
+#define TX_EL_THREADX_CALL 3
+#define TX_EL_USER_EVENT 4
+#define TX_EL_THREAD_STATUS_CHANGE 5
+#define TX_EL_REFRESH 6 /* Not implemented */
+#define TX_EL_TIMER 7 /* Not implemented */
+#define TX_EL_TIMESOURCE_DELTA 8 /* Not implemented */
+
+
+/* Define TX_EL_THREADX_CALL event sub-types. */
+
+#define TX_EL_BYTE_ALLOCATE 0
+#define TX_EL_BYTE_POOL_CREATE 1
+#define TX_EL_BYTE_POOL_DELETE 2
+#define TX_EL_BYTE_RELEASE 3
+#define TX_EL_BLOCK_ALLOCATE 4
+#define TX_EL_BLOCK_POOL_CREATE 5
+#define TX_EL_BLOCK_POOL_DELETE 6
+#define TX_EL_BLOCK_RELEASE 7
+#define TX_EL_EVENT_FLAGS_CREATE 8
+#define TX_EL_EVENT_FLAGS_DELETE 9
+#define TX_EL_EVENT_FLAGS_GET 10
+#define TX_EL_EVENT_FLAGS_SET 11
+#define TX_EL_INTERRUPT_CONTROL 12
+#define TX_EL_QUEUE_CREATE 13
+#define TX_EL_QUEUE_DELETE 14
+#define TX_EL_QUEUE_FLUSH 15
+#define TX_EL_QUEUE_RECEIVE 16
+#define TX_EL_QUEUE_SEND 17
+#define TX_EL_SEMAPHORE_CREATE 18
+#define TX_EL_SEMAPHORE_DELETE 19
+#define TX_EL_SEMAPHORE_GET 20
+#define TX_EL_SEMAPHORE_PUT 21
+#define TX_EL_THREAD_CREATE 22
+#define TX_EL_THREAD_DELETE 23
+#define TX_EL_THREAD_IDENTIFY 24
+#define TX_EL_THREAD_PREEMPTION_CHANGE 25
+#define TX_EL_THREAD_PRIORITY_CHANGE 26
+#define TX_EL_THREAD_RELINQUISH 27
+#define TX_EL_THREAD_RESUME 28
+#define TX_EL_THREAD_SLEEP 29
+#define TX_EL_THREAD_SUSPEND 30
+#define TX_EL_THREAD_TERMINATE 31
+#define TX_EL_THREAD_TIME_SLICE_CHANGE 32
+#define TX_EL_TIME_GET 33
+#define TX_EL_TIME_SET 34
+#define TX_EL_TIMER_ACTIVATE 35
+#define TX_EL_TIMER_CHANGE 36
+#define TX_EL_TIMER_CREATE 37
+#define TX_EL_TIMER_DEACTIVATE 38
+#define TX_EL_TIMER_DELETE 39
+#define TX_EL_BLOCK_POOL_INFO_GET 40
+#define TX_EL_BLOCK_POOL_PRIORITIZE 41
+#define TX_EL_BYTE_POOL_INFO_GET 42
+#define TX_EL_BYTE_POOL_PRIORITIZE 43
+#define TX_EL_EVENT_FLAGS_INFO_GET 44
+#define TX_EL_MUTEX_CREATE 45
+#define TX_EL_MUTEX_DELETE 46
+#define TX_EL_MUTEX_GET 47
+#define TX_EL_MUTEX_INFO_GET 48
+#define TX_EL_MUTEX_PRIORITIZE 49
+#define TX_EL_MUTEX_PUT 50
+#define TX_EL_QUEUE_INFO_GET 51
+#define TX_EL_QUEUE_FRONT_SEND 52
+#define TX_EL_QUEUE_PRIORITIZE 53
+#define TX_EL_SEMAPHORE_INFO_GET 54
+#define TX_EL_SEMAPHORE_PRIORITIZE 55
+#define TX_EL_THREAD_INFO_GET 56
+#define TX_EL_THREAD_WAIT_ABORT 57
+#define TX_EL_TIMER_INFO_GET 58
+#define TX_EL_BLOCK_POOL_PERFORMANCE_INFO_GET 59
+#define TX_EL_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET 60
+#define TX_EL_BYTE_POOL_PERFORMANCE_INFO_GET 61
+#define TX_EL_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET 62
+#define TX_EL_EVENT_FLAGS_PERFORMANCE_INFO_GET 63
+#define TX_EL_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET 64
+#define TX_EL_EVENT_FLAGS_SET_NOTIFY 65
+#define TX_EL_MUTEX_PERFORMANCE_INFO_GET 66
+#define TX_EL_MUTEX_PERFORMANCE_SYSTEM_INFO_GET 67
+#define TX_EL_QUEUE_PERFORMANCE_INFO_GET 68
+#define TX_EL_QUEUE_PERFORMANCE_SYSTEM_INFO_GET 69
+#define TX_EL_QUEUE_SEND_NOTIFY 70
+#define TX_EL_SEMAPHORE_CEILING_PUT 71
+#define TX_EL_SEMAPHORE_PERFORMANCE_INFO_GET 72
+#define TX_EL_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET 73
+#define TX_EL_SEMAPHORE_PUT_NOTIFY 74
+#define TX_EL_THREAD_ENTRY_EXIT_NOTIFY 75
+#define TX_EL_THREAD_RESET 76
+#define TX_EL_THREAD_PERFORMANCE_INFO_GET 77
+#define TX_EL_THREAD_PERFORMANCE_SYSTEM_INFO_GET 78
+#define TX_EL_THREAD_STACK_ERROR_NOTIFY 79
+#define TX_EL_TIMER_PERFORMANCE_INFO_GET 80
+#define TX_EL_TIMER_PERFORMANCE_SYSTEM_INFO_GET 81
+
+
+/* Define ThreadX sub-types. */
+
+#define TX_EL_INTERRUPT_SUB_TYPE 1
+#define TX_EL_END_OF_INTERRUPT 3
+
+
+/* Define event logging filters, which may be used by the application program to
+   dynamically enable/disable events at run-time. */
+
+#define TX_EL_FILTER_STATUS_CHANGE 0x0001
+#define TX_EL_FILTER_INTERRUPTS 0x0002
+#define TX_EL_FILTER_THREAD_CALLS 0x0004
+#define TX_EL_FILTER_TIMER_CALLS 0x0008
+#define TX_EL_FILTER_EVENT_FLAG_CALLS 0x0010
+#define TX_EL_FILTER_SEMAPHORE_CALLS 0x0020
+#define TX_EL_FILTER_QUEUE_CALLS 0x0040
+#define TX_EL_FILTER_BLOCK_CALLS 0x0080
+#define TX_EL_FILTER_BYTE_CALLS 0x0100
+#define TX_EL_FILTER_MUTEX_CALLS 0x0200
+#define TX_EL_FILTER_ALL_EVENTS 0xFFFF
+#define TX_EL_ENABLE_ALL_EVENTS 0x0000
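A minimal usage sketch, not from the port: the TX_EL_NO_*_EVENTS macros below test these bits, so a set bit suppresses that event class, while TX_EL_ENABLE_ALL_EVENTS clears every filter. The sketch assumes this header (and tx_api.h) is included, a build with TX_ENABLE_EVENT_LOGGING and TX_ENABLE_EVENT_FILTERS, and that _tx_el_event_filter_set(), prototyped later in this header, stores the given mask into _tx_el_event_filter.

    /* Usage sketch: narrow the event log at run-time. */
    void demo_narrow_event_log(void)
    {
        /* Suppress timer and interrupt events, keep everything else. */
        _tx_el_event_filter_set(TX_EL_FILTER_TIMER_CALLS | TX_EL_FILTER_INTERRUPTS);

        /* Later, log every event class again. */
        _tx_el_event_filter_set(TX_EL_ENABLE_ALL_EVENTS);
    }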
+
+
+/* Define filter macros that are inserted in-line with the other macros below. */
+
+#ifdef TX_ENABLE_EVENT_FILTERS
+#define TX_EL_NO_STATUS_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_STATUS_CHANGE)) {
+#define TX_EL_NO_INTERRUPT_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_INTERRUPTS)) {
+#define TX_EL_NO_THREAD_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_THREAD_CALLS)) {
+#define TX_EL_NO_TIMER_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_TIMER_CALLS)) {
+#define TX_EL_NO_EVENT_FLAG_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_EVENT_FLAG_CALLS)) {
+#define TX_EL_NO_SEMAPHORE_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_SEMAPHORE_CALLS)) {
+#define TX_EL_NO_QUEUE_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_QUEUE_CALLS)) {
+#define TX_EL_NO_BLOCK_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_BLOCK_CALLS)) {
+#define TX_EL_NO_BYTE_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_BYTE_CALLS)) {
+#define TX_EL_NO_MUTEX_EVENTS if (!(_tx_el_event_filter & TX_EL_FILTER_MUTEX_CALLS)) {
+#define TX_EL_END_FILTER }
+#else
+#define TX_EL_NO_STATUS_EVENTS
+#define TX_EL_NO_INTERRUPT_EVENTS
+#define TX_EL_NO_THREAD_EVENTS
+#define TX_EL_NO_TIMER_EVENTS
+#define TX_EL_NO_EVENT_FLAG_EVENTS
+#define TX_EL_NO_SEMAPHORE_EVENTS
+#define TX_EL_NO_QUEUE_EVENTS
+#define TX_EL_NO_BLOCK_EVENTS
+#define TX_EL_NO_BYTE_EVENTS
+#define TX_EL_NO_MUTEX_EVENTS
+#define TX_EL_END_FILTER
+#endif
+
+/* Define externs and constants for non-event log source modules. This is for
+ the in-line macros below. */
+
+#ifndef TX_EL_SOURCE_CODE
+extern UCHAR *_tx_el_tni_start;
+extern UCHAR **_tx_el_current_event;
+extern UCHAR *_tx_el_event_area_start;
+extern UCHAR *_tx_el_event_area_end;
+extern UINT _tx_el_maximum_events;
+extern ULONG _tx_el_total_events;
+extern TX_THREAD *_tx_thread_current_ptr;
+extern UINT _tx_el_event_filter;
+extern ULONG _tx_el_time_base_upper;
+extern ULONG _tx_el_time_base_lower;
+
+
+/* Define macros for event logging functions. */
+
+#define TX_EL_THREAD_CREATE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(TX_EL_THREAD_CREATE, thread_ptr, stack_start, stack_size, priority); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_SET_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_EVENT_FLAGS_SET, group_ptr, flags_to_set, set_option); TX_EL_END_FILTER
+#define TX_EL_THREAD_DELETE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_DELETE, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_INFO_GET_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_INFO_GET, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_TIME_SLICE_CHANGE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_THREAD_TIME_SLICE_CHANGE, thread_ptr, thread_ptr -> tx_thread_new_time_slice, new_time_slice); TX_EL_END_FILTER
+#define TX_EL_THREAD_TERMINATE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_TERMINATE, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_SLEEP_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_SLEEP, timer_ticks); TX_EL_END_FILTER
+#define TX_EL_THREAD_SUSPEND_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_SUSPEND, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_RELINQUISH_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_THREAD_RELINQUISH); TX_EL_END_FILTER
+#define TX_EL_THREAD_RESUME_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_RESUME, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_PRIORITY_CHANGE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_THREAD_PRIORITY_CHANGE, thread_ptr, thread_ptr -> tx_thread_priority, new_priority); TX_EL_END_FILTER
+#define TX_EL_THREAD_PREEMPTION_CHANGE_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_THREAD_PREEMPTION_CHANGE, thread_ptr, thread_ptr -> tx_thread_preempt_threshold, new_threshold); TX_EL_END_FILTER
+#define TX_EL_THREAD_WAIT_ABORT_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_WAIT_ABORT, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_ENTRY_EXIT_NOTIFY_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_THREAD_ENTRY_EXIT_NOTIFY, thread_ptr, thread_entry_exit_notify); TX_EL_END_FILTER
+#define TX_EL_THREAD_RESET_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_RESET, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_PERFORMANCE_INFO_GET, thread_ptr); TX_EL_END_FILTER
+#define TX_EL_THREAD_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_THREAD_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_THREAD_STACK_ERROR_NOTIFY_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_THREAD_STACK_ERROR_NOTIFY, stack_error_handler); TX_EL_END_FILTER
+#define TX_EL_TIME_SET_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIME_SET, new_time); TX_EL_END_FILTER
+#define TX_EL_TIME_GET_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIME_GET, _tx_timer_system_clock); TX_EL_END_FILTER
+#define TX_EL_TIMER_DELETE_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIMER_DELETE, timer_ptr); TX_EL_END_FILTER
+#define TX_EL_TIMER_CREATE_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(TX_EL_TIMER_CREATE, timer_ptr, initial_ticks, reschedule_ticks, auto_activate); TX_EL_END_FILTER
+#define TX_EL_TIMER_CHANGE_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_TIMER_CHANGE, timer_ptr, initial_ticks, reschedule_ticks); TX_EL_END_FILTER
+#define TX_EL_THREAD_IDENTIFY_INSERT TX_EL_NO_THREAD_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_THREAD_IDENTIFY); TX_EL_END_FILTER
+#define TX_EL_TIMER_DEACTIVATE_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIMER_DEACTIVATE, timer_ptr); TX_EL_END_FILTER
+#define TX_EL_TIMER_ACTIVATE_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIMER_ACTIVATE, timer_ptr); TX_EL_END_FILTER
+#define TX_EL_TIMER_INFO_GET_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIMER_INFO_GET, timer_ptr); TX_EL_END_FILTER
+#define TX_EL_TIMER_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_TIMER_PERFORMANCE_INFO_GET, timer_ptr); TX_EL_END_FILTER
+#define TX_EL_TIMER_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_TIMER_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_TIMER_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_PUT_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_SEMAPHORE_PUT, semaphore_ptr, semaphore_ptr -> tx_semaphore_count); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_GET_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_SEMAPHORE_GET, semaphore_ptr, semaphore_ptr -> tx_semaphore_count); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_DELETE_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_SEMAPHORE_DELETE, semaphore_ptr); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_CREATE_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_SEMAPHORE_CREATE, semaphore_ptr, initial_count); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_INFO_GET_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_SEMAPHORE_INFO_GET, semaphore_ptr); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_PRIORITIZE_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_SEMAPHORE_PRIORITIZE, semaphore_ptr); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_CEILING_PUT_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_SEMAPHORE_CEILING_PUT, semaphore_ptr, semaphore_ptr -> tx_semaphore_count, ceiling); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_SEMAPHORE_PERFORMANCE_INFO_GET, semaphore_ptr); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_SEMAPHORE_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_SEMAPHORE_PUT_NOTIFY_INSERT TX_EL_NO_SEMAPHORE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_SEMAPHORE_PUT_NOTIFY, semaphore_ptr, semaphore_put_notify); TX_EL_END_FILTER
+#define TX_EL_QUEUE_FRONT_SEND_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_QUEUE_FRONT_SEND, queue_ptr, source_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_SEND_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_QUEUE_SEND, queue_ptr, source_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_RECEIVE_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_QUEUE_RECEIVE, queue_ptr, destination_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_FLUSH_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_QUEUE_FLUSH, queue_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_DELETE_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_QUEUE_DELETE, queue_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_CREATE_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(TX_EL_QUEUE_CREATE, queue_ptr, queue_start, queue_size, message_size); TX_EL_END_FILTER
+#define TX_EL_QUEUE_INFO_GET_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_QUEUE_INFO_GET, queue_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_PRIORITIZE_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_QUEUE_PRIORITIZE, queue_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_QUEUE_PERFORMANCE_INFO_GET, queue_ptr); TX_EL_END_FILTER
+#define TX_EL_QUEUE_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_QUEUE_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_QUEUE_SEND_NOTIFY_INSERT TX_EL_NO_QUEUE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_QUEUE_SEND_NOTIFY, queue_ptr, queue_send_notify); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_GET_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_EVENT_FLAGS_GET, group_ptr, requested_flags, get_option); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_DELETE_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_EVENT_FLAGS_DELETE, group_ptr); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_CREATE_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_EVENT_FLAGS_CREATE, group_ptr); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_INFO_GET_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_EVENT_FLAGS_INFO_GET, group_ptr); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_EVENT_FLAGS_PERFORMANCE_INFO_GET, group_ptr); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_EVENT_FLAGS_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_EVENT_FLAGS_SET_NOTIFY_INSERT TX_EL_NO_EVENT_FLAG_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_EVENT_FLAGS_SET_NOTIFY, group_ptr, events_set_notify); TX_EL_END_FILTER
+#define TX_EL_BYTE_RELEASE_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_BYTE_RELEASE, pool_ptr, memory_ptr); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_DELETE_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BYTE_POOL_DELETE, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_CREATE_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_BYTE_POOL_CREATE, pool_ptr, pool_start, pool_size); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_INFO_GET_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BYTE_POOL_INFO_GET, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_PRIORITIZE_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BYTE_POOL_PRIORITIZE, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BYTE_ALLOCATE_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_BYTE_ALLOCATE, pool_ptr, memory_ptr, memory_size); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BYTE_POOL_PERFORMANCE_INFO_GET, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_BYTE_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_BYTE_POOL_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_BLOCK_RELEASE_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_BLOCK_RELEASE, pool_ptr, block_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_DELETE_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BLOCK_POOL_DELETE, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_CREATE_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(TX_EL_BLOCK_POOL_CREATE, pool_ptr, pool_start, pool_size, block_size); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_INFO_GET_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BLOCK_POOL_INFO_GET, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_PRIORITIZE_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BLOCK_POOL_PRIORITIZE, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_ALLOCATE_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_BLOCK_ALLOCATE, pool_ptr, block_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_BLOCK_POOL_PERFORMANCE_INFO_GET, pool_ptr); TX_EL_END_FILTER
+#define TX_EL_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_BLOCK_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_BLOCK_POOL_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+#define TX_EL_MUTEX_CREATE_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_MUTEX_CREATE, mutex_ptr, inherit); TX_EL_END_FILTER
+#define TX_EL_MUTEX_DELETE_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_MUTEX_DELETE, mutex_ptr); TX_EL_END_FILTER
+#define TX_EL_MUTEX_GET_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_MUTEX_GET, mutex_ptr, mutex_ptr -> tx_mutex_owner, mutex_ptr -> tx_mutex_ownership_count); TX_EL_END_FILTER
+#define TX_EL_MUTEX_INFO_GET_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_MUTEX_INFO_GET, mutex_ptr); TX_EL_END_FILTER
+#define TX_EL_MUTEX_PRIORITIZE_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_MUTEX_PRIORITIZE, mutex_ptr); TX_EL_END_FILTER
+#define TX_EL_MUTEX_PUT_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(TX_EL_MUTEX_PUT, mutex_ptr, mutex_ptr -> tx_mutex_owner, mutex_ptr -> tx_mutex_ownership_count); TX_EL_END_FILTER
+#define TX_EL_MUTEX_PERFORMANCE_INFO_GET_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(TX_EL_MUTEX_PERFORMANCE_INFO_GET, mutex_ptr); TX_EL_END_FILTER
+#define TX_EL_MUTEX_PERFORMANCE_SYSTEM_INFO_GET_INSERT TX_EL_NO_MUTEX_EVENTS TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(TX_EL_MUTEX_PERFORMANCE_SYSTEM_INFO_GET); TX_EL_END_FILTER
+
+
+#endif
+
+
+/* Define Event Log function prototypes. */
+
+VOID _tx_el_initialize(VOID);
+UINT _tx_el_thread_register(TX_THREAD *thread_ptr);
+UINT _tx_el_thread_unregister(TX_THREAD *thread_ptr);
+VOID _tx_el_user_event_insert(UINT sub_type, ULONG info_1, ULONG info_2,
+ ULONG info_3, ULONG info_4);
+VOID _tx_el_thread_running(TX_THREAD *thread_ptr);
+VOID _tx_el_thread_preempted(TX_THREAD *thread_ptr);
+VOID _tx_el_interrupt(UINT interrupt_number);
+VOID _tx_el_interrupt_end(UINT interrupt_number);
+VOID _tx_el_interrupt_control_call(void);
+VOID _tx_el_event_log_on(void);
+VOID _tx_el_event_log_off(void);
+VOID _tx_el_event_filter_set(UINT filter);
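As a hedged example of the application-facing calls above, the fragment below records a custom event; the sub-type value and info arguments are arbitrary application choices, not values defined by this header, and the sketch assumes this header is included.

    /* Sketch: log an application-defined event (a TX_EL_USER_EVENT record).        */
    /* The sub-type 1 and the two info values are hypothetical application choices. */
    void demo_log_user_event(ULONG sensor_id, ULONG reading)
    {
        _tx_el_user_event_insert(1, sensor_id, reading, 0, 0);
    }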
+
+
+/* Define macros that are used inside the ThreadX source code.
+ If event logging is disabled, these macros will be defined
+ as white space. */
+
+#ifdef TX_ENABLE_EVENT_LOGGING
+#ifndef TX_NO_EVENT_INFO
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(a, b, c, d, e) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =\
+ (ULONG) b;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_2_OFFSET)) =\
+ (ULONG) c;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) =\
+ (ULONG) d;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_4_OFFSET)) =\
+ (ULONG) e;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(a, b, c, d) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =\
+ (ULONG) b;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_2_OFFSET)) =\
+ (ULONG) c;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) =\
+ (ULONG) d;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(a, b, c) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =\
+ (ULONG) b;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_2_OFFSET)) =\
+ (ULONG) c;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(a, b) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =\
+ (ULONG) b;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(a) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_THREAD_STATUS_CHANGE_INSERT(a, b) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ TX_EL_NO_STATUS_EVENTS \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREAD_STATUS_CHANGE; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) b; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) a;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ TX_EL_END_FILTER \
+ }
+#define TX_EL_THREAD_REGISTER(a) \
+ _tx_el_thread_register(a);
+#define TX_EL_THREAD_UNREGISTER(a) \
+ _tx_el_thread_unregister(a);
+#define TX_EL_INITIALIZE _tx_el_initialize();
+#else
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(a, b, c, d, e) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(a, b, c, d) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(a, b, c) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(a, b) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(a) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREADX_CALL; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) a; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) _tx_thread_current_ptr;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ }
+#define TX_EL_THREAD_STATUS_CHANGE_INSERT(a, b) \
+ { \
+ UCHAR *entry_ptr; \
+ ULONG upper_tbu; \
+ TX_EL_NO_STATUS_EVENTS \
+ entry_ptr = *_tx_el_current_event; \
+ *((unsigned short *) entry_ptr) = TX_EL_THREAD_STATUS_CHANGE; \
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) = (unsigned short) b; \
+ do { \
+ upper_tbu = read_tbu(); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) = upper_tbu; \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =\
+ (ULONG) read_tbl();\
+ } while (upper_tbu != read_tbu()); \
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =\
+ (ULONG) a;\
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;\
+ if (entry_ptr >= _tx_el_event_area_end) \
+ {\
+ entry_ptr = _tx_el_event_area_start;\
+ }\
+ *_tx_el_current_event = entry_ptr;\
+ TX_EL_END_FILTER \
+ }
+#define TX_EL_THREAD_REGISTER(a) \
+ _tx_el_thread_register(a);
+#define TX_EL_THREAD_UNREGISTER(a) \
+ _tx_el_thread_unregister(a);
+#define TX_EL_INITIALIZE _tx_el_initialize();
+#endif
+#else
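+/* Event logging disabled: all of the logging macros below expand to nothing. */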
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO4(a, b, c, d, e)
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO3(a, b, c, d)
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(a, b, c)
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO1(a, b)
+#define TX_EL_KERNEL_CALL_EVENT_INSERT_INFO0(a)
+#define TX_EL_THREAD_STATUS_CHANGE_INSERT(a, b)
+#define TX_EL_THREAD_REGISTER(a)
+#define TX_EL_THREAD_UNREGISTER(a)
+#define TX_EL_INITIALIZE
+#endif
+
+#endif
+
diff --git a/ports/cortex_a5/ghs/inc/tx_ghs.h b/ports/cortex_a5/ghs/inc/tx_ghs.h
new file mode 100644
index 00000000..ca976916
--- /dev/null
+++ b/ports/cortex_a5/ghs/inc/tx_ghs.h
@@ -0,0 +1,77 @@
+/*
+ * ThreadX C/C++ Library Support
+ *
+ * Copyright 1983-2019 Green Hills Software LLC.
+ *
+ * This program is the property of Green Hills Software LLC.,
+ * its contents are proprietary information and no part of it
+ * is to be disclosed to anyone except employees of Green Hills
+ * Software LLC., or as agreed in writing signed by the President
+ * of Green Hills Software LLC.
+ */
+
+#ifndef _TX_GHS_H_
+#define _TX_GHS_H_
+
+#include <setjmp.h>
+#include <signal.h>
+#include <stdio.h>
+#include <time.h>
+
+#if defined(__ghs) && (__GHS_VERSION_NUMBER >= 500)
+extern void *__ghs_GetThreadLocalStorageItem(int specifier);
+
+/* Thread-local storage routines for Green Hills releases 5.x and beyond.
+ The following specifiers are used when calling
+ __ghs_GetThreadLocalStorageItem.
+
+ If __ghs_GetThreadLocalStorageItem is customized to
+ return a per-thread errno value, define the preprocessor symbol
+ USE_THREAD_LOCAL_ERRNO in ind_errn.c.
+ */
+
+enum __ghs_ThreadLocalStorage_specifier {
+ __ghs_TLS_asctime_buff,
+ __ghs_TLS_tmpnam_space,
+ __ghs_TLS_strtok_saved_pos,
+ __ghs_TLS_Errno,
+ __ghs_TLS_gmtime_temp,
+ __ghs_TLS___eh_globals,
+ __ghs_TLS_SignalHandlers
+};
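+
+/* Illustrative sketch only (not part of the GHS headers): library code
+   retrieves a per-thread item by passing one of the specifiers above,
+   for example the C++ runtime fetching its __eh_globals slot:
+
+       void **peh_globals = (void **)
+           __ghs_GetThreadLocalStorageItem((int) __ghs_TLS___eh_globals);
+
+   Specifiers this port does not allocate per thread return a null
+   pointer from __ghs_GetThreadLocalStorageItem in tx_ghs.c. */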
+#else
+/* Thread-local storage routines for Green Hills releases 4.x and 3.x . */
+typedef void (*SignalHandler)(int);
+
+typedef struct
+{
+ int Errno; /* errno. */
+ SignalHandler SignalHandlers[_SIGMAX]; /* signal() buffer. */
+ char tmpnam_space[L_tmpnam]; /* tmpnam(NULL) buffer. */
+ char asctime_buff[30]; /* asctime() buffer. */
+ char *strtok_saved_pos; /* strtok() position. */
+ struct tm gmtime_temp; /* gmtime() and localtime() buffer. */
+ void *__eh_globals; /* Pointer for C++ exception handling. */
+} ThreadLocalStorage;
+
+ThreadLocalStorage *GetThreadLocalStorage(void);
+#endif
+
+
+void __ghsLock(void);
+void __ghsUnlock(void);
+
+int __ghs_SaveSignalContext(jmp_buf);
+void __ghs_RestoreSignalContext(jmp_buf);
+
+/* prototypes for FILE lock routines. */
+void __ghs_flock_file(void *);
+void __ghs_funlock_file(void *);
+int __ghs_ftrylock_file(void *);
+void __ghs_flock_create(void **);
+void __ghs_flock_destroy(void *);
+
+/* prototype for GHS/ThreadX error shell checking. */
+void __ghs_rnerr(char *errMsg, int stackLevels, int stackTraceDisplay, void *hexVal);
+
+#endif /* _TX_GHS_H_ */
diff --git a/ports/cortex_a5/green/inc/tx_port.h b/ports/cortex_a5/ghs/inc/tx_port.h
similarity index 91%
rename from ports/cortex_a5/green/inc/tx_port.h
rename to ports/cortex_a5/ghs/inc/tx_port.h
index 83153e97..b50dcfdc 100644
--- a/ports/cortex_a5/green/inc/tx_port.h
+++ b/ports/cortex_a5/ghs/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,36 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A5/Green Hills */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h Cortex-A5/GHS */
+/* 6.1.10 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
-/* resulting in version 6.1.6 */
+/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -63,7 +63,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -78,7 +78,7 @@
#include "tx_ghs.h"
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -114,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -134,13 +134,13 @@ typedef unsigned short USHORT;
/* Define the number of ticks per second. This informs the EventAnalyzer what the timestamps
represent. By default, this is set to 1,000,000 i.e., one tick every microsecond. */
-#define TX_EL_TICKS_PER_SECOND 1000000
+#define TX_EL_TICKS_PER_SECOND 1000000
/* Define the method of how to get the upper and lower 32-bits of the time stamp. By default, simply
- simulate the time-stamp source with a counter. */
+ simulate the time-stamp source with a counter. */
-#define read_tbu() _tx_el_time_base_upper
-#define read_tbl() ++_tx_el_time_base_lower
+#define read_tbu() _tx_el_time_base_upper
+#define read_tbl() ++_tx_el_time_base_lower
/* Define the port specific options for the _tx_build_options variable. This variable indicates
@@ -174,7 +174,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -186,16 +186,16 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable; \
VOID * tx_thread_eh_globals; \
int Errno; /* errno. */ \
char * strtok_saved_pos; /* strtok() position. */
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -209,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -243,7 +243,7 @@ typedef unsigned short USHORT;
extern void __tx_cpp_exception_cleanup(TX_THREAD *thread_ptr); \
__tx_cpp_exception_cleanup(thread_ptr); \
}
-#else
+#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) \
{ \
#pragma weak __cpp_exception_cleanup \
@@ -281,18 +281,18 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
b = __CLZ32(m); \
- b = 31 - b;
+ b = 31 - b;
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -302,7 +302,7 @@ typedef unsigned short USHORT;
unsigned int _tx_thread_interrupt_disable(void);
void _tx_thread_interrupt_restore(unsigned int new_posture);
-#define TX_INTERRUPT_SAVE_AREA register INT interrupt_save;
+#define TX_INTERRUPT_SAVE_AREA register int interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
@@ -310,7 +310,7 @@ void _tx_thread_interrupt_restore(unsigned int new_po
#else
-#define TX_INTERRUPT_SAVE_AREA register INT interrupt_save;
+#define TX_INTERRUPT_SAVE_AREA register int interrupt_save;
#if defined(__GHS_VERSION_NUMBER) && (__GHS_VERSION_NUMBER >= 350)
@@ -349,7 +349,7 @@ asm int disable_ints(void)
MSR CPSR_c,r1
#else
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if
+ CPSID if
#else
CPSID i
#endif
@@ -395,7 +395,7 @@ void tx_thread_vfp_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A5/Green Hills Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A5/Green Hills Version 6.1.10 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a5/green/readme_threadx.txt b/ports/cortex_a5/ghs/readme_threadx.txt
similarity index 100%
rename from ports/cortex_a5/green/readme_threadx.txt
rename to ports/cortex_a5/ghs/readme_threadx.txt
diff --git a/ports/cortex_a5/ghs/src/tx_el.c b/ports/cortex_a5/ghs/src/tx_el.c
new file mode 100644
index 00000000..d8f056d7
--- /dev/null
+++ b/ports/cortex_a5/ghs/src/tx_el.c
@@ -0,0 +1,1165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX/GHS Event Log (EL) */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+#define TX_EL_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_el.h"
+#include "string.h"
+
+
+/* Define global variables used to manage the event pool. */
+
+UCHAR *_tx_el_tni_start;
+UCHAR **_tx_el_current_event;
+UCHAR *_tx_el_event_area_start;
+UCHAR *_tx_el_event_area_end;
+UINT _tx_el_maximum_events;
+ULONG _tx_el_total_events;
+UINT _tx_el_event_filter;
+ULONG _tx_el_time_base_upper;
+ULONG _tx_el_time_base_lower;
+
+extern char __ghsbegin_eventlog[];
+extern char __ghsend_eventlog[];
+
+extern TX_THREAD *_tx_thread_current_ptr;
+UINT _tx_thread_interrupt_control(UINT new_posture);
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_initialize PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function creates the Event Log (in the format dictated by the */
+/* GHS Event Analyzer) and sets up various information for subsequent */
+/* operation. The start and end of the Event Log is determined by the */
+/* .eventlog section in the linker control file. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_initialize(VOID)
+{
+
+UCHAR *work_ptr;
+UCHAR *read_ptr;
+ULONG event_log_size;
+UCHAR *end_ptr;
+UINT i;
+
+
+ /* Clear total event counter. */
+ _tx_el_total_events = 0;
+
+ /* Clear event filter. */
+ _tx_el_event_filter = 0;
+
+ /* First, pickup the starting and ending address of the Event Log memory. */
+ work_ptr = (unsigned char *) __ghsbegin_eventlog;
+ end_ptr = (unsigned char *) __ghsend_eventlog;
+
+ /* Calculate the event log size. */
+ event_log_size = end_ptr - work_ptr;
+
+ /* Subtract off the number of bytes in the header and the TNI area. */
+ event_log_size = event_log_size - (TX_EL_HEADER_SIZE +
+ (TX_EL_TNI_ENTRY_SIZE * TX_EL_TNIS));
+
+ /* Make sure the event log is evenly divisible by the event size. */
+ event_log_size = (event_log_size/TX_EL_EVENT_SIZE) * TX_EL_EVENT_SIZE;
+
+ /* Build the Event Log header. */
+
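+ /* For reference, the header written below is laid out as follows
+ (the symbolic offsets are defined in tx_el.h):
+ unsigned short version_id (TX_EL_VERSION_ID)
+ unsigned short tnis (number of TNI entries)
+ ULONG evps (event pool size in bytes)
+ ULONG max_events (evps / TX_EL_EVENT_SIZE)
+ ULONG evploc (address of the event pool)
+ ULONG event_ptr (pointer to the oldest/next event slot)
+ ULONG tbfreq (timestamp ticks per second) */
+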
+ /* Setup the Event Log Version ID. */
+ *((unsigned short *) work_ptr) = (unsigned short) TX_EL_VERSION_ID;
+ work_ptr = work_ptr + sizeof(unsigned short);
+
+ /* Setup the TNIS (number of thread names) field. */
+ *((unsigned short *) work_ptr) = (unsigned short) TX_EL_TNIS;
+ work_ptr = work_ptr + sizeof(unsigned short);
+
+ /* Setup the EVPS (event pool size) field. */
+ *((ULONG *) work_ptr) = event_log_size;
+ work_ptr = work_ptr + sizeof(ULONG);
+
+ /* Remember the maximum number of events. */
+ _tx_el_maximum_events = event_log_size/TX_EL_EVENT_SIZE;
+
+ /* Setup max_events field. */
+ *((ULONG *) work_ptr) = _tx_el_maximum_events;
+ work_ptr = work_ptr + sizeof(ULONG);
+
+ /* Setup the evploc (location of event pool). */
+ *((ULONG *) work_ptr) = (ULONG) (((ULONG) __ghsbegin_eventlog) + TX_EL_HEADER_SIZE +
+ (TX_EL_TNIS * TX_EL_TNI_ENTRY_SIZE));
+ work_ptr = work_ptr + sizeof(ULONG);
+
+ /* Save the current event pointer. */
+ _tx_el_current_event = (UCHAR **) work_ptr;
+
+ /* Setup event_ptr (pointer to oldest event) field to the start
+ of the event pool. */
+ *_tx_el_current_event = (UCHAR *) (((ULONG) __ghsbegin_eventlog) + TX_EL_HEADER_SIZE +
+ (TX_EL_TNIS * TX_EL_TNI_ENTRY_SIZE));
+ work_ptr = work_ptr + sizeof(ULONG);
+
+ /* Setup tbfreq (the number of ticks in a second) field. */
+ *((ULONG *) work_ptr) = TX_EL_TICKS_PER_SECOND;
+ work_ptr = work_ptr + sizeof(ULONG);
+
+ /* At this point we are pointing at the Thread Name Information (TNI) array. */
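+ /* Each TNI entry holds a thread name (up to TX_EL_TNI_NAME_SIZE bytes),
+ the thread pointer, the thread priority, and a valid flag stored as a
+ UCHAR; the field offsets are defined in tx_el.h. */
+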
+
+ /* Remember the start of this for future updates. */
+ _tx_el_tni_start = work_ptr;
+
+ /* Clear the entire TNI array, this is the initial setting. */
+ end_ptr = work_ptr + (TX_EL_TNIS * TX_EL_TNI_ENTRY_SIZE);
+ memset((void *)work_ptr, 0, (TX_EL_TNIS * TX_EL_TNI_ENTRY_SIZE));
+ work_ptr = end_ptr;
+
+ /* At this point, we are pointing at the actual Event Entry area. */
+
+ /* Remember the start of the actual event log area. */
+ _tx_el_event_area_start = work_ptr;
+
+ /* Clear the entire Event area. */
+ end_ptr = work_ptr + event_log_size;
+ memset((void *)work_ptr, 0, event_log_size);
+ work_ptr = end_ptr;
+
+ /* Save the end pointer for later use. */
+ _tx_el_event_area_end = work_ptr;
+
+ /* Setup an entry to resolve all activities from initialization and from
+ an idle system. */
+ work_ptr = _tx_el_tni_start;
+ read_ptr = (UCHAR *) "Initialization/System Idle";
+ i = 0;
+ while ((i < TX_EL_TNI_NAME_SIZE) && (*read_ptr))
+ {
+
+ /* Copy a character of thread's name into TNI area of log. */
+ *work_ptr++ = *read_ptr++;
+
+ /* Increment the character count. */
+ i++;
+ }
+
+ /* Determine if a NULL needs to be inserted. */
+ if (i < TX_EL_TNI_NAME_SIZE)
+ {
+
+ /* Yes, insert a NULL into the event log string. */
+ *work_ptr = (unsigned char) 0;
+ }
+
+ /* Setup the thread ID to NULL. */
+ *((ULONG *) (_tx_el_tni_start + TX_EL_TNI_THREAD_ID_OFFSET)) = (ULONG) TX_NULL;
+
+ /* Set the valid field to indicate the entry is complete. */
+ *((UCHAR *) (_tx_el_tni_start + TX_EL_TNI_VALID_OFFSET)) = (ULONG) TX_EL_VALID_ENTRY;
+
+ /* Clear the time base global variables. */
+ _tx_el_time_base_upper = 0;
+ _tx_el_time_base_lower = 0;
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_thread_register PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function registers a thread in the event log for future */
+/* display purposes. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SUCCESS Thread was placed in TNI area */
+/* TX_EL_NO_MORE_TNI_ROOM No more room in the TNI area */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create ThreadX thread create function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+UINT _tx_el_thread_register(TX_THREAD *thread_ptr)
+{
+
+UCHAR *entry_ptr;
+UCHAR *work_ptr;
+UCHAR *read_ptr;
+UINT i;
+
+
+ /* First of all, search for a free slot in the TNI area. */
+ entry_ptr = _tx_el_tni_start;
+ i = 0;
+ while (i < TX_EL_TNIS)
+ {
+
+ /* Determine if this entry is available. */
+ if (*(entry_ptr + TX_EL_TNI_VALID_OFFSET) == TX_EL_INVALID_ENTRY)
+ break;
+
+ /* Otherwise, increment the associated pointers and indices. */
+ i++;
+ entry_ptr = entry_ptr + TX_EL_TNI_ENTRY_SIZE;
+ }
+
+ /* Check to see if there were no more valid entries. */
+ if (i >= TX_EL_TNIS)
+ return(TX_EL_NO_MORE_TNI_ROOM);
+
+ /* Otherwise, we have room in the TNI and a valid record. */
+
+ /* Setup the thread's name. */
+ work_ptr = entry_ptr;
+ read_ptr = (UCHAR *) thread_ptr -> tx_thread_name;
+ i = 0;
+ while ((i < TX_EL_TNI_NAME_SIZE) && (*read_ptr))
+ {
+
+ /* Copy a character of thread's name into TNI area of log. */
+ *work_ptr++ = *read_ptr++;
+
+ /* Increment the character count. */
+ i++;
+ }
+
+ /* Determine if a NULL needs to be inserted. */
+ if (i < TX_EL_TNI_NAME_SIZE)
+ {
+
+ /* Yes, insert a NULL into the event log string. */
+ *work_ptr = (unsigned char) 0;
+ }
+
+ /* Setup the thread ID. */
+ *((ULONG *) (entry_ptr + TX_EL_TNI_THREAD_ID_OFFSET)) = (ULONG) thread_ptr;
+
+ /* Setup the thread priority. */
+ *((ULONG *) (entry_ptr + TX_EL_TNI_THREAD_PRIORITY_OFF)) = (ULONG) thread_ptr -> tx_thread_priority;
+
+ /* Set the valid field to indicate the entry is complete. */
+ *((UCHAR *) (entry_ptr + TX_EL_TNI_VALID_OFFSET)) = (ULONG) TX_EL_VALID_ENTRY;
+
+ /* Thread name has been registered. */
+ return(TX_SUCCESS);
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_thread_unregister PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function unregisters a thread in the event log for future */
+/* display purposes. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control block */
+/* */
+/* OUTPUT */
+/* */
+/* TX_SUCCESS Thread was removed from TNI area */
+/* TX_EL_NAME_NOT_FOUND Thread name was not found in TNI area */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_delete ThreadX thread delete function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+UINT _tx_el_thread_unregister(TX_THREAD *thread_ptr)
+{
+
+UCHAR *entry_ptr;
+UCHAR *work_ptr;
+UCHAR *read_ptr;
+UINT found;
+UINT i, j;
+
+
+ /* First of all, search for a match in the TNI area. */
+ entry_ptr = _tx_el_tni_start;
+ i = 0;
+ while (i < TX_EL_TNIS)
+ {
+
+ /* Determine if this entry is a match. */
+ work_ptr = entry_ptr;
+ read_ptr = (UCHAR *) thread_ptr -> tx_thread_name;
+ found = TX_TRUE;
+ j = 0;
+ do
+ {
+
+ /* Determine if this character is the same. */
+ if (*work_ptr != *read_ptr)
+ {
+
+ /* Set found to false and fall out of the loop. */
+ found = TX_FALSE;
+ break;
+ }
+ else if (*work_ptr == 0)
+ {
+
+ /* Null terminated, just break the loop. */
+ break;
+ }
+ else
+ {
+
+ /* Copy a character of thread's name into TNI area of log. */
+ *work_ptr++ = *read_ptr++;
+ }
+
+ /* Increment the character count. */
+ j++;
+
+ } while(j < TX_EL_TNIS);
+
+
+ /* Was a match found? */
+ if (found)
+ {
+
+ /* Yes, mark the entry as available now. */
+ *(entry_ptr + TX_EL_TNI_VALID_OFFSET) = TX_EL_INVALID_ENTRY;
+
+ /* Get out of the loop! */
+ break;
+ }
+
+ /* Otherwise, increment the associated pointers and indices. */
+ i++;
+ entry_ptr = entry_ptr + TX_EL_TNI_ENTRY_SIZE;
+ }
+
+ /* Determine status to return. */
+ if (found)
+ return(TX_SUCCESS);
+ else
+ return(TX_EL_NAME_NOT_FOUND);
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_user_event_insert PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts a user event into the event log. */
+/* If the event log is full, the oldest event is overwritten. */
+/* */
+/* INPUT */
+/* */
+/* sub_type Event subtype for user event */
+/* info_1 First information field */
+/* info_2 Second information field */
+/* info_3 Third information field */
+/* info_4 Fourth information field */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX services */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_user_event_insert(UINT sub_type, ULONG info_1, ULONG info_2,
+ ULONG info_3, ULONG info_4)
+{
+
+TX_INTERRUPT_SAVE_AREA
+
+UINT upper_tb;
+UCHAR *entry_ptr;
+
+ /* Disable interrupts. */
+ TX_DISABLE
+
+ /* Increment total event counter. */
+ _tx_el_total_events++;
+
+ /* Setup working entry pointer first. */
+ entry_ptr = *_tx_el_current_event;
+
+ /* Store the event type. */
+ *((unsigned short *) entry_ptr) = (unsigned short) TX_EL_USER_EVENT;
+
+ /* Store the event subtype. */
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) =
+ (unsigned short) sub_type;
+
+ /* Get time stamp. */
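+ /* (The upper word is read again after the lower word; a change means the
+ lower time base rolled over between the two reads, so the loop retries.) */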
+ do
+ {
+
+ /* Pickup the upper tb. */
+ upper_tb = (ULONG) read_tbu();
+
+ /* Store the upper time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) =
+ (ULONG) upper_tb;
+
+ /* Store the lower time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =
+ (ULONG) read_tbl();
+ } while (upper_tb != (ULONG) read_tbu());
+
+ /* Store the current thread. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =
+ (ULONG) _tx_thread_current_ptr;
+
+ /* Store the first info field. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =
+ (ULONG) info_1;
+
+ /* Store the second info field. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_2_OFFSET)) =
+ (ULONG) info_2;
+
+ /* Store the third info field. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) =
+ (ULONG) info_3;
+
+ /* Store the fourth info field. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_4_OFFSET)) =
+ (ULONG) info_4;
+
+ /* Now move the current event log pointer. */
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;
+
+ /* Check for a wraparound condition. */
+ if (entry_ptr >= _tx_el_event_area_end)
+ {
+
+ /* Yes, we have wrapped around to the end of the event area.
+ Start back at the top! */
+ entry_ptr = _tx_el_event_area_start;
+ }
+
+ /* Write the entry pointer back into the header. */
+ *_tx_el_current_event = entry_ptr;
+
+ /* Restore interrupts. */
+ TX_RESTORE
+}
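+
+/* Usage sketch (application code, illustrative only): log one user-defined
+   event with subtype 1 and four information words. The event appears in
+   the GHS EventAnalyzer as a user event under the current thread.
+
+       _tx_el_user_event_insert(1, 0x10, 0x20, 0x30, 0x40);
+ */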
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_thread_running PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts a thread change event into the event */
+/* log, which indicates that a context switch is taking place. */
+/* If the event log is full, the oldest event is overwritten. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread being */
+/* scheduled */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_schedule ThreadX scheduler */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_thread_running(TX_THREAD *thread_ptr)
+{
+
+UINT upper_tb;
+UCHAR *entry_ptr;
+
+ TX_EL_NO_STATUS_EVENTS
+
+ /* Increment total event counter. */
+ _tx_el_total_events++;
+
+ /* Setup working entry pointer first. */
+ entry_ptr = *_tx_el_current_event;
+
+ /* Store the event type. */
+ *((unsigned short *) entry_ptr) = (unsigned short) TX_EL_THREAD_CHANGE;
+
+ /* Store the event subtype. */
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) =
+ (unsigned short) 0;
+
+ /* Get time stamp. */
+ do
+ {
+
+ /* Pickup the upper tb. */
+ upper_tb = (ULONG) read_tbu();
+
+ /* Store the upper time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) =
+ (ULONG) upper_tb;
+
+ /* Store the lower time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =
+ (ULONG) read_tbl();
+ } while (upper_tb != (ULONG) read_tbu());
+
+ /* Store the current thread. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =
+ (ULONG) thread_ptr;
+
+ /* Now move the current event log pointer. */
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;
+
+ /* Check for a wraparound condition. */
+ if (entry_ptr >= _tx_el_event_area_end)
+ {
+
+ /* Yes, we have wrapped around to the end of the event area.
+ Start back at the top! */
+ entry_ptr = _tx_el_event_area_start;
+ }
+
+ /* Write the entry pointer back into the header. */
+ *_tx_el_current_event = entry_ptr;
+
+ TX_EL_END_FILTER
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_thread_preempted PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts a thread preempted event into the event */
+/* log, which indicates that an interrupt occurred that made a higher */
+/* priority thread ready for execution. In this case, the previously */
+/* executing thread has an event entered to indicate it is no longer */
+/* running. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread being */
+/* scheduled */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_context_restore ThreadX context restore */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_thread_preempted(TX_THREAD *thread_ptr)
+{
+
+UINT upper_tb;
+UCHAR *entry_ptr;
+
+
+ TX_EL_NO_STATUS_EVENTS
+
+ /* Increment total event counter. */
+ _tx_el_total_events++;
+
+ /* Setup working entry pointer first. */
+ entry_ptr = *_tx_el_current_event;
+
+ /* Store the event type. */
+ *((unsigned short *) entry_ptr) = (unsigned short) TX_EL_THREAD_STATUS_CHANGE;
+
+ /* Store the event subtype. */
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) =
+ (unsigned short) TX_READY;
+
+ /* Get time stamp. */
+ do
+ {
+
+ /* Pickup the upper tb. */
+ upper_tb = (ULONG) read_tbu();
+
+ /* Store the upper time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) =
+ (ULONG) upper_tb;
+
+ /* Store the lower time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =
+ (ULONG) read_tbl();
+ } while (upper_tb != (ULONG) read_tbu());
+
+ /* Store the current thread. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =
+ (ULONG) _tx_thread_current_ptr;
+
+ /* Now move the current event log pointer. */
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;
+
+ /* Check for a wraparound condition. */
+ if (entry_ptr >= _tx_el_event_area_end)
+ {
+
+ /* Yes, we have wrapped around to the end of the event area.
+ Start back at the top! */
+ entry_ptr = _tx_el_event_area_start;
+ }
+
+ /* Write the entry pointer back into the header. */
+ *_tx_el_current_event = entry_ptr;
+
+ TX_EL_END_FILTER
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_interrupt PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts an interrupt event into the log, which */
+/* indicates the start of interrupt processing for the specified interrupt. */
+/* */
+/* INPUT */
+/* */
+/* interrupt_number Interrupt number supplied by */
+/* ISR */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISR processing */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_interrupt(UINT interrupt_number)
+{
+
+UINT upper_tb;
+UCHAR *entry_ptr;
+
+
+ TX_EL_NO_INTERRUPT_EVENTS
+
+ /* Increment total event counter. */
+ _tx_el_total_events++;
+
+ /* Setup working entry pointer first. */
+ entry_ptr = *_tx_el_current_event;
+
+ /* Store the event type. */
+ *((unsigned short *) entry_ptr) = (unsigned short) TX_EL_INTERRUPT;
+
+ /* Store the event subtype. */
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) =
+ (unsigned short) TX_EL_INTERRUPT_SUB_TYPE;
+
+ /* Get time stamp. */
+ do
+ {
+
+ /* Pickup the upper tb. */
+ upper_tb = (ULONG) read_tbu();
+
+ /* Store the upper time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) =
+ (ULONG) upper_tb;
+
+ /* Store the lower time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =
+ (ULONG) read_tbl();
+ } while (upper_tb != (ULONG) read_tbu());
+
+ /* Store the current thread. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =
+ (ULONG) _tx_thread_current_ptr;
+
+ /* Store the first info word. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =
+ (ULONG) interrupt_number;
+
+ /* Now move the current event log pointer. */
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;
+
+ /* Check for a wraparound condition. */
+ if (entry_ptr >= _tx_el_event_area_end)
+ {
+
+ /* Yes, we have wrapped around to the end of the event area.
+ Start back at the top! */
+ entry_ptr = _tx_el_event_area_start;
+ }
+
+ /* Write the entry pointer back into the header. */
+ *_tx_el_current_event = entry_ptr;
+
+ TX_EL_END_FILTER
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_interrupt_end PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function inserts an interrupt end event into the log, which */
+/* indicates the end of interrupt processing for the specified interrupt. */
+/* */
+/* INPUT */
+/* */
+/* interrupt_number Interrupt number supplied by */
+/* ISR */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISR processing */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_interrupt_end(UINT interrupt_number)
+{
+
+UINT upper_tb;
+UCHAR *entry_ptr;
+
+
+ TX_EL_NO_INTERRUPT_EVENTS
+
+ /* Increment total event counter. */
+ _tx_el_total_events++;
+
+ /* Setup working entry pointer first. */
+ entry_ptr = *_tx_el_current_event;
+
+ /* Store the event type. */
+ *((unsigned short *) entry_ptr) = (unsigned short) TX_EL_INTERRUPT;
+
+ /* Store the event subtype. */
+ *((unsigned short *) (entry_ptr + TX_EL_EVENT_SUBTYPE_OFFSET)) =
+ (unsigned short) TX_EL_END_OF_INTERRUPT;
+
+ /* Get time stamp. */
+ do
+ {
+
+ /* Pickup the upper tb. */
+ upper_tb = (ULONG) read_tbu();
+
+ /* Store the upper time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) =
+ (ULONG) upper_tb;
+
+ /* Store the lower time stamp. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) =
+ (ULONG) read_tbl();
+ } while (upper_tb != (ULONG) read_tbu());
+
+ /* Store the current thread. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_THREAD_OFFSET)) =
+ (ULONG) _tx_thread_current_ptr;
+
+ /* Store the first info word. */
+ *((ULONG *) (entry_ptr + TX_EL_EVENT_INFO_1_OFFSET)) =
+ (ULONG) interrupt_number;
+
+ /* Now move the current event log pointer. */
+ entry_ptr = entry_ptr + TX_EL_EVENT_SIZE;
+
+ /* Check for a wraparound condition. */
+ if (entry_ptr >= _tx_el_event_area_end)
+ {
+
+ /* Yes, we have wrapped around to the end of the event area.
+ Start back at the top! */
+ entry_ptr = _tx_el_event_area_start;
+ }
+
+ /* Write the entry pointer back into the header. */
+ *_tx_el_current_event = entry_ptr;
+
+ TX_EL_END_FILTER
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_interrupt_control PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function remaps the tx_interrupt_control service call so that */
+/* it can be tracked in the event log. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt posture */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_interrupt_control Interrupt control service */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX services */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+UINT _tx_el_interrupt_control(UINT new_posture)
+{
+
+TX_INTERRUPT_SAVE_AREA
+UINT old_posture;
+
+
+ TX_EL_NO_INTERRUPT_EVENTS
+
+ TX_DISABLE
+ TX_EL_KERNEL_CALL_EVENT_INSERT_INFO2(TX_EL_INTERRUPT_CONTROL, _tx_thread_current_ptr, new_posture)
+ TX_RESTORE
+
+ TX_EL_END_FILTER
+
+ old_posture = _tx_thread_interrupt_control(new_posture);
+ return(old_posture);
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_event_log_on PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables all event filters. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_event_log_on(void)
+{
+
+ /* Disable all event filters. */
+ _tx_el_event_filter = TX_EL_ENABLE_ALL_EVENTS;
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_event_log_off PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function sets all event filters, thereby turning event */
+/* logging off. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_event_log_off(void)
+{
+
+ /* Set all event filters. */
+ _tx_el_event_filter = TX_EL_FILTER_ALL_EVENTS;
+}
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_el_event_filter_set PORTABLE C */
+/* 6.1 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function sets the event filters specified by the user. */
+/* */
+/* INPUT */
+/* */
+/* filter Events to filter */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+VOID _tx_el_event_filter_set(UINT filter)
+{
+
+ /* Apply the user event filter. */
+ _tx_el_event_filter = filter;
+}
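+
+/* Usage sketch (application code, illustrative only): suppress logging
+   around a time-critical burst, then re-enable it. The filter constants
+   come from tx_el.h.
+
+       _tx_el_event_log_off();
+       ...burst of activity that should not be logged...
+       _tx_el_event_log_on();
+ */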
+
diff --git a/ports/cortex_a5/ghs/src/tx_ghs.c b/ports/cortex_a5/ghs/src/tx_ghs.c
new file mode 100644
index 00000000..30b8054e
--- /dev/null
+++ b/ports/cortex_a5/ghs/src/tx_ghs.c
@@ -0,0 +1,485 @@
+/*
+ * ThreadX C/C++ Library Support
+ *
+ * Copyright 1983-2019 Green Hills Software LLC.
+ *
+ * This program is the property of Green Hills Software LLC.,
+ * its contents are proprietary information and no part of it
+ * is to be disclosed to anyone except employees of Green Hills
+ * Software LLC., or as agreed in writing signed by the President
+ * of Green Hills Software LLC.
+ */
+
+#include "tx_ghs.h"
+#ifndef TX_DISABLE_ERROR_CHECKING
+#define TX_DISABLE_ERROR_CHECKING
+#endif
+#include "tx_api.h"
+#include
+#include
+
+/* Allow these routines to access the following ThreadX global variables. */
+extern ULONG _tx_thread_created_count;
+extern TX_THREAD *_tx_thread_created_ptr;
+extern TX_THREAD *_tx_thread_current_ptr;
+
+#if defined(__ghs) && (__GHS_VERSION_NUMBER >= 500)
+/* Thread-local storage routines for Green Hills releases 5.x and above. */
+/*
+ Thread-Local (Per-Thread) Library Data Retrieval
+ ================================================
+
+ __ghs_ThreadLocalStorage_specifier defines all library data items
+ that the Green Hills libraries allow to be allocated per-thread.
+
+ An implementation can choose which of these data items to allocate
+ for each thread. For example, an implementation may choose to
+ allocate an errno value for each thread, but not the strtok_saved_pos
+ pointer. The application could then use strtok_r instead of strtok for
+ correct operation.
+
+ To add per-thread library data, define one of the
+ TX_THREAD_EXTENSION_* macros in tx_port.h to include the data item
+ or items in each thread control block TX_THREAD.
+
+ If C++ with exceptions is being used, the __eh_globals entry must be
+ allocated for each thread. This is typically done by default using
+ TX_THREAD_EXTENSION_1 in tx_port.h.
+
+ If __ghs_GetThreadLocalStorageItem is customized to return a
+ per-thread errno value, you should also:
+
+ * Customize the System Library for your project
+ * Define the preprocessor symbol USE_THREAD_LOCAL_ERRNO in
+ src/libsys/ind_errn.c
+
+ If you customize the System Library, you should remove ind_thrd.c
+ from the libsys.gpj subproject.
+
+ */
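+
+/* Illustrative sketch only (not enabled here): because TX_THREAD_EXTENSION_2
+   in this port's tx_port.h adds an "int Errno;" field to TX_THREAD, the
+   __ghs_TLS_Errno case below could return a per-thread errno address with:
+
+       case (int)__ghs_TLS_Errno:
+           if (_tx_thread_current_ptr)
+               ptlsitem = (void *)&(_tx_thread_current_ptr -> Errno);
+           break;
+
+   along with defining USE_THREAD_LOCAL_ERRNO in ind_errn.c as noted above. */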
+
+/* Provide global __eh_globals value to support C++ exception handling
+ outside a thread context. This name also forces this module to be
+ included in the linked program instead of the ind_thrd.o module from
+ the System Library libsys.a.
+ */
+static void *__eh_globals;
+
+#pragma ghs startnomisra
+void *__ghs_GetThreadLocalStorageItem(int specifier)
+{
+ void *ptlsitem = (void *)0;
+ switch (specifier) {
+ case (int)__ghs_TLS_Errno:
+ /* Set ptlsitem to the address of the per-thread errno value.
+ The per-thread errno value should have the type int.
+
+ If returning a per-thread errno value, follow the steps
+ above.
+
+ This item is used by numerous library functions.
+ */
+ break;
+ case (int)__ghs_TLS_SignalHandlers:
+ /* Set ptlsitem to the address of the per-thread SignalHandlers
+ array. The per-thread SignalHandlers array should have the
+ array type as in the following declaration:
+ SignalHandler SignalHandlers[_SIGMAX];
+ The SignalHandler type and _SIGMAX constant are defined in
+ ind_thrd.h.
+
+ This item is used by the library functions signal() and
+ raise().
+ */
+ break;
+ case (int)__ghs_TLS_asctime_buff:
+ /* Set ptlsitem to the address of the per-thread asctime_buff
+ array. The per-thread asctime_buff array should have the
+ array type as in the following declaration:
+ char asctime_buff[30];
+
+ This item is used by the library functions asctime() and
+ ctime(). The library provides asctime_r() and ctime_r(),
+ inherently thread-safe versions of these functions.
+ */
+ break;
+ case (int)__ghs_TLS_tmpnam_space:
+ /* Set ptlsitem to the address of the per-thread tmpnam_space
+ array. The per-thread tmpnam_space array should have the
+ array type as in the following declaration:
+ char tmpnam_space[L_tmpnam];
+ The L_tmpnam constant is defined in <stdio.h>.
+
+ This item is used by the library function tmpnam() when
+ passed NULL. The library provides tmpnam_r(), an
+ inherently thread-safe version of tmpnam().
+ */
+ break;
+ case (int)__ghs_TLS_strtok_saved_pos:
+ /* Set ptlsitem to the address of the per-thread
+ strtok_saved_pos pointer. The per-thread strtok_saved_pos
+ pointer should have the type "char *".
+
+ This item is used by the library function strtok().
+ The library provides strtok_r(), an inherently thread-safe
+ version of strtok().
+ */
+ break;
+ case (int)__ghs_TLS_gmtime_temp:
+ /* Set ptlsitem to the address of the per-thread gmtime_temp
+ value. The per-thread gmtime_temp value should have the
+ type "struct tm" defined in time.h, included by indos.h.
+
+ This item is used by the library functions gmtime() and
+ localtime(). The library provides gmtime_r() and
+ localtime_r(), inherently thread-safe versions of these
+ functions.
+ */
+ break;
+ case (int)__ghs_TLS___eh_globals:
+ /* Set ptlsitem to the address of the per-thread __eh_globals
+ value. The per-thread __eh_globals value should have the
+ type "void *".
+
+ This item is used by C++ exception handling.
+ */
+ if (_tx_thread_current_ptr)
+ ptlsitem = (void *)&(_tx_thread_current_ptr->tx_thread_eh_globals);
+ else
+ /* Use the global __eh_globals pointer. */
+ ptlsitem = (void *)&__eh_globals;
+ break;
+ }
+ return ptlsitem;
+}
+#pragma ghs endnomisra
+#else
+/* Thread-local storage routines for Green Hills releases 4.x and 3.x . */
+
+/*
+ * ThreadX C and C++ thread-safe library support routines.
+ *
+ * This implementation merely tries to guarantee thread safety within
+ * individual C library calls such as malloc() and free(), but it does
+ * not attempt to solve the problems associated with the following
+ * multithreaded issues:
+ *
+ * 1. Use of errno. This can be made thread-safe by adding errno
+ * to TX_THREAD_PORT_EXTENSION and using that within a modified
+ * version of libsys/ind_errno.c.
+ *
+ * 2. Thread safety ACROSS library calls. Certain C library calls either
+ * return pointers to statically-allocated data structures or maintain
+ * state across calls. These include strtok(), asctime(), gmtime(),
+ * tmpnam(NULL), signal(). To make such C library routines thread-safe
+ * would require adding a ThreadLocalStorage struct to the thread control
+ * block TX_THREAD. Since relatively few applications make use of these
+ * library routines, the implementation provided here uses a single, global
+ * ThreadLocalStorage data structure rather than greatly increasing the size
+ * of the thread control block TX_THREAD.
+ *
+ * The ThreadX global variable _tx_thread_current_ptr points to the
+ * current thread's control block TX_THREAD. If a ThreadLocalStorage struct
+ * called tx_tls is placed in TX_THREAD, the function GetThreadLocalStorage
+ * should be modified to return &(_tx_thread_current_ptr->tx_tls).
+ */
+
+static ThreadLocalStorage GlobalTLS;
+
+ThreadLocalStorage *GetThreadLocalStorage()
+{
+ return &GlobalTLS;
+}
+#endif
+
+/*
+ * Use a global ThreadX mutex to implement thread safety within C and C++
+ * library routines.
+ *
+ */
+TX_MUTEX __ghLockMutex;
+
+/*
+ * Acquire general lock. Blocks until the lock becomes available.
+ * Use tx_mutex_get to implement __ghsLock
+ */
+void __ghsLock(void)
+{
+ tx_mutex_get(&__ghLockMutex, TX_WAIT_FOREVER);
+}
+
+/*
+ * Release general lock
+ * Use tx_mutex_put to implement __ghsUnlock
+ */
+void __ghsUnlock(void)
+{
+ tx_mutex_put(&__ghLockMutex);
+}
+
+/* ThreadX Initialization function prototype. */
+void _tx_initialize_kernel_setup(void);
+
+void __gh_lock_init(void)
+{
+ /* Initialize the low-level portions of ThreadX. */
+ _tx_initialize_kernel_setup();
+
+ /* Create the global thread lock mutex. */
+ tx_mutex_create(&__ghLockMutex, "__ghLockMutex", TX_NO_INHERIT);
+}
+
+/*
+ Saving State Across setjmp() Calls
+ ==================================
+
+ These routines can be used to save and restore arbitrary state
+ across calls to setjmp() and longjmp().
+*/
+int __ghs_SaveSignalContext(jmp_buf jmpbuf)
+{
+ return 0;
+}
+
+/* Restore arbitrary state across a longjmp() */
+void __ghs_RestoreSignalContext(jmp_buf jmpbuf)
+{
+}
+
+#if defined(__GHS_VERSION_NUMBER) && (__GHS_VERSION_NUMBER < 560)
+/*
+ C++ Exception Handling
+ ======================
+
+ These routines allow C++ exceptions to be used in multiple threads.
+ The default implementation uses __ghs_GetThreadLocalStorageItem
+ to return a thread-specific __eh_globals pointer.
+
+*/
+
+/* Must be called after __cpp_exception_init() is called to allocate
+ * and initialize the per-thread exception handling structure */
+void *__get_eh_globals(void)
+{
+#if defined(__ghs) && (__GHS_VERSION_NUMBER >= 500)
+ return *(void **)__ghs_GetThreadLocalStorageItem(__ghs_TLS___eh_globals);
+#else
+ if (_tx_thread_current_ptr)
+
+ /* Return thread-specific __eh_globals pointer. */
+ return _tx_thread_current_ptr->tx_thread_eh_globals;
+ else
+ /* Return the global __eh_globals pointer. */
+ return GlobalTLS.__eh_globals;
+#endif
+}
+#endif
+
+#if defined(__ghs) && (__GHS_VERSION_NUMBER >= 500)
+#pragma weak __cpp_exception_init
+extern void __cpp_exception_init(void **);
+#pragma weak __cpp_exception_cleanup
+extern void __cpp_exception_cleanup(void **);
+
+/* __tx_cpp_exception_init retrieves the eh_globals field from
+ thread-local storage and calls __cpp_exception_init.
+ */
+void __tx_cpp_exception_init(TX_THREAD *thread_ptr) {
+ void **peh_globals;
+ if(__cpp_exception_init) {
+ if (thread_ptr)
+ peh_globals = &(thread_ptr->tx_thread_eh_globals);
+ else
+ /* Use the global __eh_globals pointer. */
+ peh_globals = &__eh_globals;
+ __cpp_exception_init(peh_globals);
+ }
+}
+
+/* __tx_cpp_exception_cleanup retrieves the eh_globals field from
+ thread-local storage and calls __cpp_exception_cleanup.
+ */
+void __tx_cpp_exception_cleanup(TX_THREAD *thread_ptr) {
+ void **peh_globals;
+ if(__cpp_exception_cleanup) {
+ if (thread_ptr)
+ peh_globals = &(thread_ptr->tx_thread_eh_globals);
+ else
+ /* Use the global __eh_globals pointer. */
+ peh_globals = &__eh_globals;
+ __cpp_exception_cleanup(peh_globals);
+ }
+}
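+
+/* In this port, TX_THREAD_DELETE_EXTENSION in tx_port.h invokes
+   __tx_cpp_exception_cleanup for each deleted thread; the matching
+   create-time hook is expected to invoke __tx_cpp_exception_init in
+   the same way. */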
+
+/* __ghs_cpp_exception_init is called from ind_crt1.o to initialize
+ exceptions for the global context.
+ */
+void __ghs_cpp_exception_init() {
+ __tx_cpp_exception_init((void *)0);
+}
+
+/* __ghs_cpp_exception_cleanup is called from ind_exit.o to clean up
+ exceptions for the global context.
+ */
+void __ghs_cpp_exception_cleanup(TX_THREAD *thread_ptr) {
+ __tx_cpp_exception_cleanup((void *)0);
+}
+#endif
+
+
+/*
+ File Locks
+ ======================
+
+ These routines can be customized to implement per-file locks to allow
+ thread-safe I/O.
+
+*/
+
+/* Acquire lock for FILE *addr */
+void __ghs_flock_file(void *addr)
+{
+ tx_mutex_get((TX_MUTEX *)addr, TX_WAIT_FOREVER);
+}
+
+/* Release lock for FILE *addr */
+void __ghs_funlock_file(void *addr)
+{
+ tx_mutex_put((TX_MUTEX *)addr);
+}
+
+/* Non blocking acquire lock for FILE *addr. May return -1 if */
+/* not implemented. Returns 0 on success and nonzero otherwise. */
+int __ghs_ftrylock_file(void *addr)
+{
+ return -1;
+}
+
+/* Calls to initialize local lock data structures before they */
+/* are used. */
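+/* Note: this implementation points every FILE lock at the single global
+   __ghLockMutex, so all stream I/O is serialized; a per-file TX_MUTEX
+   could be created here instead if finer-grained locking is required. */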
+void __ghs_flock_create(void **addr)
+{
+ *addr = (void *)(&__ghLockMutex);
+}
+void __ghs_flock_destroy(void *addr) {}
+
+
+/*
+ * ThreadX Peak Stack Checking support routines.
+ *
+ * All of these routines are called by MULTI's ThreadX-aware debugging
+ * package to determine the peak stack use for one thread or for all threads.
+ *
+ * These routines are included in this file in order to guarantee that they will
+ * be available while debugging with MULTI. These routines are not referenced by
+ * any other part of the ThreadX system.
+ *
+ * _txs_thread_stack_check: return the peak stack usage for a thread.
+ *
+ * _txs_thread_stack_check_2: store the peak stack usage for all threads
+ * in the tx_thread_stack_size field of each thread
+ * control block, TX_THREAD. This routine takes
+ * advantage of the redundancy within the TX_THREAD
+ * structure since tx_thread_stack_size can be computed
+ * from the tx_thread_stack_start and tx_thread_stack_end
+ * fields of TX_THREAD.
+ *
+ * _txs_thread_stack_check_2_fixup: clean up from the _txs_thread_stack_check_2
+ * call by computing the stack size for each
+ * thread and storing the result in the
+ * tx_thread_stack_size field of each thread control
+ * block TX_THREAD.
+ *
+ * These three routines do not support architectures such as i960 or StarCore
+ * where the stack grows up instead of down.
+ *
+ */
+#ifndef TX_DISABLE_STACK_CHECKING
+
+ULONG _txs_thread_stack_check(TX_THREAD *thread_ptr)
+{
+ CHAR *cp; /* Pointer inside thread's stack. */
+
+ /* Search through the thread's stack to find the highest address modified. */
+ for ( cp = (CHAR *)thread_ptr->tx_thread_stack_start;
+ cp <= (CHAR *)thread_ptr->tx_thread_stack_end; ++cp ) {
+
+ /* Check if this byte in the stack contains something other than TX_STACK_FILL. */
+ if (*cp != (char)TX_STACK_FILL) {
+
+ /* Assume cp points to the location marking the peak stack use.
+ Return the number of bytes from cp up to and including the
+ end of the stack. */
+ return (((ULONG)thread_ptr->tx_thread_stack_end) - (ULONG)cp + 1);
+ }
+ }
+ return thread_ptr->tx_thread_stack_size;
+}
+
+
+int _txs_thread_stack_check_2(void) {
+ CHAR * cp; /* Pointer inside thread's stack. */
+ TX_THREAD * tp; /* Pointer to each thread. */
+
+ /* If no threads are created, return immediately. */
+ if (!_tx_thread_created_count)
+ return 0;
+
+ /* Start iterating through the threads in the system. Assume that we always
+ have at least one thread (the system timer thread) in the system. */
+ tp = _tx_thread_created_ptr;
+
+ do {
+
+ /* Search through the thread's stack to find the highest address modified. */
+ for ( cp = (CHAR *)tp->tx_thread_stack_start; cp <= (CHAR *)tp->tx_thread_stack_end;
+ ++cp ) {
+
+ /* Check if this byte in the stack contains something other than TX_STACK_FILL. */
+ if (*cp != (char)TX_STACK_FILL) {
+
+ /* Assume cp points to the location marking the peak stack use.
+ Store the number of bytes from cp up to and including the
+ end of the stack in the tx_thread_stack_size field. */
+ tp->tx_thread_stack_size = ((ULONG)tp->tx_thread_stack_end) - (ULONG)cp + 1;
+ break;
+ }
+
+ }
+
+ /* Continue with the next thread. */
+ tp = tp->tx_thread_created_next;
+
+ /* Loop until we point to the first thread again. */
+ } while ( tp != _tx_thread_created_ptr );
+
+ return 0;
+}
+
+int _txs_thread_stack_check_2_fixup(void) {
+ TX_THREAD * tp; /* Pointer to each thread. */
+
+ /* If no threads are created, return immediately. */
+ if (!_tx_thread_created_count)
+ return 0;
+
+ /* Start iterating through the threads in the system. Assume that we always
+ have at least one thread (the system timer thread) in the system. */
+ tp = _tx_thread_created_ptr;
+
+ do {
+
+ /* Compute the tx_thread_stack_size field by using the tx_thread_stack_end and
+ tx_thread_stack_start fields. */
+ tp->tx_thread_stack_size = (ULONG)tp->tx_thread_stack_end-(ULONG)tp->tx_thread_stack_start+1;
+
+ /* Continue with the next thread. */
+ tp = tp->tx_thread_created_next;
+
+ /* Loop until we point to the first thread again. */
+ } while ( tp != _tx_thread_created_ptr );
+
+ return 0;
+}
+
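+/* Illustrative only: in normal use these routines are called by MULTI's
+   ThreadX-aware debugging package, but application or diagnostic code could
+   also query a thread's peak stack usage directly.  The 90% threshold below
+   is an arbitrary example. */
+#if 0
+VOID example_report_stack_use(TX_THREAD *thread_ptr)
+{
+ULONG peak_bytes;
+
+    /* Peak number of bytes ever used on this thread's stack. */
+    peak_bytes = _txs_thread_stack_check(thread_ptr);
+
+    /* Flag threads that have consumed more than 90% of their stack. */
+    if (peak_bytes > ((thread_ptr -> tx_thread_stack_size * 9) / 10))
+    {
+        /* Handle the near-overflow condition here. */
+    }
+}
+#endif
+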
+#endif /* TX_DISABLE_STACK_CHECKING */
diff --git a/ports/cortex_a5/ghs/src/tx_ghse.c b/ports/cortex_a5/ghs/src/tx_ghse.c
new file mode 100644
index 00000000..6369df77
--- /dev/null
+++ b/ports/cortex_a5/ghs/src/tx_ghse.c
@@ -0,0 +1,49 @@
+/*
+ * ThreadX C++ Library Support
+ *
+ * Copyright 1983-2019 Green Hills Software LLC.
+ *
+ * This program is the property of Green Hills Software LLC.,
+ * its contents are proprietary information and no part of it
+ * is to be disclosed to anyone except employees of Green Hills
+ * Software LLC., or as agreed in writing signed by the President
+ * of Green Hills Software LLC.
+ */
+#include "tx_ghs.h"
+#ifndef TX_DISABLE_ERROR_CHECKING
+#define TX_DISABLE_ERROR_CHECKING
+#endif
+#include "tx_api.h"
+
+/*
+ C++ Exception Handling
+ ======================
+
+ These routines allow C++ exceptions to be used in multiple threads.
+ The default implementation uses __ghs_GetThreadLocalStorageItem
+ to return a thread-specific __eh_globals pointer.
+
+*/
+
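+/* Illustrative usage: with this support in place each ThreadX thread has its
+   own __eh_globals, so exceptions thrown in one thread cannot disturb another
+   thread's exception state.  A thread entry written in C++ might simply do
+   (do_work is a hypothetical application function):
+
+       void worker_entry(ULONG input)
+       {
+           try
+           {
+               do_work(input);
+           }
+           catch (const std::exception &e)
+           {
+               // Handled entirely within this thread's exception context.
+           }
+       }
+*/
+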
+#if defined(__ghs) && (__GHS_VERSION_NUMBER >= 560)
+#ifdef _WIN32
+/* Windows uses a different linker, so include a stub routine, never called,
+ to pull in __cpp_exception_init and __cpp_exception_cleanup */
+extern void __cpp_exception_init(void **);
+extern void __cpp_exception_cleanup(void **);
+void __tx_win32_pull_in_exceptions(void) {
+ __cpp_exception_init(0);
+ __cpp_exception_cleanup(0);
+}
+#else
+#pragma ghs reference __cpp_exception_init
+#pragma ghs reference __cpp_exception_cleanup
+#endif
+
+/* Must be called after __cpp_exception_init() is called to allocate
+ * and initialize the per-thread exception handling structure */
+void *__get_eh_globals(void)
+{
+ return *(void **)__ghs_GetThreadLocalStorageItem(__ghs_TLS___eh_globals);
+}
+#endif
diff --git a/ports/cortex_a5/green/src/tx_thread_context_restore.arm b/ports/cortex_a5/ghs/src/tx_thread_context_restore.arm
similarity index 94%
rename from ports/cortex_a5/green/src/tx_thread_context_restore.arm
rename to ports/cortex_a5/ghs/src/tx_thread_context_restore.arm
index 72a63fa9..d97dd1d2 100644
--- a/ports/cortex_a5/green/src/tx_thread_context_restore.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_context_restore.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -41,47 +41,44 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_restore Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function restores the interrupt context if it is processing a */
-/* nested interrupt. If not, it returns to the interrupt thread if no */
-/* preemption is necessary. Otherwise, if preemption is necessary or */
-/* if no thread was running, the function returns to the scheduler. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling routine */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs Interrupt Service Routines */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_context_restore(VOID)
@@ -117,13 +114,13 @@ _tx_thread_context_restore:
LDR r3, =_tx_thread_system_state # Pickup address of system state var
LDR r2, [r3] # Pickup system state
SUB r2, r2, 1 # Decrement the counter
- STR r2, [r3] # Store the counter
+ STR r2, [r3] # Store the counter
CMP r2, 0 # Was this the first interrupt?
BEQ __tx_thread_not_nested_restore # If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDMIA sp!, {r0, r10, r12, lr} # Recover SPSR, POI, and scratch regs
@@ -135,7 +132,7 @@ _tx_thread_context_restore:
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
- /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
|| (_tx_thread_preempt_disable))
{ */
@@ -225,7 +222,7 @@ _tx_skip_irq_vfp_save:
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
_tx_timer_time_slice = 0; */
-
+
STR r2, [r0, 24] # Save thread's time-slice
MOV r2, 0 # Clear value
STR r2, [r3] # Disable global time-slice flag
diff --git a/ports/cortex_a5/green/src/tx_thread_context_save.arm b/ports/cortex_a5/ghs/src/tx_thread_context_save.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_context_save.arm
rename to ports/cortex_a5/ghs/src/tx_thread_context_save.arm
index dd728a21..9eee5620 100644
--- a/ports/cortex_a5/green/src/tx_thread_context_save.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_context_save.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -39,46 +39,43 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_context_save Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function saves the context of an executing thread in the */
-/* beginning of interrupt processing. The function also ensures that */
-/* the system stack is used upon return to the calling ISR. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_context_save(VOID)
@@ -93,7 +90,7 @@ _tx_thread_context_save:
/* if (_tx_thread_system_state++)
{ */
- STMDB sp!, {r0-r3} # Save some working registers
+ STMDB sp!, {r0-r3} # Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
#ifdef TX_BEFORE_ARMV6
@@ -119,7 +116,7 @@ _tx_thread_context_save:
calling ISR. */
MRS r0, SPSR # Pickup saved SPSR
- SUB lr, lr, 4 # Adjust point of interrupt
+ SUB lr, lr, 4 # Adjust point of interrupt
STMDB sp!, {r0, r10, r12, lr} # Store other registers
/* Return to the ISR. */
@@ -135,7 +132,7 @@ _tx_thread_context_save:
POP {lr} # Recover ISR lr
#endif
- B __tx_irq_processing_return # Continue IRQ processing
+ B __tx_irq_processing_return # Continue IRQ processing
__tx_thread_not_nested_save:
/* } */
@@ -149,13 +146,13 @@ __tx_thread_not_nested_save:
LDR r1, =_tx_thread_current_ptr # Pickup address of current thread ptr
LDR r0, [r1] # Pickup current thread pointer
CMP r0, 0 # Is it NULL?
- BEQ __tx_thread_idle_system_save # If so, interrupt occurred in
+ BEQ __tx_thread_idle_system_save # If so, interrupt occurred in
/* # scheduling loop - nothing needs saving! */
/* Save minimal context of interrupted thread. */
MRS r2, SPSR # Pickup saved SPSR
- SUB lr, lr, 4 # Adjust point of interrupt
+ SUB lr, lr, 4 # Adjust point of interrupt
STMDB sp!, {r2, r10, r12, lr} # Store other registers
/* Save the current stack pointer in the thread's control block. */
@@ -175,7 +172,7 @@ __tx_thread_not_nested_save:
POP {lr} # Recover ISR lr
#endif
- B __tx_irq_processing_return # Continue IRQ processing
+ B __tx_irq_processing_return # Continue IRQ processing
/* }
else
@@ -185,7 +182,7 @@ __tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
- /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
processing. */
MOV r10, 0 # Clear stack limit
@@ -200,7 +197,7 @@ __tx_thread_idle_system_save:
#endif
ADD sp, sp, 16 # Recover saved registers
- B __tx_irq_processing_return # Continue IRQ processing
+ B __tx_irq_processing_return # Continue IRQ processing
.type _tx_thread_context_save,$function
.size _tx_thread_context_save,.-_tx_thread_context_save
diff --git a/ports/cortex_a5/green/src/tx_thread_fiq_context_restore.arm b/ports/cortex_a5/ghs/src/tx_thread_fiq_context_restore.arm
similarity index 94%
rename from ports/cortex_a5/green/src/tx_thread_fiq_context_restore.arm
rename to ports/cortex_a5/ghs/src/tx_thread_fiq_context_restore.arm
index ed25cc55..555aaa47 100644
--- a/ports/cortex_a5/green/src/tx_thread_fiq_context_restore.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_fiq_context_restore.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -42,47 +42,44 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fiq_context_restore Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function restores the fiq interrupt context when processing a */
-/* nested interrupt. If not, it returns to the interrupt thread if no */
-/* preemption is necessary. Otherwise, if preemption is necessary or */
-/* if no thread was running, the function returns to the scheduler. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling routine */
-/* */
-/* CALLED BY */
-/* */
-/* FIQ ISR Interrupt Service Routines */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_fiq_context_restore(VOID)
@@ -114,13 +111,13 @@ _tx_thread_fiq_context_restore:
LDR r3, =_tx_thread_system_state # Pickup address of system state var
LDR r2, [r3] # Pickup system state
SUB r2, r2, 1 # Decrement the counter
- STR r2, [r3] # Store the counter
+ STR r2, [r3] # Store the counter
CMP r2, 0 # Was this the first interrupt?
BEQ __tx_thread_fiq_not_nested_restore # If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDMIA sp!, {r0, r10, r12, lr} # Recover SPSR, POI, and scratch regs
@@ -132,7 +129,7 @@ _tx_thread_fiq_context_restore:
__tx_thread_fiq_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
- /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
|| (_tx_thread_preempt_disable))
{ */
diff --git a/ports/cortex_a5/green/src/tx_thread_fiq_context_save.arm b/ports/cortex_a5/ghs/src/tx_thread_fiq_context_save.arm
similarity index 91%
rename from ports/cortex_a5/green/src/tx_thread_fiq_context_save.arm
rename to ports/cortex_a5/ghs/src/tx_thread_fiq_context_save.arm
index 33b03bbd..ab9d6fa7 100644
--- a/ports/cortex_a5/green/src/tx_thread_fiq_context_save.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_fiq_context_save.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -33,46 +33,43 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fiq_context_save Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function saves the context of an executing thread in the */
-/* beginning of interrupt processing. The function also ensures that */
-/* the system stack is used upon return to the calling ISR. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_fiq_context_save(VOID)
@@ -87,7 +84,7 @@ _tx_thread_fiq_context_save:
/* if (_tx_thread_system_state++)
{ */
- STMDB sp!, {r0-r3} # Save some working registers
+ STMDB sp!, {r0-r3} # Save some working registers
LDR r3, =_tx_thread_system_state # Pickup address of system state var
LDR r2, [r3] # Pickup system state
CMP r2, 0 # Is this the first interrupt?
@@ -102,7 +99,7 @@ _tx_thread_fiq_context_save:
calling ISR. */
MRS r0, SPSR # Pickup saved SPSR
- SUB lr, lr, 4 # Adjust point of interrupt
+ SUB lr, lr, 4 # Adjust point of interrupt
STMDB sp!, {r0, r10, r12, lr} # Store other registers
/* Return to the ISR. */
@@ -118,7 +115,7 @@ _tx_thread_fiq_context_save:
POP {lr} # Recover ISR lr
#endif
- B __tx_fiq_processing_return # Continue FIQ processing
+ B __tx_fiq_processing_return # Continue FIQ processing
__tx_thread_fiq_not_nested_save:
/* } */
@@ -132,16 +129,16 @@ __tx_thread_fiq_not_nested_save:
LDR r1, =_tx_thread_current_ptr # Pickup address of current thread ptr
LDR r0, [r1] # Pickup current thread pointer
CMP r0, 0 # Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save # If so, interrupt occurred in
+ BEQ __tx_thread_fiq_idle_system_save # If so, interrupt occurred in
/* # scheduling loop - nothing needs saving! */
/* Save minimal context of interrupted thread. */
MRS r2, SPSR # Pickup saved SPSR
- SUB lr, lr, 4 # Adjust point of interrupt
+ SUB lr, lr, 4 # Adjust point of interrupt
STMDB sp!, {r2, lr} # Store other registers, Note that we don't
- /* # need to save sl and ip since FIQ has
- # copies of these registers. Nested
+ /* # need to save sl and ip since FIQ has
+ # copies of these registers. Nested
# interrupt processing does need to save
# these registers. */
@@ -162,7 +159,7 @@ __tx_thread_fiq_not_nested_save:
POP {lr} # Recover ISR lr
#endif
- B __tx_fiq_processing_return # Continue FIQ processing
+ B __tx_fiq_processing_return # Continue FIQ processing
/* }
else
@@ -182,15 +179,15 @@ __tx_thread_fiq_idle_system_save:
#endif
/* Not much to do here, save the current SPSR and LR for possible
- use in IRQ interrupted in idle system conditions, and return to
+ use in IRQ interrupted in idle system conditions, and return to
FIQ interrupt processing. */
MRS r0, SPSR # Pickup saved SPSR
- SUB lr, lr, 4 # Adjust point of interrupt
+ SUB lr, lr, 4 # Adjust point of interrupt
STMDB sp!, {r0, lr} # Store other registers that will get used
- /* # or stripped off the stack in context
+ /* # or stripped off the stack in context
# restore */
- B __tx_fiq_processing_return # Continue FIQ processing
+ B __tx_fiq_processing_return # Continue FIQ processing
.type _tx_thread_fiq_context_save,$function
.size _tx_thread_fiq_context_save,.-_tx_thread_fiq_context_save
diff --git a/ports/cortex_a5/green/src/tx_thread_fiq_nesting_end.arm b/ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_end.arm
similarity index 91%
rename from ports/cortex_a5/green/src/tx_thread_fiq_nesting_end.arm
rename to ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_end.arm
index 156bf31c..5c020da9 100644
--- a/ports/cortex_a5/green/src/tx_thread_fiq_nesting_end.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_end.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -41,48 +41,48 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fiq_nesting_end Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is called by the application from FIQ mode after */
-/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-/* processing from system mode back to FIQ mode prior to the ISR */
-/* calling _tx_thread_fiq_context_restore. Note that this function */
-/* assumes the system stack pointer is in the same position after */
-/* nesting start function was called. */
-/* */
-/* This function assumes that the system mode stack pointer was setup */
-/* during low-level initialization (tx_initialize_low_level.arm). */
-/* */
-/* This function returns with FIQ interrupts disabled. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.arm). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_fiq_nesting_start.arm b/ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_start.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_fiq_nesting_start.arm
rename to ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_start.arm
index e085b508..9d16ec52 100644
--- a/ports/cortex_a5/green/src/tx_thread_fiq_nesting_start.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_fiq_nesting_start.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -39,45 +39,45 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_fiq_nesting_start Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is called by the application from FIQ mode after */
-/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-/* processing to the system mode so nested FIQ interrupt processing */
-/* is possible (system mode has its own "lr" register). Note that */
-/* this function assumes that the system mode stack pointer was setup */
-/* during low-level initialization (tx_initialize_low_level.arm). */
-/* */
-/* This function returns with FIQ interrupts enabled. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.arm). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_interrupt_control.arm b/ports/cortex_a5/ghs/src/tx_thread_interrupt_control.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_interrupt_control.arm
rename to ports/cortex_a5/ghs/src/tx_thread_interrupt_control.arm
index d10afac0..96040828 100644
--- a/ports/cortex_a5/green/src/tx_thread_interrupt_control.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_interrupt_control.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -38,39 +38,39 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_control Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for changing the interrupt lockout */
-/* posture of the system. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_interrupt_disable.arm b/ports/cortex_a5/ghs/src/tx_thread_interrupt_disable.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_interrupt_disable.arm
rename to ports/cortex_a5/ghs/src/tx_thread_interrupt_disable.arm
index a67b3135..e93ed24f 100644
--- a/ports/cortex_a5/green/src/tx_thread_interrupt_disable.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_interrupt_disable.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -38,38 +38,38 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_disable Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is responsible for disabling interrupts */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* old_posture Old interrupt lockout posture */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_interrupt_restore.arm b/ports/cortex_a5/ghs/src/tx_thread_interrupt_restore.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_interrupt_restore.arm
rename to ports/cortex_a5/ghs/src/tx_thread_interrupt_restore.arm
index 37a1fc67..f1927ea1 100644
--- a/ports/cortex_a5/green/src/tx_thread_interrupt_restore.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_interrupt_restore.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -31,39 +31,39 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_interrupt_restore Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function is responsible for restoring interrupts to the state */
/* returned by a previous _tx_thread_interrupt_disable call. */
-/* */
-/* INPUT */
-/* */
-/* new_posture New interrupt lockout posture */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* Application Code */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_irq_nesting_end.arm b/ports/cortex_a5/ghs/src/tx_thread_irq_nesting_end.arm
similarity index 91%
rename from ports/cortex_a5/green/src/tx_thread_irq_nesting_end.arm
rename to ports/cortex_a5/ghs/src/tx_thread_irq_nesting_end.arm
index f58cbdce..94d79d31 100644
--- a/ports/cortex_a5/green/src/tx_thread_irq_nesting_end.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_irq_nesting_end.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -40,48 +40,48 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_irq_nesting_end Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is called by the application from IRQ mode after */
-/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-/* processing from system mode back to IRQ mode prior to the ISR */
-/* calling _tx_thread_context_restore. Note that this function */
-/* assumes the system stack pointer is in the same position after */
-/* nesting start function was called. */
-/* */
-/* This function assumes that the system mode stack pointer was setup */
-/* during low-level initialization (tx_initialize_low_level.arm). */
-/* */
-/* This function returns with IRQ interrupts disabled. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.arm). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_irq_nesting_start.arm b/ports/cortex_a5/ghs/src/tx_thread_irq_nesting_start.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_irq_nesting_start.arm
rename to ports/cortex_a5/ghs/src/tx_thread_irq_nesting_start.arm
index 6a1c0671..d87221cd 100644
--- a/ports/cortex_a5/green/src/tx_thread_irq_nesting_start.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_irq_nesting_start.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -38,45 +38,45 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_irq_nesting_start Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is called by the application from IRQ mode after */
-/* _tx_thread_context_save has been called and switches the IRQ */
-/* processing to the system mode so nested IRQ interrupt processing */
-/* is possible (system mode has its own "lr" register). Note that */
-/* this function assumes that the system mode stack pointer was setup */
-/* during low-level initialization (tx_initialize_low_level.arm). */
-/* */
-/* This function returns with IRQ interrupts enabled. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.arm). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
diff --git a/ports/cortex_a5/green/src/tx_thread_schedule.arm b/ports/cortex_a5/ghs/src/tx_thread_schedule.arm
similarity index 94%
rename from ports/cortex_a5/green/src/tx_thread_schedule.arm
rename to ports/cortex_a5/ghs/src/tx_thread_schedule.arm
index 3c8f172d..8463cc28 100644
--- a/ports/cortex_a5/green/src/tx_thread_schedule.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_schedule.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -38,48 +38,45 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_schedule Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function waits for a thread control block pointer to appear in */
-/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-/* in the variable, the corresponding thread is resumed. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
-/* _tx_initialize_kernel_enter ThreadX entry function */
-/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_schedule(VOID)
@@ -114,7 +111,7 @@ __tx_thread_schedule_loop:
/* }
while(_tx_thread_execute_ptr == TX_NULL); */
-
+
/* Yes! We have a thread to execute. Lockout interrupts and
transfer control to it. */
@@ -137,7 +134,7 @@ __tx_thread_schedule_loop:
MOV r0, v1 # Restore temp register
#endif
- LDR r1, =_tx_thread_current_ptr # Pickup address of current thread
+ LDR r1, =_tx_thread_current_ptr # Pickup address of current thread
STR r0, [r1] # Setup current thread pointer
/* Increment the run count for this thread. */
@@ -151,7 +148,7 @@ __tx_thread_schedule_loop:
/* Setup time-slice, if present. */
/* _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice; */
- LDR r2, =_tx_timer_time_slice # Pickup address of time slice
+ LDR r2, =_tx_timer_time_slice # Pickup address of time slice
/* # variable */
LDR sp, [r0, 8] # Switch stack pointers
STR r3, [r2] # Setup time-slice
@@ -204,7 +201,7 @@ _tx_skip_solicited_vfp_restore:
.type _tx_thread_schedule,$function
.size _tx_thread_schedule,.-_tx_thread_schedule
-
+
#ifdef __VFP__
.globl tx_thread_vfp_enable
tx_thread_vfp_enable:
diff --git a/ports/cortex_a5/green/src/tx_thread_stack_build.arm b/ports/cortex_a5/ghs/src/tx_thread_stack_build.arm
similarity index 96%
rename from ports/cortex_a5/green/src/tx_thread_stack_build.arm
rename to ports/cortex_a5/ghs/src/tx_thread_stack_build.arm
index 9775e47c..2bf063c8 100644
--- a/ports/cortex_a5/green/src/tx_thread_stack_build.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_stack_build.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -41,41 +41,41 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_stack_build Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
+/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
-/* function pointer. */
-/* */
-/* INPUT */
-/* */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
/* thread_ptr Pointer to thread control blk */
/* function_ptr Pointer to return function */
-/* */
-/* OUTPUT */
-/* */
+/* */
+/* OUTPUT */
+/* */
/* None */
-/* */
-/* CALLS */
-/* */
+/* */
+/* CALLS */
+/* */
/* None */
-/* */
-/* CALLED BY */
-/* */
+/* */
+/* CALLED BY */
+/* */
/* _tx_thread_create Create thread service */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@@ -86,10 +86,10 @@
.globl _tx_thread_stack_build
_tx_thread_stack_build:
-
+
/* Build a fake interrupt frame. The form of the fake interrupt stack
on the Cortex-A5 should look like the following after it is built:
-
+
Stack Top: 1 Interrupt stack frame type
CPSR Initial value for CPSR
r0 (a1) Initial value for r0
diff --git a/ports/cortex_a5/green/src/tx_thread_system_return.arm b/ports/cortex_a5/ghs/src/tx_thread_system_return.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_system_return.arm
rename to ports/cortex_a5/ghs/src/tx_thread_system_return.arm
index d829a476..04bd477d 100644
--- a/ports/cortex_a5/green/src/tx_thread_system_return.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_system_return.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -37,47 +37,44 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_system_return Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function is target processor specific. It is used to transfer */
-/* control from a thread back to the ThreadX system. Only a */
-/* minimal context is saved since the compiler assumes temp registers */
-/* are going to get slicked by a function call anyway. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_thread_schedule Thread scheduling loop */
-/* */
-/* CALLED BY */
-/* */
-/* ThreadX components */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_system_return(VOID)
@@ -93,7 +90,7 @@ _tx_thread_system_return:
LDR r4, =_tx_thread_current_ptr # Pickup address of current ptr
LDR r5, [r4] # Pickup current thread pointer
-
+
#ifdef __VFP__
LDR r1, [r5, 144] # Pickup the VFP enabled flag
CMP r1, 0 # Is the VFP enabled?
@@ -108,7 +105,7 @@ _tx_skip_solicited_vfp_save:
MOV r0, #0 # Build a solicited stack type
MRS r1, CPSR # Pickup the CPSR
STMDB sp!, {r0-r1} # Save type and CPSR
-
+
/* Lockout interrupts. */
#ifdef TX_BEFORE_ARMV6
diff --git a/ports/cortex_a5/green/src/tx_thread_vectored_context_save.arm b/ports/cortex_a5/ghs/src/tx_thread_vectored_context_save.arm
similarity index 92%
rename from ports/cortex_a5/green/src/tx_thread_vectored_context_save.arm
rename to ports/cortex_a5/ghs/src/tx_thread_vectored_context_save.arm
index ec73f813..93c4845d 100644
--- a/ports/cortex_a5/green/src/tx_thread_vectored_context_save.arm
+++ b/ports/cortex_a5/ghs/src/tx_thread_vectored_context_save.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -39,46 +39,43 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_thread_vectored_context_save Cortex-A5/Green Hills */
-/* 6.1.9 */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save Cortex-A5/Green Hills */
+/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function saves the context of an executing thread in the */
-/* beginning of interrupt processing. The function also ensures that */
-/* the system stack is used upon return to the calling ISR. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* None */
-/* */
-/* CALLED BY */
-/* */
-/* ISRs */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 William E. Lamie Modified comment(s), added */
-/* execution profile support, */
-/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
/* VOID _tx_thread_vectored_context_save(VOID)
@@ -142,7 +139,7 @@ __tx_thread_not_nested_save:
LDR r1, =_tx_thread_current_ptr # Pickup address of current thread ptr
LDR r0, [r1] # Pickup current thread pointer
CMP r0, 0 # Is it NULL?
- BEQ __tx_thread_idle_system_save # If so, interrupt occurred in
+ BEQ __tx_thread_idle_system_save # If so, interrupt occurred in
/* # scheduling loop - nothing needs saving! */
/* Note: Minimal context of interrupted thread is already saved. */
@@ -174,7 +171,7 @@ __tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
- /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
processing. */
MOV r10, 0 # Clear stack limit
diff --git a/ports/cortex_a5/green/src/tx_timer_interrupt.arm b/ports/cortex_a5/ghs/src/tx_timer_interrupt.arm
similarity index 95%
rename from ports/cortex_a5/green/src/tx_timer_interrupt.arm
rename to ports/cortex_a5/ghs/src/tx_timer_interrupt.arm
index 336b0176..ea416b7b 100644
--- a/ports/cortex_a5/green/src/tx_timer_interrupt.arm
+++ b/ports/cortex_a5/ghs/src/tx_timer_interrupt.arm
@@ -12,8 +12,8 @@
/**************************************************************************/
/**************************************************************************/
-/** */
-/** ThreadX Component */
+/** */
+/** ThreadX Component */
/** */
/** Timer */
/** */
@@ -32,43 +32,43 @@
.text
.align 4
-/**************************************************************************/
-/* */
-/* FUNCTION RELEASE */
-/* */
-/* _tx_timer_interrupt Cortex-A5/Green Hills */
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt Cortex-A5/Green Hills */
/* 6.1 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This function processes the hardware timer interrupt. This */
-/* processing includes incrementing the system clock and checking for */
-/* time slice and/or timer expiration. If either is found, the */
-/* interrupt context save/restore functions are called along with the */
-/* expiration functions. */
-/* */
-/* INPUT */
-/* */
-/* None */
-/* */
-/* OUTPUT */
-/* */
-/* None */
-/* */
-/* CALLS */
-/* */
-/* _tx_timer_expiration_process Process timer expiration */
-/* _tx_thread_time_slice Time slice interrupted thread */
-/* */
-/* CALLED BY */
-/* */
-/* interrupt vector */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Process timer expiration */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@@ -95,7 +95,7 @@ _tx_timer_interrupt:
/* if (_tx_timer_time_slice)
{ */
- LDR r3, =_tx_timer_time_slice # Pickup address of time-slice
+ LDR r3, =_tx_timer_time_slice # Pickup address of time-slice
LDR r2, [r3] # Pickup time-slice
CMP r2, 0 # Is it non-active?
BEQ __tx_timer_no_time_slice # Yes, skip time-slice processing
@@ -212,7 +212,7 @@ __tx_timer_dont_activate:
/* if (_tx_timer_expired_time_slice)
{ */
- LDR r3, =_tx_timer_expired_time_slice # Pickup addr of time-slice expired
+ LDR r3, =_tx_timer_expired_time_slice # Pickup addr of time-slice expired
LDR r2, [r3] # Pickup the actual flag
CMP r2, 0 # See if the flag is set
BEQ __tx_timer_not_ts_expiration # No, skip time-slice processing
diff --git a/ports/cortex_a5/ghs/src/txr_ghs.c b/ports/cortex_a5/ghs/src/txr_ghs.c
new file mode 100644
index 00000000..19572e2b
--- /dev/null
+++ b/ports/cortex_a5/ghs/src/txr_ghs.c
@@ -0,0 +1,84 @@
+/*
+ * ThreadX API Runtime Error Support
+ *
+ * Copyright 1983-2019 Green Hills Software LLC.
+ *
+ * This program is the property of Green Hills Software LLC.,
+ * its contents are proprietary information and no part of it
+ * is to be disclosed to anyone except employees of Green Hills
+ * Software LLC., or as agreed in writing signed by the President
+ * of Green Hills Software LLC.
+ */
+
+/* #include "tx_ghs.h" */
+#ifndef TX_DISABLE_ERROR_CHECKING
+#define TX_DISABLE_ERROR_CHECKING
+#endif
+#include "tx_api.h"
+
+/* Customized ThreadX API runtime error support routine. */
+
+void _rnerr(int num, int linenum, const char*str, void*ptr, ...);
+
+/* __ghs_rnerr()
+ This is the custom runtime error checking routine.
+ This implementation uses the existing __rnerr() routine.
+ Another implementation could use the .syscall mechanism,
+ provided MULTI was modified to understand that.
+ */
+void __ghs_rnerr(char *errMsg, int stackLevels, int stackTraceDisplay, void *hexVal) {
+ TX_INTERRUPT_SAVE_AREA
+ int num;
+ /*
+ Initialize the stack levels value.
+
+ Add 3 to account for the calls to _rnerr, __rnerr, and
+ __ghs_rnerr.
+
+ If the implementation changes, calls to __ghs_rnerr
+ will not need to be changed.
+
+ Zero is not permitted, so substitute 3 in that case.
+ */
+ num = (stackLevels+3) & 0xf;
+ if (!num) {
+ num = 3;
+ }
+ /*
+ Shift the stack levels value to bits 12..15 and
+ insert the stack trace display value in bit 11.
+ Bits 0..10 are unused.
+ */
+ num = (num << 12) | (stackTraceDisplay ? 0x800 : 0);
+
+ /* This will mask all interrupts in the RTEC code, which is probably
+ unacceptable for many targets. */
+ TX_DISABLE
+ _rnerr(num, -1, (const char *)hexVal, (void *)errMsg);
+ TX_RESTORE
+}
+
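+/* Illustrative usage (not part of this port): report a runtime error showing
+   four stack levels with the stack trace display enabled, passing the
+   offending pointer as the hexadecimal value.  Both the message text and
+   "queue_ptr" are hypothetical:
+
+       __ghs_rnerr("Unexpected queue state", 4, 1, (void *)queue_ptr);
+*/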
+
+/* ThreadX thread stack checking runtime support routine. */
+
+extern char __ghsbegin_stack[];
+extern TX_THREAD *_tx_thread_current_ptr;
+
+void __stkchk(void) {
+ int i;
+ if(_tx_thread_current_ptr)
+ {
+ if((unsigned)(&i) <=
+ (unsigned)(_tx_thread_current_ptr -> tx_thread_stack_start))
+ {
+ _rnerr(21, -1, 0, 0);
+ }
+ }
+ else
+ {
+ if((unsigned)(&i) <= (unsigned)__ghsbegin_stack)
+ {
+ _rnerr(21, -1, 0, 0);
+ }
+ }
+}
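+
+/* Illustrative manual use: __stkchk compares the current stack position
+   against the active thread's stack base (it is normally assumed to be
+   invoked by compiler-generated stack-checking code).  A deeply recursive
+   application routine could also call it explicitly on entry:
+
+       void recurse(int depth)
+       {
+           __stkchk();
+           if (depth > 0)
+           {
+               recurse(depth - 1);
+           }
+       }
+*/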
diff --git a/ports/cortex_a5/gnu/example_build/libc.a b/ports/cortex_a5/gnu/example_build/libc.a
deleted file mode 100644
index 5b04fa4e..00000000
Binary files a/ports/cortex_a5/gnu/example_build/libc.a and /dev/null differ
diff --git a/ports/cortex_a5/gnu/example_build/libgcc.a b/ports/cortex_a5/gnu/example_build/libgcc.a
deleted file mode 100644
index d7353496..00000000
Binary files a/ports/cortex_a5/gnu/example_build/libgcc.a and /dev/null differ
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a53/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..f51a736a
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,158 @@
diff --git a/ports/arc_em/metaware/test_regression/threadx_regression/.project b/ports/cortex_a53/ac6/example_build/sample_threadx/.project
similarity index 77%
rename from ports/arc_em/metaware/test_regression/threadx_regression/.project
rename to ports/cortex_a53/ac6/example_build/sample_threadx/.project
index 3a0bbbec..a1b15572 100644
--- a/ports/arc_em/metaware/test_regression/threadx_regression/.project
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/.project
@@ -1,6 +1,6 @@
- <name>sample_threadx_regression</name>
+ <name>sample_threadx</name>
@@ -23,11 +23,4 @@
 <nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
 <nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
- <linkedResources>
- <link>
- <name>Regression</name>
- <type>2</type>
- <locationURI>PARENT-5-PROJECT_LOC/TX/Test/Regression</locationURI>
- </link>
- </linkedResources>
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3.h b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..dfe37586
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - assign a shared peripheral interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - assign a block of 32 shared peripheral
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - assign all shared peripheral interrupts
+ * to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * The model for calling Redistributor functions is that, rather than
+ * identifying the target redistributor with every function call, the
+ * SelectRedistributor() function is used to identify which redistributor
+ * is to be used for all functions until a different redistributor is
+ * explicitly selected
+ */
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
+
+/*
+ * GetPrivateIntPriority - read the priority of a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * Int priority
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * GetPrivateIntPending - query whether a private (SGI/PPI) interrupt is pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntSecurity - assign a private (SGI/PPI) interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetPrivateIntSecurityBlock - assign all 32 private (SGI/PPI)
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * group - the group for the interrupt
+ *
+ * Returns:
+ *
+ *
+ */
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group);
+
+#endif /* ndef GICV3_h */
+
+/* EOF GICv3.h */
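
The prototypes above read most naturally as a bring-up sequence. A hedged sketch follows; the SPI number (42), priority (0xA0), affinity, and group choices are illustrative values, not taken from the port.

    #include "GICv3.h"

    /* Sketch: enable the Distributor with affinity routing, then configure
       and enable one SPI. All numeric choices here are illustrative. */
    static void example_gicd_bringup(void)
    {
        ConfigGICD(gicdctlr_EnableAll | gicdctlr_ARE_S | gicdctlr_ARE_NS);

        SetSPIPriority(42, 0xA0);                       /* mid-range priority    */
        ConfigureSPI(42, gicdicfgr_Level);              /* level-triggered       */
        SetSPIRoute(42, gicv3PackAffinity(0, 0, 0, 0),  /* route to core 0.0.0.0 */
                    gicdirouter_ModeSpecific);
        SetSPISecurity(42, gicigroupr_G1NS);            /* Non-secure Group 1    */
        EnableSPI(42);
    }
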
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_aliases.h
new file mode 100644
index 00000000..826ba973
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_aliases.h
@@ -0,0 +1,113 @@
+//
+// Aliases for GICv3 registers
+//
+// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef GICV3_ALIASES_H
+#define GICV3_ALIASES_H
+
+#ifndef __clang__
+
+/*
+ * Mapping of MSR and MRS to physical and virtual CPU interface registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-5
+ */
+#define ICC_AP0R0_EL1 S3_0_C12_C8_4
+#define ICC_AP0R1_EL1 S3_0_C12_C8_5
+#define ICC_AP0R2_EL1 S3_0_C12_C8_6
+#define ICC_AP0R3_EL1 S3_0_C12_C8_7
+
+#define ICC_AP1R0_EL1 S3_0_C12_C9_0
+#define ICC_AP1R1_EL1 S3_0_C12_C9_1
+#define ICC_AP1R2_EL1 S3_0_C12_C9_2
+#define ICC_AP1R3_EL1 S3_0_C12_C9_3
+
+#define ICC_ASGI1R_EL1 S3_0_C12_C11_6
+
+#define ICC_BPR0_EL1 S3_0_C12_C8_3
+#define ICC_BPR1_EL1 S3_0_C12_C12_3
+
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+
+#define ICC_DIR_EL1 S3_0_C12_C11_1
+
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+
+/*
+ * Mapping of MSR and MRS to virtual interface control registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-6
+ */
+#define ICH_AP0R0_EL2 S3_4_C12_C8_0
+#define ICH_AP0R1_EL2 S3_4_C12_C8_1
+#define ICH_AP0R2_EL2 S3_4_C12_C8_2
+#define ICH_AP0R3_EL2 S3_4_C12_C8_3
+
+#define ICH_AP1R0_EL2 S3_4_C12_C9_0
+#define ICH_AP1R1_EL2 S3_4_C12_C9_1
+#define ICH_AP1R2_EL2 S3_4_C12_C9_2
+#define ICH_AP1R3_EL2 S3_4_C12_C9_3
+
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+
+#define ICH_ELRSR_EL2 S3_4_C12_C11_5
+
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define ICH_LR0_EL2 S3_4_C12_C12_0
+#define ICH_LR1_EL2 S3_4_C12_C12_1
+#define ICH_LR2_EL2 S3_4_C12_C12_2
+#define ICH_LR3_EL2 S3_4_C12_C12_3
+#define ICH_LR4_EL2 S3_4_C12_C12_4
+#define ICH_LR5_EL2 S3_4_C12_C12_5
+#define ICH_LR6_EL2 S3_4_C12_C12_6
+#define ICH_LR7_EL2 S3_4_C12_C12_7
+#define ICH_LR8_EL2 S3_4_C12_C13_0
+#define ICH_LR9_EL2 S3_4_C12_C13_1
+#define ICH_LR10_EL2 S3_4_C12_C13_2
+#define ICH_LR11_EL2 S3_4_C12_C13_3
+#define ICH_LR12_EL2 S3_4_C12_C13_4
+#define ICH_LR13_EL2 S3_4_C12_C13_5
+#define ICH_LR14_EL2 S3_4_C12_C13_6
+#define ICH_LR15_EL2 S3_4_C12_C13_7
+
+#endif /* not __clang__ */
+
+#endif /* GICV3_ALIASES_H */
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicc.h
new file mode 100644
index 00000000..998d92b5
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicc.h
@@ -0,0 +1,254 @@
+/*
+ * GICv3_gicc.h - prototypes and inline functions for GICC system register operations
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_gicc_h
+#define GICV3_gicc_h
+
+#include "GICv3_aliases.h"
+
+#define stringify_no_expansion(x) #x
+#define stringify(x) stringify_no_expansion(x)
+
+/**********************************************************************/
+
+typedef enum
+{
+ sreSRE = (1 << 0),
+ sreDFB = (1 << 1),
+ sreDIB = (1 << 2),
+ sreEnable = (1 << 3)
+} ICC_SREBits_t;
+
+static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL2)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL2(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL2)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ igrpEnable = (1 << 0),
+ igrpEnableGrp1NS = (1 << 0),
+ igrpEnableGrp1S = (1 << 2)
+} ICC_IGRPBits_t;
+
+static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN0_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ ctlrCBPR = (1 << 0),
+ ctlrCBPR_EL1S = (1 << 0),
+ ctlrEOImode = (1 << 1),
+ ctlrCBPR_EL1NS = (1 << 1),
+ ctlrEOImode_EL3 = (1 << 2),
+ ctlrEOImode_EL1S = (1 << 3),
+ ctlrEOImode_EL1NS = (1 << 4),
+ ctlrRM = (1 << 5),
+ ctlrPMHE = (1 << 6)
+} ICC_CTLRBits_t;
+
+static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+static inline uint64_t getICC_IAR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_IAR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_EOIR0(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR0_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_EOIR1(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR1_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_DIR(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_DIR_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_PMR(uint32_t priority)
+{
+ asm("msr "stringify(ICC_PMR_EL1)", %0\n; isb" :: "r" ((uint64_t)priority));
+}
+
+static inline void setICC_BPR0(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR0_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline void setICC_BPR1(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR1_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline uint64_t getICC_BPR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_BPR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_RPR(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_RPR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ sgirIRMTarget = 0,
+ sgirIRMAll = (1ull << 40)
+} ICC_SGIRBits_t;
+
+static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI0R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_ASGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+#endif /* ndef GICV3_gicc_h */
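
A hedged sketch of how the accessors above combine into an acknowledge/EOI sequence on the CPU interface; whether the Group 0 or Group 1 registers apply depends on how the Distributor and Redistributor were configured, and the priority mask value is illustrative.

    #include <stdint.h>
    #include "GICv3_gicc.h"

    static void example_gicc_service_one(void)
    {
        setICC_SRE_EL1(sreSRE);          /* system-register interface on    */
        setICC_PMR(0xFF);                /* accept all interrupt priorities */
        setICC_IGRPEN1_EL1(igrpEnable);  /* enable Group 1 at this EL       */

        uint32_t intid = (uint32_t)getICC_IAR1();    /* acknowledge         */
        if (intid < 1020)                /* 1020-1023 are special INTIDs    */
        {
            /* ... service the interrupt here ... */
            setICC_EOIR1(intid);         /* signal completion               */
        }
    }
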
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicd.c
new file mode 100644
index 00000000..2cf1553b
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicd.c
@@ -0,0 +1,339 @@
+/*
+ * GICv3_gicd.c - generic driver code for GICv3 distributor
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include <stdint.h>
+
+#include "GICv3.h"
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000
+ const volatile uint32_t GICD_TYPER; // +0x0004
+ const volatile uint32_t GICD_IIDR; // +0x0008
+
+ const volatile uint32_t padding0; // +0x000c
+
+ volatile uint32_t GICD_STATUSR; // +0x0010
+
+ const volatile uint32_t padding1[3]; // +0x0014
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040
+ const volatile uint32_t padding2; // +0x0044
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048
+ const volatile uint32_t padding3; // +0x004c
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050
+ const volatile uint32_t padding4; // +0x0054
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058
+
+ const volatile uint32_t padding5[3]; // +0x005c
+
+ volatile uint32_t GICD_SEIR; // +0x0068
+
+ const volatile uint32_t padding6[5]; // +0x006c
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400
+ volatile uint8_t GICD_ITARGETSR[1024]; // +0x0800
+ volatile uint32_t GICD_ICFGR[64]; // +0x0c00
+ volatile uint32_t GICD_IGRPMODR[32]; // +0x0d00
+ const volatile uint32_t padding7[32]; // +0x0d80
+ volatile uint32_t GICD_NSACR[64]; // +0x0e00
+
+ volatile uint32_t GICD_SGIR; // +0x0f00
+
+ const volatile uint32_t padding8[3]; // +0x0f04
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0f10
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0f20
+
+ const volatile uint32_t padding9[52]; // +0x0f30
+ const volatile uint32_t padding10[5120]; // +0x1000
+
+ volatile uint64_t GICD_IROUTER[1024]; // +0x6000
+} GICv3_distributor;
+
+/*
+ * use the scatter file to place GICD
+ */
+static GICv3_distributor __attribute__((section(".bss.distributor"))) gicd;
+
+void ConfigGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR |= flags;
+}
+
+void DisableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR &= ~flags;
+}
+
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync)
+{
+ if (dosync)
+ {
+ const uint32_t tmask = gicdctlr_ARE_S | gicdctlr_ARE_NS;
+ const uint32_t tval = flags & tmask;
+
+ while ((gicd.GICD_CTLR & tmask) != tval)
+ continue;
+ }
+ else
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ISENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void DisableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ICENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void SetSPIPriority(uint32_t id, uint32_t priority)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ gicd.GICD_IPRIORITYR[bank] = priority;
+}
+
+uint32_t GetSPIPriority(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ return (uint32_t)(gicd.GICD_IPRIORITYR[bank]);
+}
+
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ gicd.GICD_IROUTER[bank] = affinity | (uint64_t)mode;
+}
+
+uint64_t GetSPIRoute(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ return gicd.GICD_IROUTER[bank];
+}
+
+void SetSPITarget(uint32_t id, uint32_t target)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ gicd.GICD_ITARGETSR[bank] = target;
+}
+
+uint32_t GetSPITarget(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt, holding an
+ * 8-bit target bitmap
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ return (uint32_t)(gicd.GICD_ITARGETSR[bank]);
+}
+
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config)
+{
+ uint32_t bank, tmp;
+
+ /*
+ * GICD_ICFGR has 16 interrupts per register, i.e. 2-bits of
+ * configuration per register
+ */
+ bank = (id >> 4) & RANGE_LIMIT(gicd.GICD_ICFGR);
+ config &= 3;
+
+ id = (id & 0xf) << 1;
+
+ tmp = gicd.GICD_ICFGR[bank];
+ tmp &= ~(3 << id);
+ tmp |= config << id;
+ gicd.GICD_ICFGR[bank] = tmp;
+}
+
+void SetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ISPENDR[bank] = 1 << id;
+}
+
+void ClearSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ICPENDR[bank] = 1 << id;
+}
+
+uint32_t GetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ return (gicd.GICD_ICPENDR[bank] >> id) & 1;
+}
+
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group)
+{
+ uint32_t bank, groupmod;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_IGROUPR);
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicd.GICD_IGROUPR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGROUPR[bank] &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicd.GICD_IGRPMODR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGRPMODR[bank] &= ~(1 << id);
+}
+
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group)
+{
+ uint32_t groupmod;
+ const uint32_t nbits = (sizeof group * 8) - 1;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ block &= RANGE_LIMIT(gicd.GICD_IGROUPR);
+
+ /*
+ * get each bit of group config duplicated over all 32-bits in a word
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicd.GICD_IGROUPR[block] = group;
+ gicd.GICD_IGRPMODR[block] = groupmod;
+}
+
+void SetSPISecurityAll(GICIGROUPRBits_t group)
+{
+ uint32_t block;
+
+ /*
+ * GICD_TYPER.ITLinesNumber gives (No. SPIS / 32) - 1, and we
+ * want to iterate over all blocks excluding 0 (which are the
+ * SGI/PPI interrupts, and not relevant here)
+ */
+ for (block = (gicd.GICD_TYPER & ((1 << 5) - 1)); block > 0; --block)
+ SetSPISecurityBlock(block, group);
+}
+
+/* EOF GICv3_gicd.c */
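
To make the banking arithmetic in EnableSPI()/ConfigureSPI() above concrete, here is the same calculation worked through for one illustrative SPI number.

    #include <stdint.h>

    /* Worked example of the register banking used above, for SPI id 42. */
    static void example_gicd_banking(void)
    {
        uint32_t id = 42;
        uint32_t enable_bank = id >> 5;          /* 1: 32 enable bits per GICD_ISENABLER */
        uint32_t enable_bit  = id & 31;          /* 10                                   */
        uint32_t cfg_bank    = id >> 4;          /* 2: 16 config fields per GICD_ICFGR   */
        uint32_t cfg_shift   = (id & 0xf) << 1;  /* 20: 2 config bits per interrupt      */

        (void)enable_bank; (void)enable_bit; (void)cfg_bank; (void)cfg_shift;
    }
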
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicr.c
new file mode 100644
index 00000000..d91aeb27
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/GICv3_gicr.c
@@ -0,0 +1,308 @@
+/*
+ * GICv3_gicr.c - generic driver code for GICv3 redistributor
+ *
+ * Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include "GICv3.h"
+
+/*
+ * physical LPI Redistributor register map
+ */
+typedef struct
+{
+ volatile uint32_t GICR_CTLR; // +0x0000 - RW - Redistributor Control Register
+ const volatile uint32_t GICR_IIDR; // +0x0004 - RO - Implementer Identification Register
+ const volatile uint32_t GICR_TYPER[2]; // +0x0008 - RO - Redistributor Type Register
+ volatile uint32_t GICR_STATUSR; // +0x0010 - RW - Error Reporting Status Register, optional
+ volatile uint32_t GICR_WAKER; // +0x0014 - RW - Redistributor Wake Register
+ const volatile uint32_t padding1[2]; // +0x0018 - RESERVED
+#ifndef USE_GIC600
+ volatile uint32_t IMPDEF1[8]; // +0x0020 - ?? - IMPLEMENTATION DEFINED
+#else
+ volatile uint32_t GICR_FCTLR; // +0x0020 - RW - Function Control Register
+ volatile uint32_t GICR_PWRR; // +0x0024 - RW - Power Management Control Register
+ volatile uint32_t GICR_CLASS; // +0x0028 - RW - Class Register
+ const volatile uint32_t padding2[5]; // +0x002C - RESERVED
+#endif
+ volatile uint64_t GICR_SETLPIR; // +0x0040 - WO - Set LPI Pending Register
+ volatile uint64_t GICR_CLRLPIR; // +0x0048 - WO - Clear LPI Pending Register
+ const volatile uint32_t padding3[8]; // +0x0050 - RESERVED
+ volatile uint64_t GICR_PROPBASER; // +0x0070 - RW - Redistributor Properties Base Address Register
+ volatile uint64_t GICR_PENDBASER; // +0x0078 - RW - Redistributor LPI Pending Table Base Address Register
+ const volatile uint32_t padding4[8]; // +0x0080 - RESERVED
+ volatile uint64_t GICR_INVLPIR; // +0x00A0 - WO - Redistributor Invalidate LPI Register
+ const volatile uint32_t padding5[2]; // +0x00A8 - RESERVED
+ volatile uint64_t GICR_INVALLR; // +0x00B0 - WO - Redistributor Invalidate All Register
+ const volatile uint32_t padding6[2]; // +0x00B8 - RESERVED
+ volatile uint64_t GICR_SYNCR; // +0x00C0 - RO - Redistributor Synchronize Register
+ const volatile uint32_t padding7[2]; // +0x00C8 - RESERVED
+ const volatile uint32_t padding8[12]; // +0x00D0 - RESERVED
+ volatile uint64_t IMPDEF2; // +0x0100 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding9[2]; // +0x0108 - RESERVED
+ volatile uint64_t IMPDEF3; // +0x0110 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding10[2]; // +0x0118 - RESERVED
+} GICv3_redistributor_RD;
+
+/*
+ * SGI and PPI Redistributor register map
+ */
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR0; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+ const volatile uint32_t padding8[31]; // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+ volatile uint32_t GICR_ICnoFGR[2]; // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+ volatile uint32_t GICR_IGRPMODR0; // +0x0D00 - RW - ????
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+} GICv3_redistributor_SGI;
+
+/*
+ * We have a multiplicity of GIC Redistributors; on the GIC-AEM and
+ * GIC-500 they are arranged as one 128KB region per redistributor: one
+ * 64KB page of GICR LPI registers, and one 64KB page of GICR Private
+ * Int registers
+ */
+typedef struct
+{
+ union
+ {
+ GICv3_redistributor_RD RD_base;
+ uint8_t padding[64 * 1024];
+ } RDblock;
+
+ union
+ {
+ GICv3_redistributor_SGI SGI_base;
+ uint8_t padding[64 * 1024];
+ } SGIblock;
+} GICv3_GICR;
+
+/*
+ * use the scatter file to place GIC Redistributor base address
+ *
+ * although this code doesn't know how many Redistributor banks
+ * a particular system will have, we declare gicrbase as an array
+ * to avoid unwanted compiler optimisations when calculating the
+ * base of a particular Redistributor bank
+ */
+static const GICv3_GICR gicrbase[2] __attribute__((section (".bss.redistributor")));
+
+/**********************************************************************/
+
+/*
+ * utility functions to calculate base of a particular
+ * Redistributor bank
+ */
+
+static inline GICv3_redistributor_RD *const getgicrRD(uint32_t gicr)
+{
+ GICv3_GICR *const arraybase = (GICv3_GICR *const)&gicrbase;
+
+ return &((arraybase + gicr)->RDblock.RD_base);
+}
+
+static inline GICv3_redistributor_SGI *const getgicrSGI(uint32_t gicr)
+{
+ GICv3_GICR *arraybase = (GICv3_GICR *)(&gicrbase);
+
+ return &(arraybase[gicr].SGIblock.SGI_base);
+}
+
+/**********************************************************************/
+
+// This function walks a block of RDs to find one with the matching affinity
+uint32_t GetGICR(uint32_t affinity)
+{
+ GICv3_redistributor_RD* gicr;
+ uint32_t index = 0;
+
+ do
+ {
+ gicr = getgicrRD(index);
+ if (gicr->GICR_TYPER[1] == affinity)
+ return index;
+
+ index++;
+ }
+ while((gicr->GICR_TYPER[0] & (1<<4)) == 0); // Keep looking until GICR_TYPER.Last reports no more RDs in block
+
+ return 0xFFFFFFFF; // return -1 to signal no matching RD was found
+}
+
+void WakeupGICR(uint32_t gicr)
+{
+ GICv3_redistributor_RD *const gicrRD = getgicrRD(gicr);
+#ifdef USE_GIC600
+ //Power up Re-distributor for GIC-600
+ gicrRD->GICR_PWRR = 0x2;
+#endif
+
+ /*
+ * step 1 - ensure GICR_WAKER.ProcessorSleep is off
+ */
+ gicrRD->GICR_WAKER &= ~gicrwaker_ProcessorSleep;
+
+ /*
+ * step 2 - wait for children asleep to be cleared
+ */
+ while ((gicrRD->GICR_WAKER & gicrwaker_ChildrenAsleep) != 0)
+ continue;
+
+ /*
+ * OK, GICR is go
+ */
+ return;
+}
+
+void EnablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISENABLER = 1 << id;
+}
+
+void DisablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICENABLER = 1 << id;
+}
+
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ gicrSGI->GICR_IPRIORITYR[id] = priority;
+}
+
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ return (uint32_t)(gicrSGI->GICR_IPRIORITYR[id]);
+}
+
+void SetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISPENDR = 1 << id;
+}
+
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ICPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICPENDR = 1 << id;
+}
+
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ return (gicrSGI->GICR_ISPENDR >> id) & 0x01;
+}
+
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ uint32_t groupmod;
+
+ /*
+ * GICR_IGROUPR0 is one 32-bit register
+ */
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicrSGI->GICR_IGROUPR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGROUPR0 &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicrSGI->GICR_IGRPMODR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGRPMODR0 &= ~(1 << id);
+}
+
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ const uint32_t nbits = (sizeof group * 8) - 1;
+ uint32_t groupmod;
+
+ /*
+ * get each bit of group config duplicated over all 32-bits
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicrSGI->GICR_IGROUPR0 = group;
+ gicrSGI->GICR_IGRPMODR0 = groupmod;
+}
+
+/* EOF GICv3_gicr.c */
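
A hedged per-core bring-up sketch using the Redistributor routines above; GetGICR() is defined in this file but not declared in GICv3.h, so the extern here is local to the sketch, and the PPI number and priority are illustrative.

    #include <stdint.h>
    #include "GICv3.h"

    extern uint32_t GetGICR(uint32_t affinity);   /* from GICv3_gicr.c above */

    static void example_gicr_bringup(uint32_t core_affinity)
    {
        uint32_t gicr = GetGICR(core_affinity);
        if (gicr == 0xFFFFFFFF)
            return;                               /* no matching Redistributor */

        WakeupGICR(gicr);
        SetPrivateIntPriority(gicr, 30, 0xA0);    /* PPI 30: illustrative choice */
        SetPrivateIntSecurity(gicr, 30, gicigroupr_G1NS);
        EnablePrivateInt(gicr, 30);
    }
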
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.S
new file mode 100644
index 00000000..c787c3f5
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.S
@@ -0,0 +1,133 @@
+//
+// Armv8-A AArch64 - Basic Mutex Example
+// Includes the option (USE_LSE_ATOMIC) to use Large System Extension (LSE) atomics introduced in Armv8.1-A
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+ .global _mutex_initialize
+ .global _mutex_acquire
+ .global _mutex_release
+
+//
+// These routines implement the mutex management functions required for running
+// the Arm C library in a multi-threaded environment.
+//
+// They use a value of 0 to represent an unlocked mutex, and 1 for a locked mutex
+//
+// **********************************************************************
+//
+
+ .type _mutex_initialize, "function"
+ .cfi_startproc
+_mutex_initialize:
+
+ //
+ // mark the mutex as unlocked
+ //
+ mov w1, #0
+ str w1, [x0]
+
+ //
+ // we are running multi-threaded, so set a non-zero return
+ // value (function prototype says use 1)
+ //
+ mov w0, #1
+ ret
+ .cfi_endproc
+
+#if !defined(USE_LSE_ATOMIC)
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+
+ //
+ // send ourselves an event, so we don't stick on the wfe at the
+ // top of the loop
+ //
+ sevl
+
+ //
+ // wait until the mutex is available
+ //
+loop:
+ wfe
+ ldaxr w1, [x0]
+ cbnz w1, loop
+
+ //
+ // mutex is (at least, it was) available - try to claim it
+ //
+ mov w1, #1
+ stxr w2, w1, [x0]
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+
+ mov w1, #0
+ stlr w1, [x0]
+ ret
+ .cfi_endproc
+
+#else // LSE version
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+ // This uses a "ticket lock". The lock is stored as a 32-bit value:
+ // - the upper 16-bits record the thread's ticket number ("take a ticket")
+ // - the lower 16-bits record the ticket being served ("now serving")
+
+ // atomically load then increment the thread's ticket number ("take a ticket")
+ mov w3, #(1 << 16)
+ ldadda w3, w1, [x0]
+
+ // is the ticket now being served?
+ eor w2, w1, w1, ror #16
+ cbz w2, loop_exit
+
+ // no, so wait for the ticket to be served
+
+ // send a local event to avoid missing an unlock before the exclusive load
+ sevl
+
+loop:
+ wfe
+ ldaxrh w3, [x0]
+ eor w2, w3, w1, lsr #16
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+loop_exit:
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+ mov w1, #1
+ staddlh w1, [x0]
+ ret
+ .cfi_endproc
+#endif
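
For readers more comfortable in C, here is a hedged rendering of the LSE ticket lock above using C11 atomics; it mirrors the ldadda/staddlh sequence but, as a simplification, the release adds 1 to the whole 32-bit word rather than to the low halfword only.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Sketch: upper 16 bits = next ticket to hand out, lower 16 bits =
       ticket now being served, matching the layout described above. */
    static void ticket_lock_acquire(_Atomic uint32_t *lock)
    {
        /* "take a ticket": fetch the old value and bump the upper half */
        uint32_t ticket = atomic_fetch_add(lock, 1u << 16) >> 16;

        /* wait until "now serving" reaches our ticket */
        while ((atomic_load(lock) & 0xFFFFu) != ticket)
            ;
    }

    static void ticket_lock_release(_Atomic uint32_t *lock)
    {
        /* advance "now serving"; ignores 16-bit wrap for simplicity */
        atomic_fetch_add(lock, 1u);
    }
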
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.h b/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.h
new file mode 100644
index 00000000..ec1a1d28
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/MP_Mutexes.h
@@ -0,0 +1,66 @@
+/*
+ * Armv8-A AArch64 - Basic Mutex Example
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef MP_MUTEX_H
+#define MP_MUTEX_H
+
+/*
+ * The Arm C library calls-out to these functions to manage multithreading.
+ * They can also be called by user application code.
+ *
+ * Mutex type is specified by the Arm C library
+ *
+ * Declare function prototypes for libc mutex routines
+ */
+typedef signed int *mutex;
+
+/*
+ * int _mutex_initialize(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ * 0 - application is non-threaded
+ * 1 - application is threaded
+ * The C library uses the return result to indicate whether it is being used in a multithreaded environment.
+ */
+int _mutex_initialize(mutex *m);
+
+/*
+ * void _mutex_acquire(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * Routine does not return until the mutex has been claimed. A load-acquire
+ * is used to guarantee that the mutex claim is properly ordered with
+ * respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_acquire(mutex *m);
+
+/*
+ * void _mutex_release(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * A store-release is used to guarantee that the mutex release is properly
+ * ordered with respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_release(mutex *m);
+
+#endif
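
A hedged usage sketch of the hooks declared above; demo_lock and shared_counter are illustrative names, and, as the implementation above shows, the address passed in is treated directly as the 32-bit lock word.

    #include "MP_Mutexes.h"

    static mutex demo_lock;               /* its storage doubles as the lock word */
    static volatile int shared_counter;

    static void demo_counter_init(void)
    {
        _mutex_initialize(&demo_lock);
    }

    static void demo_counter_increment(void)
    {
        _mutex_acquire(&demo_lock);       /* blocks until the lock is claimed  */
        shared_counter++;
        _mutex_release(&demo_lock);       /* store-release hands the lock back */
    }
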
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a53/ac6/example_build/sample_threadx/PPM_AEM.h
new file mode 100644
index 00000000..f7501eeb
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/PPM_AEM.h
@@ -0,0 +1,66 @@
+//
+// Private Peripheral Map for the v8 Architecture Envelope Model
+//
+// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef PPM_AEM_H
+#define PPM_AEM_H
+
+//
+// Distributor layout
+//
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_IGROUP 0x0080
+#define GICD_ISENABLE 0x0100
+#define GICD_ICENABLE 0x0180
+#define GICD_ISPEND 0x0200
+#define GICD_ICPEND 0x0280
+#define GICD_ISACTIVE 0x0300
+#define GICD_ICACTIVE 0x0380
+#define GICD_IPRIORITY 0x0400
+#define GICD_ITARGETS 0x0800
+#define GICD_ICFG 0x0c00
+#define GICD_PPISR 0x0d00
+#define GICD_SPISR 0x0d04
+#define GICD_SGIR 0x0f00
+#define GICD_CPENDSGI 0x0f10
+#define GICD_SPENDSGI 0x0f20
+#define GICD_PIDR4 0x0fd0
+#define GICD_PIDR5 0x0fd4
+#define GICD_PIDR6 0x0fd8
+#define GICD_PIDR7 0x0fdc
+#define GICD_PIDR0 0x0fe0
+#define GICD_PIDR1 0x0fe4
+#define GICD_PIDR2 0x0fe8
+#define GICD_PIDR3 0x0fec
+#define GICD_CIDR0 0x0ff0
+#define GICD_CIDR1 0x0ff4
+#define GICD_CIDR2 0x0ff8
+#define GICD_CIDR3 0x0ffc
+
+//
+// CPU Interface layout
+//
+#define GICC_CTLR 0x0000
+#define GICC_PMR 0x0004
+#define GICC_BPR 0x0008
+#define GICC_IAR 0x000c
+#define GICC_EOIR 0x0010
+#define GICC_RPR 0x0014
+#define GICC_HPPIR 0x0018
+#define GICC_ABPR 0x001c
+#define GICC_AIAR 0x0020
+#define GICC_AEOIR 0x0024
+#define GICC_AHPPIR 0x0028
+#define GICC_APR0 0x00d0
+#define GICC_NSAPR0 0x00e0
+#define GICC_IIDR 0x00fc
+#define GICC_DIR 0x1000
+
+#endif // PPM_AEM_H
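
The macros above are byte offsets only; a hedged sketch of how they might be used follows, where GICD_BASE is a placeholder (taken here as the FVP Base model's Distributor address) and must really come from the platform memory map.

    #include <stdint.h>
    #include "PPM_AEM.h"

    #define GICD_BASE  0x2F000000u   /* assumed/illustrative base address */

    static inline uint32_t read_gicd_typer(void)
    {
        return *(volatile uint32_t *)(GICD_BASE + GICD_TYPER);
    }
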
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..17cceb01
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,393 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_TIMER timer_0;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
+
+/* Define main entry point. */
+
+int main(void)
+{
+
+ /* Initialize timer. */
+ init_timer();
+
+ /* Enter ThreadX. */
+ tx_kernel_enter();
+
+ return 0;
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in a while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these threads compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This function is executed from thread 6 and thread 7.  As the loop
+       below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..f55da65f
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,325 @@
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..e5783c7c
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,103 @@
+;********************************************************
+; Scatter file for Armv8-A Startup code on FVP Base model
+; Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;********************************************************
+
+LOAD 0x80000000
+{
+ EXEC +0
+ {
+ startup.o (StartUp, +FIRST)
+ * (+RO, +RW, +ZI)
+ }
+
+ ;
+ ; App stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ ARM_LIB_STACK +0 ALIGN 64 EMPTY 8 * 0x4000 {}
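+    ; (0x4000 bytes per CPU, matching the per-CPU 'lsl #14' offset
+    ;  applied to ARM_LIB_STACK$$ZI$$Limit in startup.S)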
+
+ ;
+ ; Separate heap - import symbol __use_two_region_memory
+ ; in source code for this to work correctly
+ ;
+ ARM_LIB_HEAP +0 ALIGN 64 EMPTY 0xA0000 {}
+
+ ;
+ ; Handler stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ HANDLER_STACK +0 ALIGN 64 EMPTY 4 * 0x4000 {}
+
+ ;
+ ; Stacks for EL3
+ ;
+ EL3_STACKS +0 ALIGN 64 EMPTY 8 * 0x1000 {}
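+    ; (0x1000 bytes per CPU, matching the per-CPU 'lsl #12' offset
+    ;  applied to EL3_STACKS$$ZI$$Limit in startup.S)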
+ ;
+ ; Strictly speaking, the L1 tables do not need to
+ ; be so strongly aligned, but no matter
+ ;
+ TTB0_L1 +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; Various sets of L2 tables
+ ;
+ ; Alignment is 4KB, since the code uses a 4K page
+ ; granularity - larger granularities would require
+ ; correspondingly stricter alignment
+ ;
+ TTB0_L2_RAM +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PRIVATE +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PERIPH +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; The startup code uses the end of this region to calculate
+ ; the top of memory - do not place any RAM regions after it
+ ;
+ TOP_OF_RAM +0 EMPTY 4 {}
+
+ ;
+ ; CS3 Peripherals is a 64MB region from 0x1c000000
+ ; that includes the following:
+ ; System Registers at 0x1C010000
+ ; UART0 (PL011) at 0x1C090000
+ ; Color LCD Controller (PL111) at 0x1C1F0000
+ ; plus a number of others.
+ ; CS3_PERIPHERALS is used by the startup code for page-table generation
+ ; This region is not truly empty, but we have no
+ ; predefined objects that live within it
+ ;
+ CS3_PERIPHERALS 0x1c000000 EMPTY 0x90000 {}
+
+ ;
+ ; Place the UART peripheral registers data structure
+ ; This is only really needed if USE_SERIAL_PORT is defined, but
+ ; the linker will remove unused sections if not needed
+; PL011 0x1c090000 UNINIT 0x1000
+; {
+; uart.o (+ZI)
+; }
+ ; Note that some other CS3_PERIPHERALS follow this
+
+ ;
+ ; GICv3 distributor
+ ;
+ GICD 0x2f000000 UNINIT 0x8000
+ {
+ GICv3_gicd.o (.bss.distributor)
+ }
+
+ ;
+ ; GICv3 redistributors
+ ; 128KB for each redistributor in the system
+ ;
+ GICR 0x2f100000 UNINIT 0x80000
+ {
+ GICv3_gicr.o (.bss.redistributor)
+ }
+}
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.c
new file mode 100644
index 00000000..c2ce6faa
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.c
@@ -0,0 +1,122 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "sp804_timer.h"
+
+#define TIMER_SP804_CTRL_TIMEREN (1 << 7)
+#define TIMER_SP804_CTRL_TIMERMODE   (1 << 6)   // Bit 6: 0=free-running, 1=periodic
+#define TIMER_SP804_CTRL_INTENABLE (1 << 5)
+#define TIMER_SP804_CTRL_TIMERSIZE (1 << 1) // Bit 1: 0=16-bit, 1=32-bit
+#define TIMER_SP804_CTRL_ONESHOT (1 << 0) // Bit 0: 0=wrapping, 1=one-shot
+
+#define TIMER_SP804_CTRL_PRESCALE_1 (0 << 2) // clk/1
+#define TIMER_SP804_CTRL_PRESCALE_4 (1 << 2) // clk/4
+#define TIMER_SP804_CTRL_PRESCALE_8 (2 << 2) // clk/8
+
+struct sp804_timer
+{
+ volatile uint32_t Time1Load; // +0x00
+ const volatile uint32_t Time1Value; // +0x04 - RO
+ volatile uint32_t Timer1Control; // +0x08
+ volatile uint32_t Timer1IntClr; // +0x0C - WO
+ const volatile uint32_t Timer1RIS; // +0x10 - RO
+ const volatile uint32_t Timer1MIS; // +0x14 - RO
+  volatile uint32_t Timer1BGLoad;          // +0x18
+  const volatile uint32_t Reserved1C;      // +0x1C - reserved (keeps the Timer2 registers at their documented offsets)
+
+  volatile uint32_t Time2Load;             // +0x20
+  volatile uint32_t Time2Value;            // +0x24
+  volatile uint32_t Timer2Control;         // +0x28
+ volatile uint32_t Timer2IntClr; // +0x2C - WO
+ const volatile uint32_t Timer2RIS; // +0x30 - RO
+ const volatile uint32_t Timer2MIS; // +0x34 - RO
+ volatile uint32_t Timer2BGLoad; // +0x38
+
+ // Not including ID registers
+
+};
+
+// Pointer to the SP804 dual timer; set at run time by setTimerBaseAddress()
+struct sp804_timer* dual_timer;
+
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address)
+{
+ dual_timer = (struct sp804_timer*)address;
+ return;
+}
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt)
+{
+ uint32_t tmp = 0;
+
+ dual_timer->Time1Load = load_value;
+
+  // Fixed settings: 32-bit counter, no prescaling, periodic timer mode
+ tmp = TIMER_SP804_CTRL_TIMERSIZE | TIMER_SP804_CTRL_PRESCALE_1 | TIMER_SP804_CTRL_TIMERMODE;
+
+ // Settings from parameters: interrupt generation & reload
+ tmp = tmp | interrupt | auto_reload;
+
+ // Write control register
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
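+
+// Example usage (see init_timer() in timer_interrupts.c):
+//   setTimerBaseAddress(0x1C110000);
+//   initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+//   startTimer();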
+
+
+// Starts the timer
+void startTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp | TIMER_SP804_CTRL_TIMEREN; // Set TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Stops the timer
+void stopTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp & ~TIMER_SP804_CTRL_TIMEREN; // Clear TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void)
+{
+ return dual_timer->Time1Value;
+}
+
+
+void clearTimerIrq(void)
+{
+ // A write to this register, of any value, clears the interrupt
+ dual_timer->Timer1IntClr = 1;
+}
+
+
+// ------------------------------------------------------------
+// End of sp804_timer.c
+// ------------------------------------------------------------
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.h
new file mode 100644
index 00000000..4d423904
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/sp804_timer.h
@@ -0,0 +1,53 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+// Header File
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#ifndef _SP804_TIMER_
+#define _SP804_TIMER_
+
+#include <stdint.h>
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address);
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+
+#define SP804_AUTORELOAD (0)
+#define SP804_SINGLESHOT (1)
+#define SP804_GENERATE_IRQ (1 << 5)
+#define SP804_NO_IRQ (0)
+
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt);
+
+
+// Starts the timer
+void startTimer(void);
+
+
+// Stops the timer
+void stopTimer(void);
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void);
+
+
+// Clears the timer interrupt
+void clearTimerIrq(void);
+
+#endif
+
+// ------------------------------------------------------------
+// End of sp804_timer.h
+// ------------------------------------------------------------
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a53/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..3952a200
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,779 @@
+// ------------------------------------------------------------
+// Armv8-A MPCore EL3 AArch64 Startup Code
+//
+// Basic Vectors, MMU, caches and GICv3 initialization
+//
+// Exits in EL1 AArch64
+//
+// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_mmu.h"
+#include "v8_system.h"
+#include "GICv3_aliases.h"
+
+ .section StartUp, "ax"
+ .balign 4
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+
+ .global InvalidateUDCaches
+ .global ZeroBlock
+
+ .global SetPrivateIntSecurityBlock
+ .global SetSPISecurityAll
+ .global SetPrivateIntPriority
+
+ .global GetGICR
+ .global WakeupGICR
+ .global SyncAREinGICD
+ .global EnableGICD
+ .global EnablePrivateInt
+ .global GetPrivateIntPending
+ .global ClearPrivateIntPending
+
+ .global __main
+ //.global MainApp
+
+ .global Image$$EXEC$$RO$$Base
+ .global Image$$TTB0_L1$$ZI$$Base
+ .global Image$$TTB0_L2_RAM$$ZI$$Base
+ .global Image$$TTB0_L2_PERIPH$$ZI$$Base
+ .global Image$$TOP_OF_RAM$$ZI$$Base
+ .global Image$$GICD$$ZI$$Base
+ .global Image$$ARM_LIB_STACK$$ZI$$Limit
+ .global Image$$EL3_STACKS$$ZI$$Limit
+ .global Image$$CS3_PERIPHERALS$$ZI$$Base
+ // use separate stack and heap, as anticipated by scatter.scat
+ .global __use_two_region_memory
+
+
+// ------------------------------------------------------------
+
+ .global start64
+ .type start64, "function"
+start64:
+
+ //
+ // program the VBARs
+ //
+ ldr x1, =el1_vectors
+ msr VBAR_EL1, x1
+
+ ldr x1, =el2_vectors
+ msr VBAR_EL2, x1
+
+ ldr x1, =el3_vectors
+ msr VBAR_EL3, x1
+
+
+ // GIC-500 comes out of reset in GICv2 compatibility mode - first set
+ // system register enables for all relevant exception levels, and
+ // select GICv3 operating mode
+ //
+ msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL3, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
+
+ //
+ // set lower exception levels as non-secure, with no access
+    // back to EL2 or EL3, and as AArch64 capable
+ //
+ mov x3, #(SCR_EL3_RW | \
+ SCR_EL3_SMD | \
+ SCR_EL3_NS) // Set NS bit, to access Non-secure registers
+ msr SCR_EL3, x3
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL2, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
+
+
+ //
+ // no traps or VM modifications from the Hypervisor, EL1 is AArch64
+ //
+ mov x2, #HCR_EL2_RW
+ msr HCR_EL2, x2
+
+ //
+ // VMID is still significant, even when virtualisation is not
+ // being used, so ensure VTTBR_EL2 is properly initialised
+ //
+ msr VTTBR_EL2, xzr
+
+ //
+ // VMPIDR_EL2 holds the value of the Virtualization Multiprocessor ID. This is the value returned by Non-secure EL1 reads of MPIDR_EL1.
+ // VPIDR_EL2 holds the value of the Virtualization Processor ID. This is the value returned by Non-secure EL1 reads of MIDR_EL1.
+ // Both of these registers are architecturally UNKNOWN at reset, and so they must be set to the correct value
+ // (even if EL2/virtualization is not being used), otherwise non-secure EL1 reads of MPIDR_EL1/MIDR_EL1 will return garbage values.
+ // This guarantees that any future reads of MPIDR_EL1 and MIDR_EL1 from Non-secure EL1 will return the correct value.
+ //
+ mrs x0, MPIDR_EL1
+ msr VMPIDR_EL2, x0
+ mrs x0, MIDR_EL1
+ msr VPIDR_EL2, x0
+
+ // extract the core number from MPIDR_EL1 and store it in
+ // x19 (defined by the AAPCS as callee-saved), so we can re-use
+ // the number later
+ //
+ bl GetCPUID
+ mov x19, x0
+
+ //
+ // neither EL3 nor EL2 trap floating point or accesses to CPACR
+ //
+ msr CPTR_EL3, xzr
+ msr CPTR_EL2, xzr
+
+ //
+ // SCTLR_ELx may come out of reset with UNKNOWN values so we will
+    // set the fields to 0 except, possibly, the endianness field(s).
+ // Note that setting SCTLR_EL2 or the EL0 related fields of SCTLR_EL1
+ // is not strictly needed, since we're never in EL2 or EL0
+ //
+#ifdef __ARM_BIG_ENDIAN
+ mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
+#else
+ mov x0, #0
+#endif
+ msr SCTLR_EL3, x0
+ msr SCTLR_EL2, x0
+ msr SCTLR_EL1, x0
+
+#ifdef CORTEXA
+ //
+ // Configure ACTLR_EL[23]
+ // ----------------------
+ //
+ // These bits are IMPLEMENTATION DEFINED, so are different for
+ // different processors
+ //
+ // For Cortex-A57, the controls we set are:
+ //
+ // Enable lower level access to CPUACTLR_EL1
+ // Enable lower level access to CPUECTLR_EL1
+ // Enable lower level access to L2CTLR_EL1
+ // Enable lower level access to L2ECTLR_EL1
+ // Enable lower level access to L2ACTLR_EL1
+ //
+ mov x0, #((1 << 0) | \
+ (1 << 1) | \
+ (1 << 4) | \
+ (1 << 5) | \
+ (1 << 6))
+
+ msr ACTLR_EL3, x0
+ msr ACTLR_EL2, x0
+
+ //
+ // configure CPUECTLR_EL1
+ //
+    // These bits are IMP DEF, so need to be different for different
+ // processors
+ //
+ // SMPEN - bit 6 - Enables the processor to receive cache
+ // and TLB maintenance operations
+ //
+ // Note: For Cortex-A57/53 SMPEN should be set before enabling
+ // the caches and MMU, or performing any cache and TLB
+ // maintenance operations.
+ //
+ // This register has a defined reset value, so we use a
+ // read-modify-write sequence to set SMPEN
+ //
+ mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
+ orr x0, x0, #(1 << 6) // Set the SMPEN bit
+ msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
+
+ isb
+#endif
+
+ //
+ // That's the last of the control settings for now
+ //
+ // Note: no ISB after all these changes, as registers won't be
+ // accessed until after an exception return, which is itself a
+ // context synchronisation event
+ //
+
+ //
+ // Setup some EL3 stack space, ready for calling some subroutines, below.
+ //
+ // Stack space allocation is CPU-specific, so use CPU
+ // number already held in x19
+ //
+ // 2^12 bytes per CPU for the EL3 stacks
+ //
+ ldr x0, =Image$$EL3_STACKS$$ZI$$Limit
+ sub x0, x0, x19, lsl #12
+ mov sp, x0
+
+ //
+ // we need to configure the GIC while still in secure mode, specifically
+ // all PPIs and SPIs have to be programmed as Group1 interrupts
+ //
+
+ //
+ // Before the GIC can be reliably programmed, we need to
+ // enable Affinity Routing, as this affects where the configuration
+ // registers are (with Affinity Routing enabled, some registers are
+ // in the Redistributor, whereas those same registers are in the
+    // Distributor with Affinity Routing disabled, i.e. when in GICv2
+    // compatibility mode).
+ //
+ mov x0, #(1 << 4) | (1 << 5) // gicdctlr_ARE_S | gicdctlr_ARE_NS
+ mov x1, x19
+ bl SyncAREinGICD
+
+ //
+ // The Redistributor comes out of reset assuming the processor is
+ // asleep - correct that assumption
+ //
+ bl GetAffinity
+ bl GetGICR
+ mov w20, w0 // Keep a copy for later
+ bl WakeupGICR
+
+ //
+ // Now we're ready to set security and other initialisations
+ //
+ // This is a per-CPU configuration for these interrupts
+ //
+ // for the first cluster, CPU number is the redistributor index
+ //
+ mov w0, w20
+ mov w1, #1 // gicigroupr_G1NS
+ bl SetPrivateIntSecurityBlock
+
+ //
+ // While we're in the Secure World, set the priority mask low enough
+ // for it to be writable in the Non-Secure World
+ //
+ //mov x0, #16 << 3 // 5 bits of priority in the Secure world
+ mov x0, #0xFF // for Non-Secure interrupts
+ msr ICC_PMR_EL1, x0
+
+ //
+ // there's more GIC setup to do, but only for the primary CPU
+ //
+ cbnz x19, drop_to_el1
+
+ //
+ // There's more to do to the GIC - call the utility routine to set
+ // all SPIs to Group1
+ //
+ mov w0, #1 // gicigroupr_G1NS
+ bl SetSPISecurityAll
+
+ //
+ // Set up EL1 entry point and "dummy" exception return information,
+ // then perform exception return to enter EL1
+ //
+ .global drop_to_el1
+drop_to_el1:
+ adr x1, el1_entry_aarch64
+ msr ELR_EL3, x1
+ mov x1, #(AARCH64_SPSR_EL1h | \
+ AARCH64_SPSR_F | \
+ AARCH64_SPSR_I | \
+ AARCH64_SPSR_A)
+ msr SPSR_EL3, x1
+ eret
+
+
+
+// ------------------------------------------------------------
+// EL1 - Common start-up code
+// ------------------------------------------------------------
+
+ .global el1_entry_aarch64
+ .type el1_entry_aarch64, "function"
+el1_entry_aarch64:
+
+ //
+ // Now we're in EL1, setup the application stack
+ // the scatter file allocates 2^14 bytes per app stack
+ //
+ ldr x0, =Image$$HANDLER_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+ MSR SPSel, #0
+ ISB
+ ldr x0, =Image$$ARM_LIB_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+
+ //
+ // Enable floating point
+ //
+ mov x0, #CPACR_EL1_FPEN
+ msr CPACR_EL1, x0
+
+ //
+ // Invalidate caches and TLBs for all stage 1
+ // translations used at EL1
+ //
+ // Cortex-A processors automatically invalidate their caches on reset
+ // (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
+ // It is therefore not necessary for software to invalidate the caches
+ // on startup, however, this is done here in case of a warm reset.
+ bl InvalidateUDCaches
+ tlbi VMALLE1
+
+
+ //
+ // Set TTBR0 Base address
+ //
+ // The CPUs share one set of translation tables that are
+ // generated by CPU0 at run-time
+ //
+ // TTBR1_EL1 is not used in this example
+ //
+ ldr x1, =Image$$TTB0_L1$$ZI$$Base
+ msr TTBR0_EL1, x1
+
+
+ //
+ // Set up memory attributes
+ //
+ // These equate to:
+ //
+ // 0 -> 0b01000100 = 0x00000044 = Normal, Inner/Outer Non-Cacheable
+ // 1 -> 0b11111111 = 0x0000ff00 = Normal, Inner/Outer WriteBack Read/Write Allocate
+ // 2 -> 0b00000100 = 0x00040000 = Device-nGnRE
+ //
+ mov x1, #0xff44
+ movk x1, #4, LSL #16 // equiv to: movk x1, #0x0000000000040000
+ msr MAIR_EL1, x1
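+    // (resulting MAIR_EL1 value: 0x0004FF44 - Attr2=0x04, Attr1=0xFF, Attr0=0x44)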
+
+
+ //
+ // Set up TCR_EL1
+ //
+ // We're using only TTBR0 (EPD1 = 1), and the page table entries:
+ // - are using an 8-bit ASID from TTBR0
+ // - have a 4K granularity (TG0 = 0b00)
+ // - are outer-shareable (SH0 = 0b10)
+ // - are using Inner & Outer WBWA Normal memory ([IO]RGN0 = 0b01)
+ // - map
+ // + 32 bits of VA space (T0SZ = 0x20)
+ // + into a 32-bit PA space (IPS = 0b000)
+ //
+ // 36 32 28 24 20 16 12 8 4 0
+ // -----+----+----+----+----+----+----+----+----+----+
+ // | | |OOII| | | |OOII| | |
+ // TT | | |RRRR|E T | T| |RRRR|E T | T|
+ // BB | I I|TTSS|GGGG|P 1 | 1|TTSS|GGGG|P 0 | 0|
+ // IIA| P P|GGHH|NNNN|DAS | S|GGHH|NNNN|D S | S|
+ // 10S| S-S|1111|1111|11Z-|---Z|0000|0000|0 Z-|---Z|
+ //
+ // 000 0000 0000 0000 1000 0000 0010 0101 0010 0000
+ //
+ // 0x 8 0 2 5 2 0
+ //
+ // Note: the ISB is needed to ensure the changes to system
+ // context are before the write of SCTLR_EL1.M to enable
+ // the MMU. It is likely on a "real" implementation that
+ // this setup would work without an ISB, due to the
+ // amount of code that gets executed before enabling the
+ // MMU, but that would not be architecturally correct.
+ //
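+    // i.e. TCR_EL1 = EPD1 | (SH0=0b10) | (ORGN0=0b01) | (IRGN0=0b01) | (T0SZ=0x20)
+    //             = 0x0000000000802520, all other fields zero
+    //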
+ ldr x1, =0x0000000000802520
+ msr TCR_EL1, x1
+ isb
+
+ //
+ // x19 already contains the CPU number, so branch to secondary
+ // code if we're not on CPU0
+ //
+ cbnz x19, el1_secondary
+
+ //
+ // Fall through to primary code
+ //
+
+
+//
+// ------------------------------------------------------------
+//
+// EL1 - primary CPU init code
+//
+// This code is run on CPU0, while the other CPUs are in the
+// holding pen
+//
+
+ .global el1_primary
+ .type el1_primary, "function"
+el1_primary:
+
+ //
+ // Turn on the banked GIC distributor enable,
+ // ready for individual CPU enables later
+ //
+ mov w0, #(1 << 1) // gicdctlr_EnableGrp1A
+ bl EnableGICD
+
+ //
+ // Generate TTBR0 L1
+ //
+ // at 4KB granularity, 32-bit VA space, table lookup starts at
+ // L1, with 1GB regions
+ //
+ // we are going to create entries pointing to L2 tables for a
+ // couple of these 1GB regions, the first of which is the
+ // RAM on the VE board model - get the table addresses and
+ // start by emptying out the L1 page tables (4 entries at L1
+ // for a 4K granularity)
+ //
+ // x21 = address of L1 tables
+ //
+ ldr x21, =Image$$TTB0_L1$$ZI$$Base
+ mov x0, x21
+ mov x1, #(4 << 3)
+ bl ZeroBlock
+
+ //
+ // time to start mapping the RAM regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // x22 = address of L2 tables, needs to be remembered in case
+ // we want to re-use the tables for mapping peripherals
+ //
+ ldr x22, =Image$$TTB0_L2_RAM$$ZI$$Base
+ mov x1, #(512 << 3)
+ mov x0, x22
+ bl ZeroBlock
+
+ //
+ // Get the start address of RAM (the EXEC region) into x4
+ // and calculate the offset into the L1 table (1GB per region,
+ // max 4GB)
+ //
+ // x23 = L1 table offset, saved for later comparison against
+ // peripheral offset
+ //
+ ldr x4, =Image$$EXEC$$RO$$Base
+ ubfx x23, x4, #30, #2
+
+ orr x1, x22, #TT_S1_ATTR_PAGE
+ str x1, [x21, x23, lsl #3]
+
+ //
+ // we've already used the RAM start address in x4 - we now need
+ // to get this in terms of an offset into the L2 page tables,
+ // where each entry covers 2MB
+ //
+ ubfx x2, x4, #21, #9
+
+ //
+ // TOP_OF_RAM in the scatter file marks the end of the
+ // Execute region in RAM: convert the end of this region to an
+ // offset too, being careful to round up, then calculate the
+ // number of entries to write
+ //
+ ldr x5, =Image$$TOP_OF_RAM$$ZI$$Base
+ sub x3, x5, #1
+ ubfx x3, x3, #21, #9
+ add x3, x3, #1
+ sub x3, x3, x2
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as Shared, Normal WBWA (MAIR[1]) with a flat
+ // VA->PA translation
+ //
+ bic x4, x4, #((1 << 21) - 1)
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (1 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_SH_INNER | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // factor the offset into the page table address and then write
+ // the entries
+ //
+ add x0, x22, x2, lsl #3
+
+loop1:
+ subs x3, x3, #1
+ str x1, [x0], #8
+ add x1, x1, #0x200, LSL #12 // equiv to add x1, x1, #(1 << 21) // 2MB per entry
+ bne loop1
+
+
+ //
+ // now mapping the Peripheral regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // The assumption here is that all peripherals live within
+ // a common 1GB region (i.e. that there's a single set of
+ // L2 pages for all the peripherals). We only use a UART
+ // and the GIC in this example, so the assumption is sound
+ //
+ // x24 = address of L2 peripheral tables
+ //
+ ldr x24, =Image$$TTB0_L2_PERIPH$$ZI$$Base
+
+ //
+ // get the GICD address into x4 and calculate
+ // the offset into the L1 table
+ //
+ // x25 = L1 table offset
+ //
+ ldr x4, =Image$$GICD$$ZI$$Base
+ ubfx x25, x4, #30, #2
+
+ //
+ // here's the tricky bit: it's possible that the peripherals are
+ // in the same 1GB region as the RAM, in which case we don't need
+ // to prime a separate set of L2 page tables, nor add them to the
+ // L1 tables
+ //
+ // if we're going to re-use the TTB0_L2_RAM tables, get their
+ // address into x24, which is used later on to write the PTEs
+ //
+ cmp x25, x23
+ csel x24, x22, x24, EQ
+ b.eq nol2setup
+
+ //
+ // Peripherals are in a separate 1GB region, and so have their own
+ // set of L2 tables - clean out the tables and add them to the L1
+ // table
+ //
+ mov x0, x24
+ mov x1, #512 << 3
+ bl ZeroBlock
+
+ orr x1, x24, #TT_S1_ATTR_PAGE
+ str x1, [x21, x25, lsl #3]
+
+ //
+ // there's only going to be a single 2MB region for GICD (in
+ // x4) - get this in terms of an offset into the L2 page tables
+ //
+ // with larger systems, it is possible that the GIC redistributor
+ // registers require extra 2MB pages, in which case extra code
+ // would be required here
+ //
+nol2setup:
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry for this, so no loop as we have for RAM, above
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // we have CS3_PERIPHERALS that include the UART controller
+ //
+ // Again, the code is making assumptions - this time that the CS3_PERIPHERALS
+ // region uses the same 1GB portion of the address space as the GICD,
+ // and thus shares the same set of L2 page tables
+ //
+ // Get CS3_PERIPHERALS address into x4 and calculate the offset into the
+ // L2 tables
+ //
+ ldr x4, =Image$$CS3_PERIPHERALS$$ZI$$Base
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry again - write it
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // issue a barrier to ensure all table entry writes are complete
+ //
+ dsb ish
+
+ //
+ // Enable the MMU. Caches will be enabled later, after scatterloading.
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to C library init code
+ //
+ b __main
+
+
+// ------------------------------------------------------------
+
+// AArch64 Arm C library startup add-in:
+
+// The Arm Architecture Reference Manual for Armv8-A states:
+//
+// Instruction accesses to Non-cacheable Normal memory can be held in instruction caches.
+// Correspondingly, the sequence for ensuring that modifications to instructions are available
+// for execution must include invalidation of the modified locations from the instruction cache,
+// even if the instructions are held in Normal Non-cacheable memory.
+// This includes cases where the instruction cache is disabled.
+//
+// To invalidate the AArch64 instruction cache after scatter-loading and before initialization of the stack and heap,
+// it is necessary for the user to:
+//
+// * Implement instruction cache invalidation code in _platform_pre_stackheap_init.
+// * Ensure all code on the path from the program entry up to and including _platform_pre_stackheap_init is located in a root region.
+//
+// In this example, this function is only called once, by the primary core
+
+ .global _platform_pre_stackheap_init
+ .type _platform_pre_stackheap_init, "function"
+ .cfi_startproc
+_platform_pre_stackheap_init:
+ dsb ish // ensure all previous stores have completed before invalidating
+ ic ialluis // I cache invalidate all inner shareable to PoU (which includes secondary cores)
+ dsb ish // ensure completion on inner shareable domain (which includes secondary cores)
+ isb
+
+ // Scatter-loading is complete, so enable the caches here, so that the C-library's mutex initialization later will work
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ msr SCTLR_EL1, x1
+ isb
+
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+// EL1 - secondary CPU init code
+//
+// This code is run on CPUs 1, 2, 3 etc....
+// ------------------------------------------------------------
+
+ .global el1_secondary
+ .type el1_secondary, "function"
+el1_secondary:
+
+ //
+ // the primary CPU is going to use SGI 15 as a wakeup event
+ // to let us know when it is OK to proceed, so prepare for
+ // receiving that interrupt
+ //
+ // NS interrupt priorities run from 0 to 15, with 15 being
+ // too low a priority to ever raise an interrupt, so let's
+ // use 14
+ //
+ mov w0, w20
+ mov w1, #15
+    mov    w2, #14 << 4            // we're in NS world, so only 4 bits of priority
+                                   // in an 8-bit field: 8 - 4 = 4-bit shift
+ bl SetPrivateIntPriority
+
+ mov w0, w20
+ mov w1, #15
+ bl EnablePrivateInt
+
+ //
+    // set priority mask as low as possible; although, being in the
+    // NS World, we can't set bit[7] of the priority, we still
+    // write all 8 bits of priority to an ICC register
+ //
+ mov x0, #31 << 3
+ msr ICC_PMR_EL1, x0
+
+ //
+ // set global enable and wait for our interrupt to arrive
+ //
+ mov x0, #1
+ msr ICC_IGRPEN1_EL1, x0
+ isb
+
+loop_wfi:
+ dsb SY // Clear all pending data accesses
+ wfi // Go to sleep
+
+ //
+ // something woke us from our wait, was it the required interrupt?
+ //
+ mov w0, w20
+ mov w1, #15
+ bl GetPrivateIntPending
+ cbz w0, loop_wfi
+
+ //
+ // it was - there's no need to actually take the interrupt,
+ // so just clear it
+ //
+ mov w0, w20
+ mov w1, #15
+ bl ClearPrivateIntPending
+
+ //
+ // Enable the MMU and caches
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to thread start
+ //
+ //B MainApp
+ b __main
+
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/timer_interrupts.c b/ports/cortex_a53/ac6/example_build/sample_threadx/timer_interrupts.c
new file mode 100644
index 00000000..8f522217
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/timer_interrupts.c
@@ -0,0 +1,152 @@
+/* Bare-metal example for Armv8-A FVP Base model */
+
+/* Timer and interrupts */
+
+/* Copyright (c) 2016-2018 Arm Limited (or its affiliates). All rights reserved. */
+/* Use, modification and redistribution of this file is subject to your possession of a */
+/* valid End User License Agreement for the Arm Product of which these examples are part of */
+/* and your compliance with all applicable terms and conditions of such licence agreement. */
+
+#include <stdio.h>   /* for the (commented-out) printf diagnostics */
+
+#include "GICv3.h"
+#include "GICv3_gicc.h"
+#include "sp804_timer.h"
+
+void _tx_timer_interrupt(void);
+
+// LED Base address
+#define LED_BASE (volatile unsigned int *)0x1C010008
+
+
+void nudge_leds(void) // Move LEDs along
+{
+ static int state = 1;
+ static int value = 1;
+
+ if (state)
+ {
+ int max = (1 << 7);
+ value <<= 1;
+ if (value == max)
+ state = 0;
+ }
+ else
+ {
+ value >>= 1;
+ if (value == 1)
+ state = 1;
+ }
+
+ *LED_BASE = value; // Update LEDs hardware
+}
+
+
+// Initialize Timer 0 and Interrupt Controller
+void init_timer(void)
+{
+ // Enable interrupts
+ __asm("MSR DAIFClr, #0xF");
+ setICC_IGRPEN1_EL1(igrpEnable);
+
+ // Configure the SP804 timer to generate an interrupt
+ setTimerBaseAddress(0x1C110000);
+ initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+ startTimer();
+
+ // The SP804 timer generates SPI INTID 34. Enable
+ // this ID, and route it to core 0.0.0.0 (this one!)
+ SetSPIRoute(34, 0, gicdirouter_ModeSpecific); // Route INTID 34 to 0.0.0.0 (this core)
+ SetSPIPriority(34, 0); // Set INTID 34 to priority to 0
+ ConfigureSPI(34, gicdicfgr_Level); // Set INTID 34 as level-sensitive
+ EnableSPI(34); // Enable INTID 34
+}
+
+
+// --------------------------------------------------------
+
+void irqHandler(void)
+{
+ unsigned int ID;
+
+ ID = getICC_IAR1(); // readIntAck();
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("irqHandler() - Reserved INTID %d\n\n", ID);
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("irqHandler() - External timer interrupt\n\n");
+ nudge_leds();
+ clearTimerIrq();
+
+ /* Call ThreadX timer interrupt processing. */
+ _tx_timer_interrupt();
+
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("irqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
+
+// --------------------------------------------------------
+
+// Not actually used in this example, but provided for completeness
+
+void fiqHandler(void)
+{
+ unsigned int ID;
+ unsigned int aliased = 0;
+
+ ID = getICC_IAR0(); // readIntAck();
+ //printf("fiqHandler() - Read %d from IAR0\n", ID);
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("fiqHandler() - Reserved INTID %d\n\n", ID);
+ ID = getICC_IAR1(); // readAliasedIntAck();
+ //printf("fiqHandler() - Read %d from AIAR\n", ID);
+ aliased = 1;
+
+ // If still spurious then simply return
+ if ((1020 <= ID) && (ID <= 1023))
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("fiqHandler() - External timer interrupt\n\n");
+ clearTimerIrq();
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("fiqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ // NOTE: If the ID was read from the Aliased IAR, then
+ // the aliased EOI register must be used
+ if (aliased == 0)
+ setICC_EOIR0(ID); // writeEOI(ID);
+ else
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/use_model_semihosting.ds b/ports/cortex_a53/ac6/example_build/sample_threadx/use_model_semihosting.ds
new file mode 100644
index 00000000..6fde52b2
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/use_model_semihosting.ds
@@ -0,0 +1 @@
+set semihosting enabled off
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.S
new file mode 100644
index 00000000..45445a98
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.S
@@ -0,0 +1,179 @@
+// ------------------------------------------------------------
+// Armv8-A AArch64 - Common helper functions
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global EnableCachesEL1
+ .global DisableCachesEL1
+ .global InvalidateUDCaches
+ .global GetMIDR
+ .global GetMPIDR
+ .global GetAffinity
+ .global GetCPUID
+
+// ------------------------------------------------------------
+
+//
+// void EnableCachesEL1(void)
+//
+// enable Instruction and Data caches
+//
+ .type EnableCachesEL1, "function"
+ .cfi_startproc
+EnableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_ELx_I
+ orr x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+ .type DisableCachesEL1, "function"
+ .cfi_startproc
+DisableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_ELx_I
+ bic x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// void InvalidateUDCaches(void)
+//
+// Invalidate data and unified caches
+//
+ .type InvalidateUDCaches, "function"
+ .cfi_startproc
+InvalidateUDCaches:
+ // From the Armv8-A Architecture Reference Manual
+
+ dmb ish // ensure all prior inner-shareable accesses have been observed
+
+ mrs x0, CLIDR_EL1
+ and w3, w0, #0x07000000 // get 2 x level of coherence
+ lsr w3, w3, #23
+ cbz w3, finished
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+loop_level:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt next_level // no data or unified cache at this level
+ msr CSSELR_EL1, x10 // select this cache level
+ isb // synchronize change of csselr
+ mrs x1, CCSIDR_EL1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop_way:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop_set:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc isw, x11 // do data cache invalidate by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop_set
+ subs x9, x9, x16 // decrement way number
+ b.ge loop_way
+next_level:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop_level
+ dsb sy // ensure completion of previous cache maintenance operation
+ isb
+finished:
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// ID Register functions
+//
+
+ .type GetMIDR, "function"
+ .cfi_startproc
+GetMIDR:
+
+ mrs x0, MIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetMPIDR, "function"
+ .cfi_startproc
+GetMPIDR:
+
+ mrs x0, MPIDR_EL1
+ ret
+ .cfi_endproc
+
+
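+//
+// unsigned int GetAffinity(void)
+//
+// returns the MPIDR_EL1 affinity fields packed into one word:
+// Aff3 is moved down into bits [31:24], above Aff2, Aff1 and Aff0
+//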
+ .type GetAffinity, "function"
+ .cfi_startproc
+GetAffinity:
+
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #32, #8
+ bfi w0, w1, #24, #8
+ ret
+ .cfi_endproc
+
+
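+//
+// unsigned int GetCPUID(void)
+//
+// returns a linear CPU number: Aff1 on DynamIQ-based parts (A55/A75/A76/A77),
+// otherwise Aff0 + (Aff1 * 4), assuming at most four cores per cluster
+//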
+ .type GetCPUID, "function"
+ .cfi_startproc
+GetCPUID:
+
+ mrs x0, MIDR_EL1
+ ubfx x0, x0, #4, #12 // extract PartNum
+ cmp x0, #0xD0D // Cortex-A77
+ b.eq DynamIQ
+ cmp x0, #0xD0B // Cortex-A76
+ b.eq DynamIQ
+ cmp x0, #0xD0A // Cortex-A75
+ b.eq DynamIQ
+ cmp x0, #0xD05 // Cortex-A55
+ b.eq DynamIQ
+ b Others
+DynamIQ:
+ mrs x0, MPIDR_EL1
+ ubfx x0, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ ret
+
+Others:
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
+ ubfx x2, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ add x0, x1, x2, LSL #2
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.h b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.h
new file mode 100644
index 00000000..b09079a4
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_aarch64.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Armv8-A AArch64 common helper functions
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+
+#ifndef V8_AARCH64_H
+#define V8_AARCH64_H
+
+/*
+ * Parameters for data barriers
+ */
+#define OSHLD 1
+#define OSHST 2
+#define OSH 3
+#define NSHLD 5
+#define NSHST 6
+#define NSH 7
+#define ISHLD 9
+#define ISHST 10
+#define ISH 11
+#define LD 13
+#define ST 14
+#define SY 15
+
+/**********************************************************************/
+
+/*
+ * function prototypes
+ */
+
+/*
+ * void InvalidateUDCaches(void)
+ * invalidates all Unified and Data Caches
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * guarantees that all levels of cache will be invalidated before
+ * returning to caller
+ */
+void InvalidateUDCaches(void);
+
+/*
+ * unsigned long long EnableCachesEL1(void)
+ * enables I- and D- caches at EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * New value of SCTLR_EL1
+ *
+ * Side Effects
+ * context will be synchronised before returning to caller
+ */
+unsigned long long EnableCachesEL1(void);
+
+/*
+ * unsigned long long GetMIDR(void)
+ * returns the contents of MIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MIDR_EL1
+ */
+unsigned long long GetMIDR(void);
+
+/*
+ * unsigned long long GetMPIDR(void)
+ * returns the contents of MPIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MPIDR_EL1
+ */
+unsigned long long GetMPIDR(void);
+
+/*
+ * unsigned int GetCPUID(void)
+ * returns a linear CPU number derived from the MPIDR_EL1 affinity fields
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * CPU number (Aff1 on DynamIQ parts, otherwise Aff0 + 4*Aff1)
+ */
+unsigned int GetCPUID(void);
+
+#endif
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_mmu.h
new file mode 100644
index 00000000..bce62b54
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_mmu.h
@@ -0,0 +1,128 @@
+//
+// Defines for v8 Memory Model
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_MMU_H
+#define V8_MMU_H
+
+//
+// Translation Control Register fields
+//
+// RGN field encodings
+//
+#define TCR_RGN_NC 0b00
+#define TCR_RGN_WBWA 0b01
+#define TCR_RGN_WT 0b10
+#define TCR_RGN_WBRA 0b11
+
+//
+// Shareability encodings
+//
+#define TCR_SHARE_NONE 0b00
+#define TCR_SHARE_OUTER 0b10
+#define TCR_SHARE_INNER 0b11
+
+//
+// Granule size encodings
+//
+#define TCR_GRANULE_4K 0b00
+#define TCR_GRANULE_64K 0b01
+#define TCR_GRANULE_16K 0b10
+
+//
+// Physical Address sizes
+//
+#define TCR_SIZE_4G 0b000
+#define TCR_SIZE_64G 0b001
+#define TCR_SIZE_1T 0b010
+#define TCR_SIZE_4T 0b011
+#define TCR_SIZE_16T 0b100
+#define TCR_SIZE_256T 0b101
+
+//
+// Translation Control Register fields
+//
+#define TCR_EL1_T0SZ_SHIFT 0
+#define TCR_EL1_EPD0 (1 << 7)
+#define TCR_EL1_IRGN0_SHIFT 8
+#define TCR_EL1_ORGN0_SHIFT 10
+#define TCR_EL1_SH0_SHIFT 12
+#define TCR_EL1_TG0_SHIFT 14
+
+#define TCR_EL1_T1SZ_SHIFT 16
+#define TCR_EL1_A1 (1 << 22)
+#define TCR_EL1_EPD1 (1 << 23)
+#define TCR_EL1_IRGN1_SHIFT 24
+#define TCR_EL1_ORGN1_SHIFT 26
+#define TCR_EL1_SH1_SHIFT 28
+#define TCR_EL1_TG1_SHIFT 30
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_EL1_AS (1 << 36)
+#define TCR_EL1_TBI0 (1 << 37)
+#define TCR_EL1_TBI1 (1 << 38)
+
+//
+// Stage 1 Translation Table descriptor fields
+//
+#define TT_S1_ATTR_FAULT (0b00 << 0)
+#define TT_S1_ATTR_BLOCK (0b01 << 0) // Level 1/2
+#define TT_S1_ATTR_TABLE (0b11 << 0) // Level 0/1/2
+#define TT_S1_ATTR_PAGE (0b11 << 0) // Level 3
+
+#define TT_S1_ATTR_MATTR_LSB 2
+
+#define TT_S1_ATTR_NS (1 << 5)
+
+#define TT_S1_ATTR_AP_RW_PL1 (0b00 << 6)
+#define TT_S1_ATTR_AP_RW_ANY (0b01 << 6)
+#define TT_S1_ATTR_AP_RO_PL1 (0b10 << 6)
+#define TT_S1_ATTR_AP_RO_ANY (0b11 << 6)
+
+#define TT_S1_ATTR_SH_NONE (0b00 << 8)
+#define TT_S1_ATTR_SH_OUTER (0b10 << 8)
+#define TT_S1_ATTR_SH_INNER (0b11 << 8)
+
+#define TT_S1_ATTR_AF (1 << 10)
+#define TT_S1_ATTR_nG (1 << 11)
+
+// OA bits [15:12] - If Armv8.2-LPA is implemented, bits[15:12] are bits[51:48]
+// and bits[47:16] are bits[47:16] of the output address for a page of memory
+
+#define TT_S1_ATTR_nT (1 << 16) // Present if Armv8.4-TTRem is implemented, otherwise RES0
+
+#define TT_S1_ATTR_DBM (1 << 51) // Present if Armv8.1-TTHM is implemented, otherwise RES0
+
+#define TT_S1_ATTR_CONTIG (1 << 52)
+#define TT_S1_ATTR_PXN (1 << 53)
+#define TT_S1_ATTR_UXN (1 << 54)
+
+// PBHA bits[62:59] - If Armv8.2-TTPBHA is implemented, hardware can use these bits
+// for IMPLEMENTATION DEFINED purposes, otherwise IGNORED
+
+#define TT_S1_MAIR_DEV_nGnRnE 0b00000000
+#define TT_S1_MAIR_DEV_nGnRE 0b00000100
+#define TT_S1_MAIR_DEV_nGRE 0b00001000
+#define TT_S1_MAIR_DEV_GRE 0b00001100
+
+//
+// Inner and Outer Normal memory attributes use the same bit patterns
+// Outer attributes just need to be shifted up
+//
+#define TT_S1_MAIR_OUTER_SHIFT 4
+
+#define TT_S1_MAIR_WT_TRANS_RA 0b0010
+
+#define TT_S1_MAIR_WB_TRANS_RA 0b0110
+#define TT_S1_MAIR_WB_TRANS_RWA 0b0111
+
+#define TT_S1_MAIR_WT_RA 0b1010
+
+#define TT_S1_MAIR_WB_RA 0b1110
+#define TT_S1_MAIR_WB_RWA 0b1111
+
+#endif // V8_MMU_H
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/v8_system.h b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_system.h
new file mode 100644
index 00000000..a62d2a33
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_system.h
@@ -0,0 +1,115 @@
+//
+// Defines for v8 System Registers
+//
+// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_SYSTEM_H
+#define V8_SYSTEM_H
+
+//
+// AArch64 SPSR
+//
+#define AARCH64_SPSR_EL3h 0b1101
+#define AARCH64_SPSR_EL3t 0b1100
+#define AARCH64_SPSR_EL2h 0b1001
+#define AARCH64_SPSR_EL2t 0b1000
+#define AARCH64_SPSR_EL1h 0b0101
+#define AARCH64_SPSR_EL1t 0b0100
+#define AARCH64_SPSR_EL0t 0b0000
+#define AARCH64_SPSR_RW (1 << 4)
+#define AARCH64_SPSR_F (1 << 6)
+#define AARCH64_SPSR_I (1 << 7)
+#define AARCH64_SPSR_A (1 << 8)
+#define AARCH64_SPSR_D (1 << 9)
+#define AARCH64_SPSR_IL (1 << 20)
+#define AARCH64_SPSR_SS (1 << 21)
+#define AARCH64_SPSR_V (1 << 28)
+#define AARCH64_SPSR_C (1 << 29)
+#define AARCH64_SPSR_Z (1 << 30)
+#define AARCH64_SPSR_N (1 << 31)
+
+//
+// Multiprocessor Affinity Register
+//
+#define MPIDR_EL1_AFF3_LSB 32
+#define MPIDR_EL1_U (1 << 30)
+#define MPIDR_EL1_MT (1 << 24)
+#define MPIDR_EL1_AFF2_LSB 16
+#define MPIDR_EL1_AFF1_LSB 8
+#define MPIDR_EL1_AFF0_LSB 0
+#define MPIDR_EL1_AFF_WIDTH 8
+
+//
+// Data Cache Zero ID Register
+//
+#define DCZID_EL0_BS_LSB 0
+#define DCZID_EL0_BS_WIDTH 4
+#define DCZID_EL0_DZP_LSB 5
+#define DCZID_EL0_DZP (1 << 5)
+
+//
+// System Control Register
+//
+#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_EL1_E0E (1 << 24)
+#define SCTLR_ELx_WXN (1 << 19)
+#define SCTLR_EL1_nTWE (1 << 18)
+#define SCTLR_EL1_nTWI (1 << 16)
+#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_EL1_UMA (1 << 9)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
+#define SCTLR_EL1_THEE (1 << 6)
+#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M (1 << 0)
+
+//
+// Architectural Feature Access Control Register
+//
+#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_FPEN (3 << 20)
+
+//
+// Architectural Feature Trap Register
+//
+#define CPTR_ELx_TCPAC (1 << 31)
+#define CPTR_ELx_TTA (1 << 20)
+#define CPTR_ELx_TFP (1 << 10)
+
+//
+// Secure Configuration Register
+//
+#define SCR_EL3_TWE (1 << 13)
+#define SCR_EL3_TWI (1 << 12)
+#define SCR_EL3_ST (1 << 11)
+#define SCR_EL3_RW (1 << 10)
+#define SCR_EL3_SIF (1 << 9)
+#define SCR_EL3_HCE (1 << 8)
+#define SCR_EL3_SMD (1 << 7)
+#define SCR_EL3_EA (1 << 3)
+#define SCR_EL3_FIQ (1 << 2)
+#define SCR_EL3_IRQ (1 << 1)
+#define SCR_EL3_NS (1 << 0)
+
+//
+// Hypervisor Configuration Register
+//
+#define HCR_EL2_ID (1 << 33)
+#define HCR_EL2_CD (1 << 32)
+#define HCR_EL2_RW (1 << 31)
+#define HCR_EL2_TRVM (1 << 30)
+#define HCR_EL2_HVC (1 << 29)
+#define HCR_EL2_TDZ (1 << 28)
+
+#endif // V8_SYSTEM_H
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/v8_utils.S b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_utils.S
new file mode 100644
index 00000000..888892a0
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/v8_utils.S
@@ -0,0 +1,69 @@
+//
+// Simple utility routines for baremetal v8 code
+//
+// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+//
+// void *ZeroBlock(void *blockPtr, unsigned int nBytes)
+//
+// Zero fill a block of memory
+// Fill memory pages or similar structures with zeros.
+// The byte count must be a multiple of the block fill size (16 bytes)
+//
+// Inputs:
+// blockPtr - base address of block to fill
+// nBytes - block size, in bytes
+//
+// Returns:
+// pointer to just filled block, NULL if nBytes is
+// incompatible with block fill size
+//
+ .global ZeroBlock
+ .type ZeroBlock, "function"
+ .cfi_startproc
+ZeroBlock:
+
+ //
+    // we fill the data 16 bytes at a time: check that
+ // blocksize is a multiple of that
+ //
+ ubfx x2, x1, #0, #4
+ cbnz x2, incompatible
+
+ //
+ // we already have one register full of zeros, get another
+ //
+ mov x3, x2
+
+ //
+ // OK, set temporary pointer and away we go
+ //
+ add x0, x0, x1
+
+loop0:
+ subs x1, x1, #16
+ stp x2, x3, [x0, #-16]!
+ b.ne loop0
+
+ //
+ // that's all - x0 will be back to its start value
+ //
+ ret
+
+ //
+ // parameters are incompatible with block size - return
+ // an indication that this is so
+ //
+incompatible:
+ mov x0,#0
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a53/ac6/example_build/sample_threadx/vectors.S b/ports/cortex_a53/ac6/example_build/sample_threadx/vectors.S
new file mode 100644
index 00000000..7784f98e
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/sample_threadx/vectors.S
@@ -0,0 +1,252 @@
+// ------------------------------------------------------------
+// Armv8-A Vector tables
+//
+// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+ .global c0sync1
+ .global irqHandler
+ .global fiqHandler
+ .global irqFirstLevelHandler
+ .global fiqFirstLevelHandler
+
+ .section EL1VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el1_vectors:
+c0sync1: B c0sync1
+
+ .balign 0x80
+c0irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr1: B c0serr1
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync1: B cxsync1
+
+ .balign 0x80
+cxirq1: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr1: B cxserr1
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync1: B l64sync1
+
+ .balign 0x80
+l64irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr1: B l64serr1
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync1: B l32sync1
+
+ .balign 0x80
+l32irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr1: B l32serr1
+
+//----------------------------------------------------------------
+
+ .section EL2VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el2_vectors:
+c0sync2: B c0sync2
+
+ .balign 0x80
+c0irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr2: B c0serr2
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync2: B cxsync2
+
+ .balign 0x80
+cxirq2: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr2: B cxserr2
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync2: B l64sync2
+
+ .balign 0x80
+l64irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr2: B l64serr2
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync2: B l32sync2
+
+ .balign 0x80
+l32irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr2: B l32serr2
+
+//----------------------------------------------------------------
+
+ .section EL3VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el3_vectors:
+c0sync3: B c0sync3
+
+ .balign 0x80
+c0irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr3: B c0serr3
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync3: B cxsync3
+
+ .balign 0x80
+cxirq3: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr3: B cxserr3
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync3: B l64sync3
+
+ .balign 0x80
+l64irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr3: B l64serr3
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync3: B l32sync3
+
+ .balign 0x80
+l32irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr3: B l32serr3
+
+
+ .section InterruptHandlers, "ax"
+ .balign 4
+
+ .type irqFirstLevelHandler, "function"
+irqFirstLevelHandler:
+ MSR SPSel, 0
+ STP x29, x30, [sp, #-16]!
+ BL _tx_thread_context_save
+ BL irqHandler
+ B _tx_thread_context_restore
+
+ .type fiqFirstLevelHandler, "function"
+fiqFirstLevelHandler:
+ STP x29, x30, [sp, #-16]!
+ STP x18, x19, [sp, #-16]!
+ STP x16, x17, [sp, #-16]!
+ STP x14, x15, [sp, #-16]!
+ STP x12, x13, [sp, #-16]!
+ STP x10, x11, [sp, #-16]!
+ STP x8, x9, [sp, #-16]!
+ STP x6, x7, [sp, #-16]!
+ STP x4, x5, [sp, #-16]!
+ STP x2, x3, [sp, #-16]!
+ STP x0, x1, [sp, #-16]!
+
+ BL fiqHandler
+
+ LDP x0, x1, [sp], #16
+ LDP x2, x3, [sp], #16
+ LDP x4, x5, [sp], #16
+ LDP x6, x7, [sp], #16
+ LDP x8, x9, [sp], #16
+ LDP x10, x11, [sp], #16
+ LDP x12, x13, [sp], #16
+ LDP x14, x15, [sp], #16
+ LDP x16, x17, [sp], #16
+ LDP x18, x19, [sp], #16
+ LDP x29, x30, [sp], #16
+ ERET
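The tables above only branch to the first-level handlers; something in the startup code still has to point VBAR_ELx at them. A minimal sketch for EL1 is shown below using GCC/armclang-style inline assembly. The symbol el1_vectors comes from this file; the function name is illustrative, and the real sample's startup code may install the table elsewhere.

/* Illustrative only: install the EL1 vector table defined above. */
static inline void install_el1_vectors(void)
{
    extern char el1_vectors[];          /* exported by vectors.S */

    __asm__ volatile(
        "msr VBAR_EL1, %0\n\t"
        "isb"
        :
        : "r"(el1_vectors)
        : "memory");
}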
diff --git a/ports/cortex_a53/ac6/example_build/tx/.cproject b/ports/cortex_a53/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..35fed0f2
--- /dev/null
+++ b/ports/cortex_a53/ac6/example_build/tx/.cproject
@@ -0,0 +1,148 @@
+<!-- Eclipse CDT .cproject build configuration (XML content not reproduced) -->
diff --git a/ports/arc_em/metaware/test_validation/tx/.project b/ports/cortex_a53/ac6/example_build/tx/.project
similarity index 100%
rename from ports/arc_em/metaware/test_validation/tx/.project
rename to ports/cortex_a53/ac6/example_build/tx/.project
diff --git a/ports/cortex_a53/ac6/inc/tx_port.h b/ports/cortex_a53/ac6/inc/tx_port.h
new file mode 100644
index 00000000..33bccbf1
--- /dev/null
+++ b/ports/cortex_a53/ac6/inc/tx_port.h
@@ -0,0 +1,379 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef int LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef short SHORT;
+typedef unsigned short USHORT;
+#define ULONG64_DEFINED
+
+/* Override the alignment type to use 64-bit alignment and storage for pointers. */
+
+#define ALIGN_TYPE_DEFINED
+typedef unsigned long long ALIGN_TYPE;
+
+
+/* Override the free block marker for byte pools to be a 64-bit constant. */
+
+#define TX_BYTE_BLOCK_FREE ((ALIGN_TYPE) 0xFFFFEEEEFFFFEEEE)
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_MISRA_ENABLE
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
+#define TX_INLINE_INITIALIZATION
+#endif
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifndef TX_MISRA_ENABLE
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* The ARMv8-A architecture provides the CLZ instruction, so redefine the macro for
+   calculating the lowest set bit to use the compiler built-in. */
+
+#ifndef TX_DISABLE_INLINE
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) b = (UINT) __builtin_ctz((unsigned int) m);
+
+#endif
+
+
+/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
+ can figure out what thread timeout to process. */
+
+#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define the thread timeout setup logic in _tx_thread_create. */
+
+#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
+ (t) -> tx_thread_timer.tx_timer_internal_timeout_param = 0; \
+ (t) -> tx_thread_timer.tx_timer_internal_thread_timeout_ptr = (VOID *) (t);
+
+
+/* Define the thread timeout pointer setup in _tx_thread_timeout. */
+
+#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+   restore the interrupt posture of the running thread to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifndef TX_DISABLE_INLINE
+
+/* Define macros, with in-line assembly for performance. */
+
+__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
+{
+
+unsigned long long daif_value;
+
+ __asm__ volatile (" MRS %0, DAIF ": "=r" (daif_value) );
+ __asm__ volatile (" MSR DAIFSet, 0x3" : : : "memory" );
+ return((unsigned int) daif_value);
+}
+
+__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int daif_value)
+{
+
+unsigned long long temp;
+
+ temp = (unsigned long long) daif_value;
+ __asm__ volatile (" MSR DAIF,%0": : "r" (temp): "memory" );
+}
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+#define TX_DISABLE interrupt_save = __disable_interrupts();
+#define TX_RESTORE __restore_interrupts(interrupt_save);
+
+#else
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+#endif
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
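The TX_INTERRUPT_SAVE_AREA / TX_DISABLE / TX_RESTORE macros above are how the kernel brackets its critical sections on this port, by saving and masking DAIF. A minimal usage sketch follows; the function and variable names are illustrative, and application code would normally reach these macros only in port-aware or BSP-level code.

/* Illustrative critical section using the port's DAIF-based macros. */
#include "tx_api.h"

static ULONG shared_counter;

VOID increment_shared_counter(VOID)
{
TX_INTERRUPT_SAVE_AREA

    TX_DISABLE
    shared_counter++;
    TX_RESTORE
}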
diff --git a/ports/cortex_a53/ac6/src/tx_initialize_low_level.S b/ports/cortex_a53/ac6/src/tx_initialize_low_level.S
new file mode 100644
index 00000000..d0b541f1
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_initialize_low_level.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_initialize_low_level(VOID)
+// {
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level, @function
+_tx_initialize_low_level:
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+
+ /* Save the system stack pointer. */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
+
+ LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
+ MOV x1, sp // Pickup SP
+ SUB x1, x1, #15 //
+    BIC     x1, x1, #0xF                        // Ensure 16-byte alignment
+ STR x1, [x0] // Store system stack
+
+ /* Save the first available memory address. */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit;
+
+ LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
+ LDR x1, =zi_limit // Pickup unused memory address
+ LDR x1, [x1] //
+ STR x1, [x0] // Store unused memory address
+
+ /* Done, return to caller. */
+
+ RET // Return to caller
+// }
+
+
+zi_limit:
+ .quad (Image$$TOP_OF_RAM$$Base)
+
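The address captured in _tx_initialize_unused_memory above is what ThreadX later passes to tx_application_define as first_unused_memory. A typical, purely illustrative use is to seed a byte pool from it; the pool name and size are arbitrary and error checking is omitted.

#include "tx_api.h"

#define DEMO_POOL_SIZE 8192                  /* illustrative size */

static TX_BYTE_POOL demo_pool;

VOID tx_application_define(VOID *first_unused_memory)
{

    /* Carve a byte pool out of the RAM reported as unused by
       _tx_initialize_low_level; error checking omitted for brevity. */
    tx_byte_pool_create(&demo_pool, "demo pool",
                        first_unused_memory, DEMO_POOL_SIZE);
}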
diff --git a/ports/cortex_a53/ac6/src/tx_thread_context_restore.S b/ports/cortex_a53/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..994c404d
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,287 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_restore(VOID)
+// {
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore, @function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ // if (--_tx_thread_system_state)
+ // {
+
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ SUB w2, w2, #1 // Decrement the counter
+ STR w2, [x3, #0] // Store the counter
+ CMP w2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+    LDP     x4, x5, [sp], #16                   // Pickup saved SPSR/DAIF and ELR_ELx
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+    // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr))
+    //          || (_tx_thread_preempt_disable))
+ // {
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup actual current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR w2, [x3, #0] // Pickup actual preempt disable flag
+ CMP w2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR x2, [x3, #0] // Pickup actual execute thread pointer
+ CMP x0, x2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+ // else
+ // {
+__tx_thread_preempt_restore:
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ STP x20, x21, [sp, #-16]! // Save x20, x21
+ STP x22, x23, [sp, #-16]! // Save x22, x23
+ STP x24, x25, [sp, #-16]! // Save x24, x25
+ STP x26, x27, [sp, #-16]! // Save x26, x27
+ STP x28, x29, [sp, #-16]! // Save x28, x29
+#ifdef ENABLE_ARM_FP
+ LDR w3, [x0, #248] // Pickup FP enable flag
+ CMP w3, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q0, q1, [sp, #-32]! // Save q0, q1
+ STP q2, q3, [sp, #-32]! // Save q2, q3
+ STP q4, q5, [sp, #-32]! // Save q4, q5
+ STP q6, q7, [sp, #-32]! // Save q6, q7
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ STP q16, q17, [sp, #-32]! // Save q16, q17
+ STP q18, q19, [sp, #-32]! // Save q18, q19
+ STP q20, q21, [sp, #-32]! // Save q20, q21
+ STP q22, q23, [sp, #-32]! // Save q22, q23
+ STP q24, q25, [sp, #-32]! // Save q24, q25
+ STP q26, q27, [sp, #-32]! // Save q26, q27
+ STP q28, q29, [sp, #-32]! // Save q28, q29
+ STP q30, q31, [sp, #-32]! // Save q30, q31
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+    STP     x4, x5, [sp, #-16]!                 // Save x4 (SPSR_EL3), x5 (ELR_EL3)
+
+ MOV x3, sp // Move sp into x3
+ STR x3, [x0, #8] // Save stack pointer in thread control
+ // block
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+
+ /* Save the remaining time-slice and disable it. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w2, [x0, #36] // Save thread's time-slice
+ MOV w2, #0 // Clear value
+ STR w2, [x3, #0] // Disable global time-slice flag
+
+ // }
+__tx_thread_dont_save_ts:
+
+
+ /* Clear the current task pointer. */
+ /* _tx_thread_current_ptr = TX_NULL; */
+
+ MOV x0, #0 // NULL value
+ STR x0, [x1, #0] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ // _tx_thread_schedule();
+
+ // }
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
+#ifdef EL1
+ MSR ELR_EL1, x1 // Setup point of interrupt
+// MOV x1, #0x4 // Setup EL1 return
+// MSR spsr_el1, x1 // Move into SPSR
+#else
+#ifdef EL2
+ MSR ELR_EL2, x1 // Setup point of interrupt
+// MOV x1, #0x8 // Setup EL2 return
+// MSR spsr_el2, x1 // Move into SPSR
+#else
+ MSR ELR_EL3, x1 // Setup point of interrupt
+// MOV x1, #0xC // Setup EL3 return
+// MSR spsr_el3, x1 // Move into SPSR
+#endif
+#endif
+ ERET // Return to scheduler
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_thread_context_save.S b/ports/cortex_a53/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..859a1e44
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,216 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_save(VOID)
+// {
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save, @function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+ out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
+ and all other registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+ // if (_tx_thread_system_state++)
+ // {
+
+ STP x0, x1, [sp, #-16]! // Save x0, x1
+ STP x2, x3, [sp, #-16]! // Save x2, x3
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ CMP w2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD w2, w2, #1 // Increment the nested interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x0, SPSR_EL1 // Pickup SPSR
+ MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x0, SPSR_EL2 // Pickup SPSR
+ MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x0, SPSR_EL3 // Pickup SPSR
+ MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x0, x1, [sp, #-16]! // Save SPSR, ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ /* Return to the ISR. */
+
+ RET // Return to ISR
+
+__tx_thread_not_nested_save:
+ // }
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ // else if (_tx_thread_current_ptr)
+ // {
+
+ ADD w2, w2, #1 // Increment the interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x4, SPSR_EL1 // Pickup SPSR
+ MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x4, SPSR_EL2 // Pickup SPSR
+ MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x4, SPSR_EL3 // Pickup SPSR
+ MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x4, x5, [sp, #-16]! // Save SPSR, ELR
+
+ /* Save the current stack pointer in the thread's control block. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+
+ MOV x4, sp //
+ STR x4, [x0, #8] // Save thread stack pointer
+
+ /* Switch to the system stack. */
+ // sp = _tx_thread_system_stack_ptr;
+
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ RET // Return to caller
+
+ // }
+ // else
+ // {
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ ADD sp, sp, #48 // Recover saved registers
+ RET // Continue IRQ processing
+
+ // }
+// }
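As the sample's irqFirstLevelHandler shows, the C-level irqHandler runs between _tx_thread_context_save and _tx_thread_context_restore. The sketch below is a hypothetical handler body; the real sample interrogates the GIC to identify and acknowledge the interrupt, which is omitted here, and only the ThreadX tick call is shown.

/* Hypothetical C-level IRQ handler called from irqFirstLevelHandler.
   GIC acknowledge/end-of-interrupt handling is omitted. */
void irqHandler(void)
{
    extern void _tx_timer_interrupt(void);

    /* Drive the ThreadX time base from the periodic timer interrupt. */
    _tx_timer_interrupt();
}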
diff --git a/ports/cortex_a53/ac6/src/tx_thread_fp_disable.c b/ports/cortex_a53/ac6/src/tx_thread_fp_disable.c
new file mode 100644
index 00000000..3e5d7e21
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_fp_disable.c
@@ -0,0 +1,97 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_disable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_FALSE;
+ }
+ }
+}
+
diff --git a/ports/cortex_a53/ac6/src/tx_thread_fp_enable.c b/ports/cortex_a53/ac6/src/tx_thread_fp_enable.c
new file mode 100644
index 00000000..4e69205c
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_fp_enable.c
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function enables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_enable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now setup the FP enable flag in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_TRUE;
+ }
+ }
+}
+
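Because the scheduler saves and restores q0-q31 and FPSR/FPCR only when the thread's tx_thread_fp_enable flag is set (and the library was built with ENABLE_ARM_FP), threads that use floating point should opt in explicitly. A small illustrative thread entry; the function name and the arithmetic are placeholders.

#include "tx_api.h"

/* Illustrative thread entry: enable FP context preservation before
   touching floating-point state, and disable it when finished. */
VOID math_thread_entry(ULONG input)
{
double result;

    (void)input;

    tx_thread_fp_enable();              /* preserve q registers/FPSR/FPCR for this thread */

    result = 3.14159265 * 2.0;          /* FP work is now safe across preemption */
    (void)result;

    tx_thread_fp_disable();             /* stop saving FP context once no longer needed */
}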
diff --git a/ports/cortex_a53/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a53/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..6a5a7741
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,81 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control, @function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x1, DAIF // Pickup current interrupt posture
+
+ /* Apply the new interrupt posture. */
+
+ MSR DAIF, x0 // Set new interrupt posture
+ MOV x0, x1 // Setup return value
+ RET // Return to caller
+// }
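This routine backs the application-level tx_interrupt_control service; the TX_INT_DISABLE and TX_INT_ENABLE values defined in tx_port.h map directly onto the DAIF value it writes. A minimal usage sketch with an illustrative function name:

#include "tx_api.h"

/* Illustrative critical section via the interrupt control service. */
VOID update_shared_state(VOID)
{
UINT old_posture;

    old_posture = tx_interrupt_control(TX_INT_DISABLE);    /* mask IRQ and FIQ */

    /* ... touch data shared with ISRs ... */

    tx_interrupt_control(old_posture);                      /* restore prior posture */
}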
diff --git a/ports/cortex_a53/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a53/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..d0062ef8
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,79 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_disable(void)
+// {
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable, @function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x0, DAIF // Pickup current interrupt lockout posture
+
+ /* Mask interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a53/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..1b6261ba
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,77 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore, @function
+_tx_thread_interrupt_restore:
+
+ /* Restore the old interrupt posture. */
+
+ MSR DAIF, x0 // Setup the old posture
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_thread_schedule.S b/ports/cortex_a53/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..9a7a7262
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_schedule(VOID)
+// {
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule, @function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+ MSR DAIFClr, 0x3 // Enable interrupts
+
+ /* Wait for a thread to execute. */
+ // do
+ // {
+
+ LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+#ifdef TX_ENABLE_WFI
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BNE _tx_thread_schedule_thread //
+ WFI //
+ B __tx_thread_schedule_loop // Keep looking for a thread
+_tx_thread_schedule_thread:
+#else
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+#endif
+
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Setup the current thread pointer. */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR x0, [x1, #0] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
+
+ LDR w2, [x0, #4] // Pickup run counter
+ LDR w3, [x0, #36] // Pickup time-slice for this thread
+ ADD w2, w2, #1 // Increment thread run-counter
+ STR w2, [x0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ // variable
+ LDR x4, [x0, #8] // Switch stack pointers
+ MOV sp, x4 //
+ STR w3, [x2, #0] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV x19, x0 // Save x0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV x0, x19 // Restore x0
+#endif
+
+ /* Switch to the thread's stack. */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ CMP x5, #0 // Check for synchronous context switch (ELR_EL1 = NULL)
+ BEQ _tx_solicited_return
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_interrupt_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q30, q31, [sp], #32 // Recover q30, q31
+ LDP q28, q29, [sp], #32 // Recover q28, q29
+ LDP q26, q27, [sp], #32 // Recover q26, q27
+ LDP q24, q25, [sp], #32 // Recover q24, q25
+ LDP q22, q23, [sp], #32 // Recover q22, q23
+ LDP q20, q21, [sp], #32 // Recover q20, q21
+ LDP q18, q19, [sp], #32 // Recover q18, q19
+ LDP q16, q17, [sp], #32 // Recover q16, q17
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+ LDP q6, q7, [sp], #32 // Recover q6, q7
+ LDP q4, q5, [sp], #32 // Recover q4, q5
+ LDP q2, q3, [sp], #32 // Recover q2, q3
+ LDP q0, q1, [sp], #32 // Recover q0, q1
+_skip_interrupt_fp_restore:
+#endif
+    LDP     x28, x29, [sp], #16                 // Recover x28, x29
+ LDP x26, x27, [sp], #16 // Recover x26, x27
+ LDP x24, x25, [sp], #16 // Recover x24, x25
+ LDP x22, x23, [sp], #16 // Recover x22, x23
+ LDP x20, x21, [sp], #16 // Recover x20, x21
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+_tx_solicited_return:
+
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_solicited_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+_skip_solicited_fp_restore:
+#endif
+ LDP x27, x28, [sp], #16 // Recover x27, x28
+ LDP x25, x26, [sp], #16 // Recover x25, x26
+ LDP x23, x24, [sp], #16 // Recover x23, x24
+ LDP x21, x22, [sp], #16 // Recover x21, x22
+ LDP x19, x20, [sp], #16 // Recover x19, x20
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ MSR DAIF, x4 // Recover DAIF
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_thread_stack_build.S b/ports/cortex_a53/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..5b7e945a
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build, @function
+_tx_thread_stack_build:
+
+
+    /* Build an interrupt frame.  On ARMv8-A it should look like this:
+
+ Stack Top: SSPR Initial SSPR
+ ELR Point of interrupt
+ x28 Initial value for x28
+ not used Not used
+ x26 Initial value for x26
+ x27 Initial value for x27
+ x24 Initial value for x24
+ x25 Initial value for x25
+ x22 Initial value for x22
+ x23 Initial value for x23
+ x20 Initial value for x20
+ x21 Initial value for x21
+ x18 Initial value for x18
+ x19 Initial value for x19
+ x16 Initial value for x16
+ x17 Initial value for x17
+ x14 Initial value for x14
+ x15 Initial value for x15
+ x12 Initial value for x12
+ x13 Initial value for x13
+ x10 Initial value for x10
+ x11 Initial value for x11
+ x8 Initial value for x8
+ x9 Initial value for x9
+ x6 Initial value for x6
+ x7 Initial value for x7
+ x4 Initial value for x4
+ x5 Initial value for x5
+ x2 Initial value for x2
+ x3 Initial value for x3
+ x0 Initial value for x0
+ x1 Initial value for x1
+ x29 Initial value for x29 (frame pointer)
+ x30 Initial value for x30 (link register)
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR x4, [x0, #24] // Pickup end of stack area
+ BIC x4, x4, #0xF // Ensure 16-byte alignment
+
+ /* Actually build the stack frame. */
+
+ MOV x2, #0 // Build clear value
+ MOV x3, #0 //
+
+ STP x2, x3, [x4, #-16]! // Set backtrace to 0
+ STP x2, x3, [x4, #-16]! // Set initial x29, x30
+ STP x2, x3, [x4, #-16]! // Set initial x0, x1
+ STP x2, x3, [x4, #-16]! // Set initial x2, x3
+ STP x2, x3, [x4, #-16]! // Set initial x4, x5
+ STP x2, x3, [x4, #-16]! // Set initial x6, x7
+ STP x2, x3, [x4, #-16]! // Set initial x8, x9
+ STP x2, x3, [x4, #-16]! // Set initial x10, x11
+ STP x2, x3, [x4, #-16]! // Set initial x12, x13
+ STP x2, x3, [x4, #-16]! // Set initial x14, x15
+ STP x2, x3, [x4, #-16]! // Set initial x16, x17
+ STP x2, x3, [x4, #-16]! // Set initial x18, x19
+ STP x2, x3, [x4, #-16]! // Set initial x20, x21
+ STP x2, x3, [x4, #-16]! // Set initial x22, x23
+ STP x2, x3, [x4, #-16]! // Set initial x24, x25
+ STP x2, x3, [x4, #-16]! // Set initial x26, x27
+ STP x2, x3, [x4, #-16]! // Set initial x28
+#ifdef EL1
+ MOV x2, #0x4 // Build initial SPSR (EL1)
+#else
+#ifdef EL2
+ MOV x2, #0x8 // Build initial SPSR (EL2)
+#else
+ MOV x2, #0xC // Build initial SPSR (EL3)
+#endif
+#endif
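+ // Note: 0x4, 0x8 and 0xC are the SPSR_ELx.M[3:0] encodings for EL1t, EL2t
+ // and EL3t, i.e. the thread is resumed at the selected exception level
+ // using SP_EL0 with the DAIF interrupt masks cleared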
+ MOV x3, x1 // Build initial ELR
+ STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
+
+ /* Setup stack pointer. */
+ // thread_ptr -> tx_thread_stack_ptr = x4;
+
+ STR x4, [x0, #8] // Save stack pointer in thread's control block
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_thread_system_return.S b/ports/cortex_a53/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..7d42b63d
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,151 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get clobbered by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_system_return(VOID)
+// {
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return, @function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ MRS x0, DAIF // Pickup DAIF
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+ STP x19, x20, [sp, #-16]! // Save x19, x20
+ STP x21, x22, [sp, #-16]! // Save x21, x22
+ STP x23, x24, [sp, #-16]! // Save x23, x24
+ STP x25, x26, [sp, #-16]! // Save x25, x26
+ STP x27, x28, [sp, #-16]! // Save x27, x28
+ LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR x6, [x5, #0] // Pickup current thread pointer
+
+#ifdef ENABLE_ARM_FP
+ LDR w7, [x6, #248] // Pickup FP enable flag
+ CMP w7, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+
+ MOV x1, #0 // Clear x1
+ STP x0, x1, [sp, #-16]! // Save DAIF and clear value for ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ MOV x19, x5 // Save x5
+ MOV x20, x6 // Save x6
+ BL _tx_execution_thread_exit // Call the thread exit function
+ MOV x5, x19 // Restore x5
+ MOV x6, x20 // Restore x6
+#endif
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR w1, [x2, #0] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
+
+ MOV x4, sp //
+ STR x4, [x6, #8] // Save thread stack pointer
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+ /* Determine if the time-slice is active. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ MOV x4, #0 // Build clear value
+ CMP w1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save the current remaining time-slice. */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w4, [x2, #0] // Clear time-slice
+ STR w1, [x6, #36] // Store current time-slice
+
+ // }
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+ // _tx_thread_current_ptr = TX_NULL;
+
+ STR x4, [x5, #0] // Clear current thread pointer
+
+ B _tx_thread_schedule // Jump to scheduler!
+
+// }
diff --git a/ports/cortex_a53/ac6/src/tx_timer_interrupt.S b/ports/cortex_a53/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..5810b5c2
--- /dev/null
+++ b/ports/cortex_a53/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_timer_interrupt(VOID)
+// {
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt, @function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+ // _tx_timer_system_clock++;
+
+ LDR x1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR w0, [x1, #0] // Pickup system clock
+ ADD w0, w0, #1 // Increment system clock
+ STR w0, [x1, #0] // Store new system clock
+
+ /* Test for time-slice expiration. */
+ /* if (_tx_timer_time_slice)
+ { */
+
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+ /* _tx_timer_time_slice--; */
+
+ SUB w2, w2, #1 // Decrement the time-slice
+ STR w2, [x3, #0] // Store new time-slice value
+
+ /* Check for expiration. */
+ /* if (_tx_timer_time_slice == 0) */
+
+ CMP w2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+ /* _tx_timer_expired_time_slice = TX_TRUE; */
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV w0, #1 // Build expired value
+ STR w0, [x3, #0] // Set time-slice expiration flag
+
+ /* } */
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+ // if (*_tx_timer_current_ptr)
+ // {
+
+ LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
+ LDR x0, [x1, #0] // Pickup current timer
+ LDR x2, [x0, #0] // Pickup timer list entry
+ CMP x2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+ // _tx_timer_expired = TX_TRUE;
+
+ LDR x3, =_tx_timer_expired // Pickup expiration flag address
+ MOV w2, #1 // Build expired value
+ STR w2, [x3, #0] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+ // }
+ // else
+ // {
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ // _tx_timer_current_ptr++;
+
+ ADD x0, x0, #8 // Move to next timer
+
+ /* Check for wrap-around. */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
+
+ LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
+ LDR x2, [x3, #0] // Pickup list end
+ CMP x0, x2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wrap-around logic
+
+ /* Wrap to beginning of list. */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
+
+ LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
+ LDR x0, [x3, #0] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR x0, [x1, #0] // Store new current timer pointer
+ // }
+
+__tx_timer_done:
+
+
+ /* See if anything has expired. */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
+ LDR w2, [x3, #0] // Pickup time-slice expired flag
+ CMP w2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR x1, =_tx_timer_expired // Pickup addr of other expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+
+ /* Did a timer expire? */
+ // if (_tx_timer_expired)
+ // {
+
+ LDR x1, =_tx_timer_expired // Pickup addr of expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ // _tx_timer_expiration_process();
+
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+ // }
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+ // if (_tx_timer_expired_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR w2, [x3, #0] // Pickup the actual flag
+ CMP w2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+ // _tx_thread_time_slice();
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+ // }
+
+__tx_timer_not_ts_expiration:
+
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ // }
+
+__tx_timer_nothing_expired:
+
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/.cproject b/ports/cortex_a53/gnu/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..1c32cb32
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/.cproject
@@ -0,0 +1,170 @@
+<!-- Eclipse CDT managed-build project configuration (.cproject) for sample_threadx; XML content omitted -->
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/.project b/ports/cortex_a53/gnu/example_build/sample_threadx/.project
new file mode 100644
index 00000000..a1b15572
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/.project
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>sample_threadx</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+			<triggers>clean,full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+			<triggers>full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.cdt.core.cnature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+	</natures>
+</projectDescription>
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3.h b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..dfe37586
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
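+
+/*
+ * Example (illustrative only): once affinity routing (ARE) is enabled,
+ * route SPI 42 to the core with affinity 0.0.0.1:
+ *
+ *     SetSPIRoute(42, gicv3PackAffinity(0, 0, 0, 1), gicdirouter_ModeSpecific);
+ */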
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
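+
+/*
+ * Example (illustrative only, one possible bring-up sequence): write an
+ * absolute control value first, then set or clear individual bits later:
+ *
+ *     ConfigGICD(gicdctlr_ARE_S | gicdctlr_ARE_NS);   // absolute value
+ *     EnableGICD(gicdctlr_EnableAll);                 // set bits only
+ *     DisableGICD(gicdctlr_EnableGrp0);               // clear bits only
+ */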
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
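+
+/*
+ * Example (illustrative only): the primary core programs the ARE bits and
+ * the secondaries wait for them to take effect before touching their
+ * Redistributors:
+ *
+ *     SyncAREinGICD(gicdctlr_ARE_S | gicdctlr_ARE_NS, 0);   // primary core
+ *     SyncAREinGICD(gicdctlr_ARE_S | gicdctlr_ARE_NS, 1);   // secondary cores
+ */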
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - mark a shared peripheral interrupt as belonging
+ * to a security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - mark a block of 32 shared peripheral
+ * interrupts as belonging to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - mark all shared peripheral interrupts
+ * as belonging to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * The model for calling Redistributor functions is that each function
+ * identifies the target Redistributor explicitly through its first
+ * parameter (gicr); no Redistributor is implicitly "selected" for
+ * subsequent calls
+ */
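+
+/*
+ * Example (illustrative only): enable PPI 30 (commonly the EL1 physical
+ * timer) on Redistributor 0 at priority 0xA0:
+ *
+ *     SetPrivateIntPriority(0, 30, 0xA0);
+ *     EnablePrivateInt(0, 30);
+ */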
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
+
+/*
+ * GetPrivateIntPriority - read the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * Int priority
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * GetPrivateIntPending - query whether a private (SGI/PPI) interrupt is pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntSecurity - mark a private (SGI/PPI) interrupt as
+ * belonging to a security group
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetPrivateIntSecurityBlock - mark all 32 private (SGI/PPI)
+ * interrupts as belonging to a security group
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * group - the group for the interrupt
+ *
+ * Returns:
+ *
+ *
+ */
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group);
+
+#endif /* ndef GICV3_h */
+
+/* EOF GICv3.h */
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_aliases.h
new file mode 100644
index 00000000..826ba973
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_aliases.h
@@ -0,0 +1,113 @@
+//
+// Aliases for GICv3 registers
+//
+// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef GICV3_ALIASES_H
+#define GICV3_ALIASES_H
+
+#ifndef __clang__
+
+/*
+ * Mapping of MSR and MRS to physical and virtual CPU interface registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-5
+ */
+#define ICC_AP0R0_EL1 S3_0_C12_C8_4
+#define ICC_AP0R1_EL1 S3_0_C12_C8_5
+#define ICC_AP0R2_EL1 S3_0_C12_C8_6
+#define ICC_AP0R3_EL1 S3_0_C12_C8_7
+
+#define ICC_AP1R0_EL1 S3_0_C12_C9_0
+#define ICC_AP1R1_EL1 S3_0_C12_C9_1
+#define ICC_AP1R2_EL1 S3_0_C12_C9_2
+#define ICC_AP1R3_EL1 S3_0_C12_C9_3
+
+#define ICC_ASGI1R_EL1 S3_0_C12_C11_6
+
+#define ICC_BPR0_EL1 S3_0_C12_C8_3
+#define ICC_BPR1_EL1 S3_0_C12_C12_3
+
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+
+#define ICC_DIR_EL1 S3_0_C12_C11_1
+
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+
+/*
+ * Mapping of MSR and MRS to virtual interface control registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-6
+ */
+#define ICH_AP0R0_EL2 S3_4_C12_C8_0
+#define ICH_AP0R1_EL2 S3_4_C12_C8_1
+#define ICH_AP0R2_EL2 S3_4_C12_C8_2
+#define ICH_AP0R3_EL2 S3_4_C12_C8_3
+
+#define ICH_AP1R0_EL2 S3_4_C12_C9_0
+#define ICH_AP1R1_EL2 S3_4_C12_C9_1
+#define ICH_AP1R2_EL2 S3_4_C12_C9_2
+#define ICH_AP1R3_EL2 S3_4_C12_C9_3
+
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+
+#define ICH_ELRSR_EL2 S3_4_C12_C11_5
+
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define ICH_LR0_EL2 S3_4_C12_C12_0
+#define ICH_LR1_EL2 S3_4_C12_C12_1
+#define ICH_LR2_EL2 S3_4_C12_C12_2
+#define ICH_LR3_EL2 S3_4_C12_C12_3
+#define ICH_LR4_EL2 S3_4_C12_C12_4
+#define ICH_LR5_EL2 S3_4_C12_C12_5
+#define ICH_LR6_EL2 S3_4_C12_C12_6
+#define ICH_LR7_EL2 S3_4_C12_C12_7
+#define ICH_LR8_EL2 S3_4_C12_C13_0
+#define ICH_LR9_EL2 S3_4_C12_C13_1
+#define ICH_LR10_EL2 S3_4_C12_C13_2
+#define ICH_LR11_EL2 S3_4_C12_C13_3
+#define ICH_LR12_EL2 S3_4_C12_C13_4
+#define ICH_LR13_EL2 S3_4_C12_C13_5
+#define ICH_LR14_EL2 S3_4_C12_C13_6
+#define ICH_LR15_EL2 S3_4_C12_C13_7
+
+#endif /* not __clang__ */
+
+#endif /* GICV3_ALIASES_H */
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicc.h
new file mode 100644
index 00000000..998d92b5
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicc.h
@@ -0,0 +1,254 @@
+/*
+ * GICv3_gicc.h - prototypes and inline functions for GICC system register operations
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_gicc_h
+#define GICV3_gicc_h
+
+#include "GICv3_aliases.h"
+
+#define stringify_no_expansion(x) #x
+#define stringify(x) stringify_no_expansion(x)
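+
+/*
+ * stringify() pastes the system register aliases from GICv3_aliases.h into
+ * the inline assembly strings below; for example, with the aliases in use,
+ * setICC_PMR() expands to roughly:
+ *
+ *     asm("msr S3_0_C4_C6_0, %0\n; isb" :: "r" ((uint64_t)priority));
+ */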
+
+/**********************************************************************/
+
+typedef enum
+{
+ sreSRE = (1 << 0),
+ sreDFB = (1 << 1),
+ sreDIB = (1 << 2),
+ sreEnable = (1 << 3)
+} ICC_SREBits_t;
+
+static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL2)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL2(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL2)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ igrpEnable = (1 << 0),
+ igrpEnableGrp1NS = (1 << 0),
+ igrpEnableGrp1S = (1 << 2)
+} ICC_IGRPBits_t;
+
+static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN0_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ ctlrCBPR = (1 << 0),
+ ctlrCBPR_EL1S = (1 << 0),
+ ctlrEOImode = (1 << 1),
+ ctlrCBPR_EL1NS = (1 << 1),
+ ctlrEOImode_EL3 = (1 << 2),
+ ctlrEOImode_EL1S = (1 << 3),
+ ctlrEOImode_EL1NS = (1 << 4),
+ ctlrRM = (1 << 5),
+ ctlrPMHE = (1 << 6)
+} ICC_CTLRBits_t;
+
+static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+static inline uint64_t getICC_IAR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_IAR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_EOIR0(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR0_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_EOIR1(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR1_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_DIR(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_DIR_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_PMR(uint32_t priority)
+{
+ asm("msr "stringify(ICC_PMR_EL1)", %0\n; isb" :: "r" ((uint64_t)priority));
+}
+
+static inline void setICC_BPR0(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR0_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline void setICC_BPR1(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR1_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline uint64_t getICC_BPR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_BPR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_RPR(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_RPR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ sgirIRMTarget = 0,
+ sgirIRMAll = (1ull << 40)
+} ICC_SGIRBits_t;
+
+static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI0R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
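+
+/*
+ * Example (illustrative only): send SGI 3 to cores 0 and 1 of cluster
+ * 0.0.0 using the target-list form:
+ *
+ *     setICC_SGI1R(0, 0, 0, sgirIRMTarget, 0x3, 3);
+ */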
+
+static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_ASGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+#endif /* ndef GICV3_gicc_h */
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicd.c
new file mode 100644
index 00000000..464ecced
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicd.c
@@ -0,0 +1,339 @@
+/*
+ * GICv3_gicd.c - generic driver code for GICv3 distributor
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include <stdint.h>
+
+#include "GICv3.h"
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000
+ const volatile uint32_t GICD_TYPER; // +0x0004
+ const volatile uint32_t GICD_IIDR; // +0x0008
+
+ const volatile uint32_t padding0; // +0x000c
+
+ volatile uint32_t GICD_STATUSR; // +0x0010
+
+ const volatile uint32_t padding1[3]; // +0x0014
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040
+ const volatile uint32_t padding2; // +0x0044
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048
+ const volatile uint32_t padding3; // +0x004c
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050
+ const volatile uint32_t padding4; // +0x0054
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058
+
+ const volatile uint32_t padding5[3]; // +0x005c
+
+ volatile uint32_t GICD_SEIR; // +0x0068
+
+ const volatile uint32_t padding6[5]; // +0x006c
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400
+ volatile uint8_t GICD_ITARGETSR[1024]; // +0x0800
+ volatile uint32_t GICD_ICFGR[64]; // +0x0c00
+ volatile uint32_t GICD_IGRPMODR[32]; // +0x0d00
+ const volatile uint32_t padding7[32]; // +0x0d80
+ volatile uint32_t GICD_NSACR[64]; // +0x0e00
+
+ volatile uint32_t GICD_SGIR; // +0x0f00
+
+ const volatile uint32_t padding8[3]; // +0x0f04
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0f10
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0f20
+
+ const volatile uint32_t padding9[52]; // +0x0f30
+ const volatile uint32_t padding10[5120]; // +0x1000
+
+ volatile uint64_t GICD_IROUTER[1024]; // +0x6000
+} GICv3_distributor;
+
+/*
+ * use the scatter file to place GICD
+ */
+GICv3_distributor __attribute__((section(".gicd"))) gicd;
+
+void ConfigGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR |= flags;
+}
+
+void DisableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR &= ~flags;
+}
+
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync)
+{
+ if (dosync)
+ {
+ const uint32_t tmask = gicdctlr_ARE_S | gicdctlr_ARE_NS;
+ const uint32_t tval = flags & tmask;
+
+ while ((gicd.GICD_CTLR & tmask) != tval)
+ continue;
+ }
+ else
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ISENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void DisableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ICENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void SetSPIPriority(uint32_t id, uint32_t priority)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ gicd.GICD_IPRIORITYR[bank] = priority;
+}
+
+uint32_t GetSPIPriority(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ return (uint32_t)(gicd.GICD_IPRIORITYR[bank]);
+}
+
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ gicd.GICD_IROUTER[bank] = affinity | (uint64_t)mode;
+}
+
+uint64_t GetSPIRoute(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ return gicd.GICD_IROUTER[bank];
+}
+
+void SetSPITarget(uint32_t id, uint32_t target)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ gicd.GICD_ITARGETSR[bank] = target;
+}
+
+uint32_t GetSPITarget(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ /*
+ * GICD_ITARGETSR has 4 interrupts per register, i.e. 8-bits of
+ * target bitmap per register
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ return (uint32_t)(gicd.GICD_ITARGETSR[bank]);
+}
+
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config)
+{
+ uint32_t bank, tmp;
+
+ /*
+ * GICD_ICFGR has 16 interrupts per register, i.e. 2-bits of
+ * configuration per register
+ */
+ bank = (id >> 4) & RANGE_LIMIT(gicd.GICD_ICFGR);
+ config &= 3;
+
+ id = (id & 0xf) << 1;
+
+ tmp = gicd.GICD_ICFGR[bank];
+ tmp &= ~(3 << id);
+ tmp |= config << id;
+ gicd.GICD_ICFGR[bank] = tmp;
+}
+
+void SetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ISPENDR[bank] = 1 << id;
+}
+
+void ClearSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ICPENDR[bank] = 1 << id;
+}
+
+uint32_t GetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ return (gicd.GICD_ICPENDR[bank] >> id) & 1;
+}
+
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group)
+{
+ uint32_t bank, groupmod;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_IGROUPR);
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicd.GICD_IGROUPR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGROUPR[bank] &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicd.GICD_IGRPMODR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGRPMODR[bank] &= ~(1 << id);
+}
+
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group)
+{
+ uint32_t groupmod;
+ const uint32_t nbits = (sizeof group * 8) - 1;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ block &= RANGE_LIMIT(gicd.GICD_IGROUPR);
+
+ /*
+ * get each bit of group config duplicated over all 32-bits in a word
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
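+ /*
+ * e.g. gicigroupr_G1NS (bit 0 set) gives group = 0xFFFFFFFF and
+ * groupmod = 0x00000000, so the whole block becomes Non-secure Group 1
+ */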
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicd.GICD_IGROUPR[block] = group;
+ gicd.GICD_IGRPMODR[block] = groupmod;
+}
+
+void SetSPISecurityAll(GICIGROUPRBits_t group)
+{
+ uint32_t block;
+
+ /*
+ * GICD_TYPER.ITLinesNumber gives (No. SPIS / 32) - 1, and we
+ * want to iterate over all blocks excluding 0 (which are the
+ * SGI/PPI interrupts, and not relevant here)
+ */
+ for (block = (gicd.GICD_TYPER & ((1 << 5) - 1)); block > 0; --block)
+ SetSPISecurityBlock(block, group);
+}
+
+/* EOF GICv3_gicd.c */
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicr.c
new file mode 100644
index 00000000..26b5af8a
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/GICv3_gicr.c
@@ -0,0 +1,308 @@
+/*
+ * GICv3_gicr.c - generic driver code for GICv3 redistributor
+ *
+ * Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include "GICv3.h"
+
+/*
+ * physical LPI Redistributor register map
+ */
+typedef struct
+{
+ volatile uint32_t GICR_CTLR; // +0x0000 - RW - Redistributor Control Register
+ const volatile uint32_t GICR_IIDR; // +0x0004 - RO - Implementer Identification Register
+ const volatile uint32_t GICR_TYPER[2]; // +0x0008 - RO - Redistributor Type Register
+ volatile uint32_t GICR_STATUSR; // +0x0010 - RW - Error Reporting Status Register, optional
+ volatile uint32_t GICR_WAKER; // +0x0014 - RW - Redistributor Wake Register
+ const volatile uint32_t padding1[2]; // +0x0018 - RESERVED
+#ifndef USE_GIC600
+ volatile uint32_t IMPDEF1[8]; // +0x0020 - ?? - IMPLEMENTATION DEFINED
+#else
+ volatile uint32_t GICR_FCTLR; // +0x0020 - RW - Function Control Register
+ volatile uint32_t GICR_PWRR; // +0x0024 - RW - Power Management Control Register
+ volatile uint32_t GICR_CLASS; // +0x0028 - RW - Class Register
+ const volatile uint32_t padding2[5]; // +0x002C - RESERVED
+#endif
+ volatile uint64_t GICR_SETLPIR; // +0x0040 - WO - Set LPI Pending Register
+ volatile uint64_t GICR_CLRLPIR; // +0x0048 - WO - Clear LPI Pending Register
+ const volatile uint32_t padding3[8]; // +0x0050 - RESERVED
+ volatile uint64_t GICR_PROPBASER; // +0x0070 - RW - Redistributor Properties Base Address Register
+ volatile uint64_t GICR_PENDBASER; // +0x0078 - RW - Redistributor LPI Pending Table Base Address Register
+ const volatile uint32_t padding4[8]; // +0x0080 - RESERVED
+ volatile uint64_t GICR_INVLPIR; // +0x00A0 - WO - Redistributor Invalidate LPI Register
+ const volatile uint32_t padding5[2]; // +0x00A8 - RESERVED
+ volatile uint64_t GICR_INVALLR; // +0x00B0 - WO - Redistributor Invalidate All Register
+ const volatile uint32_t padding6[2]; // +0x00B8 - RESERVED
+ volatile uint64_t GICR_SYNCR; // +0x00C0 - RO - Redistributor Synchronize Register
+ const volatile uint32_t padding7[2]; // +0x00C8 - RESERVED
+ const volatile uint32_t padding8[12]; // +0x00D0 - RESERVED
+ volatile uint64_t IMPDEF2; // +0x0100 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding9[2]; // +0x0108 - RESERVED
+ volatile uint64_t IMPDEF3; // +0x0110 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding10[2]; // +0x0118 - RESERVED
+} GICv3_redistributor_RD;
+
+/*
+ * SGI and PPI Redistributor register map
+ */
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR0; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+ const volatile uint32_t padding8[31]; // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+ volatile uint32_t GICR_ICnoFGR[2]; // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+ volatile uint32_t GICR_IGRPMODR0; // +0x0D00 - RW - Interrupt Group Modifier Register
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+} GICv3_redistributor_SGI;
+
+/*
+ * We have a multiplicity of GIC Redistributors; on the GIC-AEM and
+ * GIC-500 they are arranged as one 128KB region per redistributor: one
+ * 64KB page of GICR LPI registers, and one 64KB page of GICR Private
+ * Int registers
+ */
+typedef struct
+{
+ union
+ {
+ GICv3_redistributor_RD RD_base;
+ uint8_t padding[64 * 1024];
+ } RDblock;
+
+ union
+ {
+ GICv3_redistributor_SGI SGI_base;
+ uint8_t padding[64 * 1024];
+ } SGIblock;
+} GICv3_GICR;
+
+/*
+ * use the scatter file to place GIC Redistributor base address
+ *
+ * although this code doesn't know how many Redistributor banks
+ * a particular system will have, we declare gicrbase as an array
+ * to avoid unwanted compiler optimisations when calculating the
+ * base of a particular Redistributor bank
+ */
+static const GICv3_GICR gicrbase[2] __attribute__((section (".gicr")));
+
+/**********************************************************************/
+
+/*
+ * utility functions to calculate base of a particular
+ * Redistributor bank
+ */
+
+static inline GICv3_redistributor_RD *const getgicrRD(uint32_t gicr)
+{
+ GICv3_GICR *const arraybase = (GICv3_GICR *const)&gicrbase;
+
+ return &((arraybase + gicr)->RDblock.RD_base);
+}
+
+static inline GICv3_redistributor_SGI *const getgicrSGI(uint32_t gicr)
+{
+ GICv3_GICR *arraybase = (GICv3_GICR *)(&gicrbase);
+
+ return &(arraybase[gicr].SGIblock.SGI_base);
+}
+
+/**********************************************************************/
+
+// This function walks a block of RDs to find one with the matching affinity
+uint32_t GetGICR(uint32_t affinity)
+{
+ GICv3_redistributor_RD* gicr;
+ uint32_t index = 0;
+
+ do
+ {
+ gicr = getgicrRD(index);
+ if (gicr->GICR_TYPER[1] == affinity)
+ return index;
+
+ index++;
+ }
+ while((gicr->GICR_TYPER[0] & (1<<4)) == 0); // Keep looking until GICR_TYPER.Last reports no more RDs in block
+
+ return 0xFFFFFFFF; // return -1 to signal no RD found
+}
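+
+/*
+ * Example (illustrative only): look up the calling core's Redistributor by
+ * its packed affinity value as reported in GICR_TYPER (affinity below
+ * stands for that caller-supplied value) and wake it:
+ *
+ *     uint32_t gicr = GetGICR(affinity);
+ *     if (gicr != 0xFFFFFFFF)
+ *         WakeupGICR(gicr);
+ */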
+
+void WakeupGICR(uint32_t gicr)
+{
+ GICv3_redistributor_RD *const gicrRD = getgicrRD(gicr);
+#ifdef USE_GIC600
+ //Power up Re-distributor for GIC-600
+ gicrRD->GICR_PWRR = 0x2;
+#endif
+
+ /*
+ * step 1 - ensure GICR_WAKER.ProcessorSleep is off
+ */
+ gicrRD->GICR_WAKER &= ~gicrwaker_ProcessorSleep;
+
+ /*
+ * step 2 - wait for children asleep to be cleared
+ */
+ while ((gicrRD->GICR_WAKER & gicrwaker_ChildrenAsleep) != 0)
+ continue;
+
+ /*
+ * OK, GICR is go
+ */
+ return;
+}
+
+void EnablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISENABLER = 1 << id;
+}
+
+void DisablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICENABLER = 1 << id;
+}
+
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ gicrSGI->GICR_IPRIORITYR[id] = priority;
+}
+
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ return (uint32_t)(gicrSGI->GICR_IPRIORITYR[id]);
+}
+
+void SetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISPENDR = 1 << id;
+}
+
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ICPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICPENDR = 1 << id;
+}
+
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ return (gicrSGI->GICR_ISPENDR >> id) & 0x01;
+}
+
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ uint32_t groupmod;
+
+ /*
+ * GICR_IGROUPR0 is one 32-bit register
+ */
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicrSGI->GICR_IGROUPR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGROUPR0 &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicrSGI->GICR_IGRPMODR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGRPMODR0 &= ~(1 << id);
+}
+
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ const uint32_t nbits = (sizeof group * 8) - 1;
+ uint32_t groupmod;
+
+ /*
+ * get each bit of group config duplicated over all 32-bits
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicrSGI->GICR_IGROUPR0 = group;
+ gicrSGI->GICR_IGRPMODR0 = groupmod;
+}
+
+/* EOF GICv3_gicr.c */
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.S
new file mode 100644
index 00000000..c787c3f5
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.S
@@ -0,0 +1,133 @@
+//
+// Armv8-A AArch64 - Basic Mutex Example
+// Includes the option (USE_LSE_ATOMIC) to use Large System Extension (LSE) atomics introduced in Armv8.1-A
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+ .global _mutex_initialize
+ .global _mutex_acquire
+ .global _mutex_release
+
+//
+// These routines implement the mutex management functions required for running
+// the Arm C library in a multi-threaded environment.
+//
+// They use a value of 0 to represent an unlocked mutex, and 1 for a locked mutex
+//
+// **********************************************************************
+//
+
+ .type _mutex_initialize, "function"
+ .cfi_startproc
+_mutex_initialize:
+
+ //
+ // mark the mutex as unlocked
+ //
+ mov w1, #0
+ str w1, [x0]
+
+ //
+ // we are running multi-threaded, so set a non-zero return
+ // value (function prototype says use 1)
+ //
+ mov w0, #1
+ ret
+ .cfi_endproc
+
+#if !defined(USE_LSE_ATOMIC)
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+
+ //
+ // send ourselves an event, so we don't stick on the wfe at the
+ // top of the loop
+ //
+ sevl
+
+ //
+ // wait until the mutex is available
+ //
+loop:
+ wfe
+ ldaxr w1, [x0]
+ cbnz w1, loop
+
+ //
+ // mutex is (at least, it was) available - try to claim it
+ //
+ mov w1, #1
+ stxr w2, w1, [x0]
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+
+ mov w1, #0
+ stlr w1, [x0]
+ ret
+ .cfi_endproc
+
+#else // LSE version
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+ // This uses a "ticket lock". The lock is stored as a 32-bit value:
+ // - the upper 16-bits record the thread's ticket number ("take a ticket")
+ // - the lower 16-bits record the ticket being served ("now serving")
+
+ // atomically load then increment the thread's ticket number ("take a ticket")
+ mov w3, #(1 << 16)
+ ldadda w3, w1, [x0]
+
+ // is the ticket now being served?
+ eor w2, w1, w1, ror #16
+ cbz w2, loop_exit
+
+ // no, so wait for the ticket to be served
+
+ // send a local event to avoid missing an unlock before the exclusive load
+ sevl
+
+loop:
+ wfe
+ ldaxrh w3, [x0]
+ eor w2, w3, w1, lsr #16
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+loop_exit:
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+ mov w1, #1
+ staddlh w1, [x0]
+ ret
+ .cfi_endproc
+#endif
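
As a cross-check of the LSE path, here is a minimal C11-atomics sketch of the same ticket lock (upper halfword = next ticket, lower halfword = now serving). It is illustrative only and not part of the Arm library glue above; note the release add touches the whole word, whereas the assembly's halfword add cannot carry into the ticket field at wrap:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { _Atomic uint32_t tickets; } ticket_lock_t;  /* [31:16] next, [15:0] serving */

    static void ticket_lock(ticket_lock_t *l)
    {
        /* "take a ticket": atomically grab and bump the upper halfword */
        uint32_t old = atomic_fetch_add_explicit(&l->tickets, 1u << 16,
                                                 memory_order_acquire);
        uint16_t my = (uint16_t)(old >> 16);

        /* spin until "now serving" matches our ticket (the assembly uses WFE here) */
        while ((uint16_t)atomic_load_explicit(&l->tickets, memory_order_acquire) != my)
            ;
    }

    static void ticket_unlock(ticket_lock_t *l)
    {
        /* advance "now serving" with release ordering */
        atomic_fetch_add_explicit(&l->tickets, 1u, memory_order_release);
    }
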
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.h b/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.h
new file mode 100644
index 00000000..ec1a1d28
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/MP_Mutexes.h
@@ -0,0 +1,66 @@
+/*
+ * Armv8-A AArch64 - Basic Mutex Example
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef MP_MUTEX_H
+#define MP_MUTEX_H
+
+/*
+ * The Arm C library calls-out to these functions to manage multithreading.
+ * They can also be called by user application code.
+ *
+ * Mutex type is specified by the Arm C library
+ *
+ * Declare function prototypes for libc mutex routines
+ */
+typedef signed int *mutex;
+
+/*
+ * int _mutex_initialize(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ * 0 - application is non-threaded
+ * 1 - application is threaded
+ * The C library uses the returned value to determine whether it is being used in a multithreaded environment.
+ */
+int _mutex_initialize(mutex *m);
+
+/*
+ * void _mutex_acquire(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * Routine does not return until the mutex has been claimed. A load-acquire
+ * is used to guarantee that the mutex claim is properly ordered with
+ * respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_acquire(mutex *m);
+
+/*
+ * void _mutex_release(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * A store-release is used to guarantee that the mutex release is properly
+ * ordered with respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_release(mutex *m);
+
+#endif
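
A short usage sketch of the contract above from the caller's side; the names are illustrative, and in practice the Arm C library itself makes these calls around its internal shared state:

    #include "MP_Mutexes.h"

    static mutex demo_lock;

    void demo_critical_section(void)
    {
        /* returns 1, telling the C library it is running multithreaded */
        _mutex_initialize(&demo_lock);

        _mutex_acquire(&demo_lock);     /* blocks until the lock is claimed */
        /* ... touch the shared resource ... */
        _mutex_release(&demo_lock);     /* store-release publishes the updates */
    }
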
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a53/gnu/example_build/sample_threadx/PPM_AEM.h
new file mode 100644
index 00000000..f7501eeb
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/PPM_AEM.h
@@ -0,0 +1,66 @@
+//
+// Private Peripheral Map for the v8 Architecture Envelope Model
+//
+// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef PPM_AEM_H
+#define PPM_AEM_H
+
+//
+// Distributor layout
+//
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_IGROUP 0x0080
+#define GICD_ISENABLE 0x0100
+#define GICD_ICENABLE 0x0180
+#define GICD_ISPEND 0x0200
+#define GICD_ICPEND 0x0280
+#define GICD_ISACTIVE 0x0300
+#define GICD_ICACTIVE 0x0380
+#define GICD_IPRIORITY 0x0400
+#define GICD_ITARGETS 0x0800
+#define GICD_ICFG 0x0c00
+#define GICD_PPISR 0x0d00
+#define GICD_SPISR 0x0d04
+#define GICD_SGIR 0x0f00
+#define GICD_CPENDSGI 0x0f10
+#define GICD_SPENDSGI 0x0f20
+#define GICD_PIDR4 0x0fd0
+#define GICD_PIDR5 0x0fd4
+#define GICD_PIDR6 0x0fd8
+#define GICD_PIDR7 0x0fdc
+#define GICD_PIDR0 0x0fe0
+#define GICD_PIDR1 0x0fe4
+#define GICD_PIDR2 0x0fe8
+#define GICD_PIDR3 0x0fec
+#define GICD_CIDR0 0x0ff0
+#define GICD_CIDR1 0x0ff4
+#define GICD_CIDR2 0x0ff8
+#define GICD_CIDR3 0x0ffc
+
+//
+// CPU Interface layout
+//
+#define GICC_CTLR 0x0000
+#define GICC_PMR 0x0004
+#define GICC_BPR 0x0008
+#define GICC_IAR 0x000c
+#define GICC_EOIR 0x0010
+#define GICC_RPR 0x0014
+#define GICC_HPPIR 0x0018
+#define GICC_ABPR 0x001c
+#define GICC_AIAR 0x0020
+#define GICC_AEOIR 0x0024
+#define GICC_AHPPIR 0x0028
+#define GICC_APR0 0x00d0
+#define GICC_NSAPR0 0x00e0
+#define GICC_IIDR 0x00fc
+#define GICC_DIR 0x1000
+
+#endif // PPM_AEM_H
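
These are byte offsets from the distributor and CPU-interface base addresses; a minimal sketch of how one would be used follows. The base address below is an assumption for illustration, not something this header defines:

    #include <stdint.h>
    #include "PPM_AEM.h"

    #define GICC_BASE_ADDR  0x2C002000u   /* assumed platform-specific base */

    static inline uint32_t gicc_read_iar(void)
    {
        /* read the Interrupt Acknowledge Register of a memory-mapped GICv2-style CPU interface */
        return *(volatile uint32_t *)(GICC_BASE_ADDR + GICC_IAR);
    }
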
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..17cceb01
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,393 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_TIMER timer_0;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
+
+/* Define main entry point. */
+
+int main(void)
+{
+
+ /* Initialize timer. */
+ init_timer();
+
+ /* Enter ThreadX. */
+ tx_kernel_enter();
+
+ return 0;
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in a while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these threads compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+ below shows, these threads compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
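
The demo deliberately ignores the return codes from the allocate/create calls to stay compact; below is a minimal sketch of the same stack-allocation pattern with the codes checked, as production code normally would. The wrapper name is illustrative only:

    #include "tx_api.h"

    extern TX_BYTE_POOL byte_pool_0;
    extern TX_THREAD    thread_0;
    extern void         thread_0_entry(ULONG thread_input);

    UINT create_thread_0_checked(void)
    {
        CHAR *stack_ptr;
        UINT  status;

        /* carve the stack out of the byte pool, then create the thread on it */
        status = tx_byte_allocate(&byte_pool_0, (VOID **)&stack_ptr, 1024, TX_NO_WAIT);
        if (status != TX_SUCCESS)
            return status;

        return tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
                                stack_ptr, 1024,
                                1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
    }
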
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..a2971cdf
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,328 @@
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.ld b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.ld
new file mode 100644
index 00000000..eec8f12b
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/sample_threadx.ld
@@ -0,0 +1,245 @@
+/* Linker script to place sections and symbol values.
+ * It references following symbols, which must be defined in code:
+ * start64 : Entry point
+ *
+ * It defines following symbols, which code can use without definition:
+ * __cs3_peripherals
+ * __code_start
+ * __exidx_start
+ * __exidx_end
+ * __data_start
+ * __preinit_array_start
+ * __preinit_array_end
+ * __init_array_start
+ * __init_array_end
+ * __fini_array_start
+ * __fini_array_end
+ * __bss_start__
+ * __bss_end__
+ * __end__
+ * __handler_stack
+ * __stack
+ * __el3_stack
+ * __ttb0_l1
+ * __ttb0_l2_ram
+ * __ttb0_l2_private
+ * __ttb0_l2_periph
+ * __top_of_ram
+ */
+
+ENTRY(start64)
+
+SECTIONS
+{
+ /*
+ * CS3 Peripherals is a 64MB region from 0x1c000000
+ * that includes the following:
+ * System Registers at 0x1C010000
+ * UART0 (PL011) at 0x1C090000
+ * Color LCD Controller (PL111) at 0x1C1F0000
+ * plus a number of others.
+ * CS3_PERIPHERALS is used by the startup code for page-table generation
+ * This region is not truly empty, but we have no
+ * predefined objects that live within it
+ */
+ __cs3_peripherals = 0x1c000000;
+
+ /*
+ * GICv3 distributor
+ */
+ .gicd 0x2f000000 (NOLOAD):
+ {
+ *(.gicd)
+ }
+
+ /*
+ * GICv3 redistributors
+ * 128KB for each redistributor in the system
+ */
+ .gicr 0x2f100000 (NOLOAD):
+ {
+ *(.gicr)
+ }
+
+ .vectors 0x80000000:
+ {
+ __code_start = .;
+ KEEP(*(StartUp))
+ KEEP(*(EL1VECTORS EL2VECTORS EL3VECTORS))
+ }
+
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+
+ .text :
+ {
+ *(.text*)
+ }
+
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+
+ .rodata :
+ {
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+ }
+
+ .eh_frame :
+ {
+ KEEP (*(.eh_frame))
+ }
+
+ .ARM.extab :
+ {
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ }
+
+ .ARM.exidx :
+ {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ }
+
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array ))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT(.fini_array.*)))
+ KEEP (*(.fini_array ))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+
+ .jcr :
+ {
+ KEEP (*(.jcr))
+ }
+
+ .data :
+ {
+ __data_start = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+
+ .bss :
+ {
+ . = ALIGN(4);
+ __bss_start__ = .;
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN(4);
+ __bss_end__ = .;
+ }
+
+ .heap (NOLOAD):
+ {
+ . = ALIGN(64);
+ __end__ = .;
+ PROVIDE(end = .);
+ . = . + 0x1000;
+ }
+
+ .stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x4000;
+ __handler_stack = .;
+ }
+
+ .stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x4000;
+ __stack = .;
+ }
+
+ .el3_stack (NOLOAD):
+ {
+ . = ALIGN(64);
+ . = . + 8 * 0x1000;
+ __el3_stack = .;
+ }
+
+ .ttb0_l1 (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l1 = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_ram (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_ram = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_private (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_private = .;
+ . = . + 0x1000;
+ }
+
+ .ttb0_l2_periph (NOLOAD):
+ {
+ . = ALIGN(4096);
+ __ttb0_l2_periph = .;
+ . = . + 0x1000;
+ }
+
+ /*
+ * The startup code uses the end of this region to calculate
+ * the top of memory - don't place any RAM regions after it
+ */
+ __top_of_ram = .;
+}
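
For reference, C code can pick up the symbols this script defines in the usual way, which is essentially what the startup code does from assembly; a minimal sketch, with an illustrative function name:

    #include <stddef.h>
    #include <stdint.h>

    /* symbols defined by the linker script; only their addresses are meaningful */
    extern uint8_t __bss_start__[];
    extern uint8_t __bss_end__[];

    static size_t bss_size(void)
    {
        /* size of the zero-initialised region the startup code clears */
        return (size_t)(__bss_end__ - __bss_start__);
    }
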
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.c
new file mode 100644
index 00000000..c2ce6faa
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.c
@@ -0,0 +1,122 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "sp804_timer.h"
+
+#define TIMER_SP804_CTRL_TIMEREN (1 << 7)
+#define TIMER_SP804_CTRL_TIMERMODE (1 << 6) // Bit 6: 0=free-running, 1=periodic
+#define TIMER_SP804_CTRL_INTENABLE (1 << 5)
+#define TIMER_SP804_CTRL_TIMERSIZE (1 << 1) // Bit 1: 0=16-bit, 1=32-bit
+#define TIMER_SP804_CTRL_ONESHOT (1 << 0) // Bit 0: 0=wrapping, 1=one-shot
+
+#define TIMER_SP804_CTRL_PRESCALE_1 (0 << 2) // clk/1
+#define TIMER_SP804_CTRL_PRESCALE_4 (1 << 2) // clk/4
+#define TIMER_SP804_CTRL_PRESCALE_8 (2 << 2) // clk/8
+
+struct sp804_timer
+{
+ volatile uint32_t Time1Load; // +0x00
+ const volatile uint32_t Time1Value; // +0x04 - RO
+ volatile uint32_t Timer1Control; // +0x08
+ volatile uint32_t Timer1IntClr; // +0x0C - WO
+ const volatile uint32_t Timer1RIS; // +0x10 - RO
+ const volatile uint32_t Timer1MIS; // +0x14 - RO
+ volatile uint32_t Timer1BGLoad; // +0x18
+ const volatile uint32_t Reserved1C; // +0x1C - reserved, keeps the Timer 2 registers at their documented offsets
+
+ volatile uint32_t Time2Load; // +0x20
+ volatile uint32_t Time2Value; // +0x24
+ volatile uint32_t Timer2Control; // +0x28
+ volatile uint32_t Timer2IntClr; // +0x2C - WO
+ const volatile uint32_t Timer2RIS; // +0x30 - RO
+ const volatile uint32_t Timer2MIS; // +0x34 - RO
+ volatile uint32_t Timer2BGLoad; // +0x38
+
+ // Not including ID registers
+
+};
+
+// Pointer to the dual timer; its base address is set at run time via setTimerBaseAddress()
+struct sp804_timer* dual_timer;
+
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address)
+{
+ dual_timer = (struct sp804_timer*)address;
+ return;
+}
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt)
+{
+ uint32_t tmp = 0;
+
+ dual_timer->Time1Load = load_value;
+
+ // Fixed setting: 32-bit, no prescaling
+ tmp = TIMER_SP804_CTRL_TIMERSIZE | TIMER_SP804_CTRL_PRESCALE_1 | TIMER_SP804_CTRL_TIMERMODE;
+
+ // Settings from parameters: interrupt generation & reload
+ tmp = tmp | interrupt | auto_reload;
+
+ // Write control register
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Starts the timer
+void startTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp | TIMER_SP804_CTRL_TIMEREN; // Set TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Stops the timer
+void stopTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp & ~TIMER_SP804_CTRL_TIMEREN; // Clear TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void)
+{
+ return dual_timer->Time1Value;
+}
+
+
+void clearTimerIrq(void)
+{
+ // A write to this register, of any value, clears the interrupt
+ dual_timer->Timer1IntClr = 1;
+}
+
+
+// ------------------------------------------------------------
+// End of sp804_timer.c
+// ------------------------------------------------------------
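
The load value passed to initTimer() is simply the number of timer-clock counts per interrupt. A small sketch of deriving a periodic tick from it follows; the 35 MHz clock figure is an assumption about the platform, not something this driver defines, while the 0x1C110000 base is the one the example's timer_interrupts.c uses:

    #include <stdint.h>
    #include "sp804_timer.h"

    #define TIMER_CLOCK_HZ   35000000u   /* assumed SP804 input clock */
    #define TICK_RATE_HZ     100u        /* desired periodic tick rate */

    void start_periodic_tick(void)
    {
        setTimerBaseAddress(0x1C110000);              /* base used by this example */
        initTimer(TIMER_CLOCK_HZ / TICK_RATE_HZ,      /* counts per tick */
                  SP804_AUTORELOAD, SP804_GENERATE_IRQ);
        startTimer();
    }
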
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.h
new file mode 100644
index 00000000..4d423904
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/sp804_timer.h
@@ -0,0 +1,53 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+// Header File
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#ifndef _SP804_TIMER_
+#define _SP804_TIMER_
+
+#include <stdint.h>
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address);
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+
+#define SP804_AUTORELOAD (0)
+#define SP804_SINGLESHOT (1)
+#define SP804_GENERATE_IRQ (1 << 5)
+#define SP804_NO_IRQ (0)
+
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt);
+
+
+// Starts the timer
+void startTimer(void);
+
+
+// Stops the timer
+void stopTimer(void);
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void);
+
+
+// Clears the timer interrupt
+void clearTimerIrq(void);
+
+#endif
+
+// ------------------------------------------------------------
+// End of sp804_timer.h
+// ------------------------------------------------------------
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/startup.S b/ports/cortex_a53/gnu/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..b71b45f8
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/startup.S
@@ -0,0 +1,787 @@
+// ------------------------------------------------------------
+// Armv8-A MPCore EL3 AArch64 Startup Code
+//
+// Basic Vectors, MMU, caches and GICv3 initialization
+//
+// Exits in EL1 AArch64
+//
+// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_mmu.h"
+#include "v8_system.h"
+#include "GICv3_aliases.h"
+
+ .section StartUp, "ax"
+ .balign 4
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+
+ .global InvalidateUDCaches
+ .global ZeroBlock
+
+ .global SetPrivateIntSecurityBlock
+ .global SetSPISecurityAll
+ .global SetPrivateIntPriority
+
+ .global GetGICR
+ .global WakeupGICR
+ .global SyncAREinGICD
+ .global EnableGICD
+ .global EnablePrivateInt
+ .global GetPrivateIntPending
+ .global ClearPrivateIntPending
+
+ .global _start
+ .global MainApp
+
+ .global __code_start
+ .global __ttb0_l1
+ .global __ttb0_l2_ram
+ .global __ttb0_l2_periph
+ .global __top_of_ram
+ .global gicd
+ .global __stack
+ .global __el3_stack
+ .global __cs3_peripherals
+
+
+
+
+// ------------------------------------------------------------
+
+ .global start64
+ .type start64, "function"
+start64:
+
+ //
+ // program the VBARs
+ //
+ ldr x1, =el1_vectors
+ msr VBAR_EL1, x1
+
+ ldr x1, =el2_vectors
+ msr VBAR_EL2, x1
+
+ ldr x1, =el3_vectors
+ msr VBAR_EL3, x1
+
+
+ // GIC-500 comes out of reset in GICv2 compatibility mode - first set
+ // system register enables for all relevant exception levels, and
+ // select GICv3 operating mode
+ //
+ msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL3, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
+
+ //
+ // set the lower exception levels to Non-secure, with no access
+ // back to EL2 or EL3, and make them AArch64 capable
+ //
+ mov x3, #(SCR_EL3_RW | \
+ SCR_EL3_SMD | \
+ SCR_EL3_NS) // Set NS bit, to access Non-secure registers
+ msr SCR_EL3, x3
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL2, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
+
+
+ //
+ // no traps or VM modifications from the Hypervisor, EL1 is AArch64
+ //
+ mov x2, #HCR_EL2_RW
+ msr HCR_EL2, x2
+
+ //
+ // VMID is still significant, even when virtualisation is not
+ // being used, so ensure VTTBR_EL2 is properly initialised
+ //
+ msr VTTBR_EL2, xzr
+
+ //
+ // VMPIDR_EL2 holds the value of the Virtualization Multiprocessor ID. This is the value returned by Non-secure EL1 reads of MPIDR_EL1.
+ // VPIDR_EL2 holds the value of the Virtualization Processor ID. This is the value returned by Non-secure EL1 reads of MIDR_EL1.
+ // Both of these registers are architecturally UNKNOWN at reset, and so they must be set to the correct value
+ // (even if EL2/virtualization is not being used), otherwise non-secure EL1 reads of MPIDR_EL1/MIDR_EL1 will return garbage values.
+ // This guarantees that any future reads of MPIDR_EL1 and MIDR_EL1 from Non-secure EL1 will return the correct value.
+ //
+ mrs x0, MPIDR_EL1
+ msr VMPIDR_EL2, x0
+ mrs x0, MIDR_EL1
+ msr VPIDR_EL2, x0
+
+ // extract the core number from MPIDR_EL1 and store it in
+ // x19 (defined by the AAPCS as callee-saved), so we can re-use
+ // the number later
+ //
+ bl GetCPUID
+ mov x19, x0
+
+ //
+ // neither EL3 nor EL2 trap floating point or accesses to CPACR
+ //
+ msr CPTR_EL3, xzr
+ msr CPTR_EL2, xzr
+
+ //
+ // SCTLR_ELx may come out of reset with UNKNOWN values so we will
+ // set the fields to 0 except, possibly, the endianness field(s).
+ // Note that setting SCTLR_EL2 or the EL0 related fields of SCTLR_EL1
+ // is not strictly needed, since we're never in EL2 or EL0
+ //
+#ifdef __ARM_BIG_ENDIAN
+ mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
+#else
+ mov x0, #0
+#endif
+ msr SCTLR_EL3, x0
+ msr SCTLR_EL2, x0
+ msr SCTLR_EL1, x0
+
+#ifdef CORTEXA
+ //
+ // Configure ACTLR_EL[23]
+ // ----------------------
+ //
+ // These bits are IMPLEMENTATION DEFINED, so are different for
+ // different processors
+ //
+ // For Cortex-A57, the controls we set are:
+ //
+ // Enable lower level access to CPUACTLR_EL1
+ // Enable lower level access to CPUECTLR_EL1
+ // Enable lower level access to L2CTLR_EL1
+ // Enable lower level access to L2ECTLR_EL1
+ // Enable lower level access to L2ACTLR_EL1
+ //
+ mov x0, #((1 << 0) | \
+ (1 << 1) | \
+ (1 << 4) | \
+ (1 << 5) | \
+ (1 << 6))
+
+ msr ACTLR_EL3, x0
+ msr ACTLR_EL2, x0
+
+ //
+ // configure CPUECTLR_EL1
+ //
+ // These bits are IMPLEMENTATION DEFINED, so they differ between
+ // processors
+ //
+ // SMPEN - bit 6 - Enables the processor to receive cache
+ // and TLB maintenance operations
+ //
+ // Note: For Cortex-A57/53 SMPEN should be set before enabling
+ // the caches and MMU, or performing any cache and TLB
+ // maintenance operations.
+ //
+ // This register has a defined reset value, so we use a
+ // read-modify-write sequence to set SMPEN
+ //
+ mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
+ orr x0, x0, #(1 << 6) // Set the SMPEN bit
+ msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
+
+ isb
+#endif
+
+ //
+ // That's the last of the control settings for now
+ //
+ // Note: no ISB after all these changes, as registers won't be
+ // accessed until after an exception return, which is itself a
+ // context synchronisation event
+ //
+
+ //
+ // Setup some EL3 stack space, ready for calling some subroutines, below.
+ //
+ // Stack space allocation is CPU-specific, so use CPU
+ // number already held in x19
+ //
+ // 2^12 bytes per CPU for the EL3 stacks
+ //
+ ldr x0, =__el3_stack
+ sub x0, x0, x19, lsl #12
+ mov sp, x0
+
+ //
+ // we need to configure the GIC while still in secure mode, specifically
+ // all PPIs and SPIs have to be programmed as Group1 interrupts
+ //
+
+ //
+ // Before the GIC can be reliably programmed, we need to
+ // enable Affinity Routing, as this affects where the configuration
+ // registers are (with Affinity Routing enabled, some registers are
+ // in the Redistributor, whereas those same registers are in the
+ // Distributor when Affinity Routing is disabled, i.e. when in GICv2
+ // compatibility mode).
+ //
+ mov x0, #(1 << 4) | (1 << 5) // gicdctlr_ARE_S | gicdctlr_ARE_NS
+ mov x1, x19
+ bl SyncAREinGICD
+
+ //
+ // The Redistributor comes out of reset assuming the processor is
+ // asleep - correct that assumption
+ //
+ bl GetAffinity
+ bl GetGICR
+ mov w20, w0 // Keep a copy for later
+ bl WakeupGICR
+
+ //
+ // Now we're ready to set security and other initialisations
+ //
+ // This is a per-CPU configuration for these interrupts
+ //
+ // for the first cluster, CPU number is the redistributor index
+ //
+ mov w0, w20
+ mov w1, #1 // gicigroupr_G1NS
+ bl SetPrivateIntSecurityBlock
+
+ //
+ // While we're in the Secure World, set the priority mask low enough
+ // for it to be writable in the Non-Secure World
+ //
+ //mov x0, #16 << 3 // 5 bits of priority in the Secure world
+ mov x0, #0xFF // for Non-Secure interrupts
+ msr ICC_PMR_EL1, x0
+
+ //
+ // there's more GIC setup to do, but only for the primary CPU
+ //
+ cbnz x19, drop_to_el1
+
+ //
+ // There's more to do to the GIC - call the utility routine to set
+ // all SPIs to Group1
+ //
+ mov w0, #1 // gicigroupr_G1NS
+ bl SetSPISecurityAll
+
+ //
+ // Set up EL1 entry point and "dummy" exception return information,
+ // then perform exception return to enter EL1
+ //
+ .global drop_to_el1
+drop_to_el1:
+ adr x1, el1_entry_aarch64
+ msr ELR_EL3, x1
+ mov x1, #(AARCH64_SPSR_EL1h | \
+ AARCH64_SPSR_F | \
+ AARCH64_SPSR_I | \
+ AARCH64_SPSR_A)
+ msr SPSR_EL3, x1
+ eret
+
+
+
+// ------------------------------------------------------------
+// EL1 - Common start-up code
+// ------------------------------------------------------------
+
+ .global el1_entry_aarch64
+ .type el1_entry_aarch64, "function"
+el1_entry_aarch64:
+
+ //
+ // Now we're in EL1, set up the application stacks;
+ // the linker script allocates 2^14 bytes per stack per CPU
+ //
+ ldr x0, =__handler_stack
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+ MSR SPSel, #0
+ ISB
+ ldr x0, =__stack
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+
+ //
+ // Enable floating point
+ //
+ mov x0, #CPACR_EL1_FPEN
+ msr CPACR_EL1, x0
+
+ //
+ // Invalidate caches and TLBs for all stage 1
+ // translations used at EL1
+ //
+ // Cortex-A processors automatically invalidate their caches on reset
+ // (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
+ // It is therefore not necessary for software to invalidate the caches
+ // on startup; however, it is done here in case of a warm reset.
+ bl InvalidateUDCaches
+ tlbi VMALLE1
+
+
+ //
+ // Set TTBR0 Base address
+ //
+ // The CPUs share one set of translation tables that are
+ // generated by CPU0 at run-time
+ //
+ // TTBR1_EL1 is not used in this example
+ //
+ ldr x1, =__ttb0_l1
+ msr TTBR0_EL1, x1
+
+
+ //
+ // Set up memory attributes
+ //
+ // These equate to:
+ //
+ // 0 -> 0b01000100 = 0x00000044 = Normal, Inner/Outer Non-Cacheable
+ // 1 -> 0b11111111 = 0x0000ff00 = Normal, Inner/Outer WriteBack Read/Write Allocate
+ // 2 -> 0b00000100 = 0x00040000 = Device-nGnRE
+ //
+ mov x1, #0xff44
+ movk x1, #4, LSL #16 // equiv to: movk x1, #0x0000000000040000
+ msr MAIR_EL1, x1
+
+
+ //
+ // Set up TCR_EL1
+ //
+ // We're using only TTBR0 (EPD1 = 1), and the page table entries:
+ // - are using an 8-bit ASID from TTBR0
+ // - have a 4K granularity (TG0 = 0b00)
+ // - are outer-shareable (SH0 = 0b10)
+ // - are using Inner & Outer WBWA Normal memory ([IO]RGN0 = 0b01)
+ // - map
+ // + 32 bits of VA space (T0SZ = 0x20)
+ // + into a 32-bit PA space (IPS = 0b000)
+ //
+ // 36 32 28 24 20 16 12 8 4 0
+ // -----+----+----+----+----+----+----+----+----+----+
+ // | | |OOII| | | |OOII| | |
+ // TT | | |RRRR|E T | T| |RRRR|E T | T|
+ // BB | I I|TTSS|GGGG|P 1 | 1|TTSS|GGGG|P 0 | 0|
+ // IIA| P P|GGHH|NNNN|DAS | S|GGHH|NNNN|D S | S|
+ // 10S| S-S|1111|1111|11Z-|---Z|0000|0000|0 Z-|---Z|
+ //
+ // 000 0000 0000 0000 1000 0000 0010 0101 0010 0000
+ //
+ // 0x 8 0 2 5 2 0
+ //
+ // Note: the ISB is needed to ensure the changes to system
+ // context are before the write of SCTLR_EL1.M to enable
+ // the MMU. It is likely on a "real" implementation that
+ // this setup would work without an ISB, due to the
+ // amount of code that gets executed before enabling the
+ // MMU, but that would not be architecturally correct.
+ //
+ ldr x1, =0x0000000000802520
+ msr TCR_EL1, x1
+ isb
+
+ //
+ // x19 already contains the CPU number, so branch to secondary
+ // code if we're not on CPU0
+ //
+ cbnz x19, el1_secondary
+
+ //
+ // Fall through to primary code
+ //
+
+
+//
+// ------------------------------------------------------------
+//
+// EL1 - primary CPU init code
+//
+// This code is run on CPU0, while the other CPUs are in the
+// holding pen
+//
+
+ .global el1_primary
+ .type el1_primary, "function"
+el1_primary:
+
+ //
+ // Turn on the banked GIC distributor enable,
+ // ready for individual CPU enables later
+ //
+ mov w0, #(1 << 1) // gicdctlr_EnableGrp1A
+ bl EnableGICD
+
+ //
+ // Generate TTBR0 L1
+ //
+ // at 4KB granularity, 32-bit VA space, table lookup starts at
+ // L1, with 1GB regions
+ //
+ // we are going to create entries pointing to L2 tables for a
+ // couple of these 1GB regions, the first of which is the
+ // RAM on the VE board model - get the table addresses and
+ // start by emptying out the L1 page tables (4 entries at L1
+ // for a 4K granularity)
+ //
+ // x21 = address of L1 tables
+ //
+ ldr x21, =__ttb0_l1
+ mov x0, x21
+ mov x1, #(4 << 3)
+ bl ZeroBlock
+
+ //
+ // time to start mapping the RAM regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // x22 = address of L2 tables, needs to be remembered in case
+ // we want to re-use the tables for mapping peripherals
+ //
+ ldr x22, =__ttb0_l2_ram
+ mov x1, #(512 << 3)
+ mov x0, x22
+ bl ZeroBlock
+
+ //
+ // Get the start address of RAM (the EXEC region) into x4
+ // and calculate the offset into the L1 table (1GB per region,
+ // max 4GB)
+ //
+ // x23 = L1 table offset, saved for later comparison against
+ // peripheral offset
+ //
+ ldr x4, =__code_start
+ ubfx x23, x4, #30, #2
+
+ orr x1, x22, #TT_S1_ATTR_PAGE
+ str x1, [x21, x23, lsl #3]
+
+ //
+ // we've already used the RAM start address in x4 - we now need
+ // to get this in terms of an offset into the L2 page tables,
+ // where each entry covers 2MB
+ //
+ ubfx x2, x4, #21, #9
+
+ //
+ // __top_of_ram in the linker script marks the end of the
+ // Execute region in RAM: convert the end of this region to an
+ // offset too, being careful to round up, then calculate the
+ // number of entries to write
+ //
+ ldr x5, =__top_of_ram
+ sub x3, x5, #1
+ ubfx x3, x3, #21, #9
+ add x3, x3, #1
+ sub x3, x3, x2
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as Shared, Normal WBWA (MAIR[1]) with a flat
+ // VA->PA translation
+ //
+ bic x4, x4, #((1 << 21) - 1)
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (1 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_SH_INNER | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // factor the offset into the page table address and then write
+ // the entries
+ //
+ add x0, x22, x2, lsl #3
+
+loop1:
+ subs x3, x3, #1
+ str x1, [x0], #8
+ add x1, x1, #0x200, LSL #12 // equiv to add x1, x1, #(1 << 21) // 2MB per entry
+ bne loop1
+
+
+ //
+ // now mapping the Peripheral regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // The assumption here is that all peripherals live within
+ // a common 1GB region (i.e. that there's a single set of
+ // L2 pages for all the peripherals). We only use a UART
+ // and the GIC in this example, so the assumption is sound
+ //
+ // x24 = address of L2 peripheral tables
+ //
+ ldr x24, =__ttb0_l2_periph
+
+ //
+ // get the GICD address into x4 and calculate
+ // the offset into the L1 table
+ //
+ // x25 = L1 table offset
+ //
+ ldr x4, =gicd
+ ubfx x25, x4, #30, #2
+
+ //
+ // here's the tricky bit: it's possible that the peripherals are
+ // in the same 1GB region as the RAM, in which case we don't need
+ // to prime a separate set of L2 page tables, nor add them to the
+ // L1 tables
+ //
+ // if we're going to re-use the TTB0_L2_RAM tables, get their
+ // address into x24, which is used later on to write the PTEs
+ //
+ cmp x25, x23
+ csel x24, x22, x24, EQ
+ b.eq nol2setup
+
+ //
+ // Peripherals are in a separate 1GB region, and so have their own
+ // set of L2 tables - clean out the tables and add them to the L1
+ // table
+ //
+ mov x0, x24
+ mov x1, #512 << 3
+ bl ZeroBlock
+
+ orr x1, x24, #TT_S1_ATTR_PAGE
+ str x1, [x21, x25, lsl #3]
+
+ //
+ // there's only going to be a single 2MB region for GICD (in
+ // x4) - get this in terms of an offset into the L2 page tables
+ //
+ // with larger systems, it is possible that the GIC redistributor
+ // registers require extra 2MB pages, in which case extra code
+ // would be required here
+ //
+nol2setup:
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry for this, so no loop as we have for RAM, above
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // we have CS3_PERIPHERALS that include the UART controller
+ //
+ // Again, the code is making assumptions - this time that the CS3_PERIPHERALS
+ // region uses the same 1GB portion of the address space as the GICD,
+ // and thus shares the same set of L2 page tables
+ //
+ // Get CS3_PERIPHERALS address into x4 and calculate the offset into the
+ // L2 tables
+ //
+ ldr x4, =__cs3_peripherals
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry again - write it
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // issue a barrier to ensure all table entry writes are complete
+ //
+ dsb ish
+
+ //
+ // Enable the MMU. Caches will be enabled later, after scatterloading.
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // The Arm Architecture Reference Manual for Armv8-A states:
+ //
+ // Instruction accesses to Non-cacheable Normal memory can be held in instruction caches.
+ // Correspondingly, the sequence for ensuring that modifications to instructions are available
+ // for execution must include invalidation of the modified locations from the instruction cache,
+ // even if the instructions are held in Normal Non-cacheable memory.
+ // This includes cases where the instruction cache is disabled.
+ //
+
+ dsb ish // ensure all previous stores have completed before invalidating
+ ic ialluis // I cache invalidate all inner shareable to PoU (which includes secondary cores)
+ dsb ish // ensure completion on inner shareable domain (which includes secondary cores)
+ isb
+
+ // Scatter-loading is complete, so enable the caches here, so that the C-library's mutex initialization later will work
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ msr SCTLR_EL1, x1
+ isb
+
+ // Zero the bss
+ ldr x0, =__bss_start__ // Start of block
+ mov x1, #0 // Fill value
+ ldr x2, =__bss_end__ // End of block
+ sub x2, x2, x0 // Length of block
+ bl memset
+
+ // Set up the standard file handles
+ bl initialise_monitor_handles
+
+ // Set up _fini and fini_array to be called at exit
+ ldr x0, =__libc_fini_array
+ bl atexit
+
+ // Call preinit_array, _init and init_array
+ bl __libc_init_array
+
+ // Set argc = 1, argv[0] = "" and then call main
+ .pushsection .data
+ .align 3
+argv:
+ .dword arg0
+ .dword 0
+arg0:
+ .byte 0
+ .popsection
+
+ mov x0, #1
+ ldr x1, =argv
+ bl main
+
+ b exit // Will not return
+
+// ------------------------------------------------------------
+// EL1 - secondary CPU init code
+//
+// This code is run on CPUs 1, 2, 3 etc....
+// ------------------------------------------------------------
+
+ .global el1_secondary
+ .type el1_secondary, "function"
+el1_secondary:
+
+ //
+ // the primary CPU is going to use SGI 15 as a wakeup event
+ // to let us know when it is OK to proceed, so prepare for
+ // receiving that interrupt
+ //
+ // NS interrupt priorities run from 0 to 15, with 15 being
+ // too low a priority to ever raise an interrupt, so let's
+ // use 14
+ //
+ mov w0, w20
+ mov w1, #15
+ mov w2, #14 << 4 // we're in NS world, so only 4 bits of priority are
+ // implemented in the 8-bit field - hence the 4-bit shift
+ bl SetPrivateIntPriority
+
+ mov w0, w20
+ mov w1, #15
+ bl EnablePrivateInt
+
+ //
+ // set the priority mask as low as possible; although, being in the
+ // NS World, we can't set bit[7] of the priority, we still
+ // write all 8 bits of priority to the ICC register
+ //
+ mov x0, #31 << 3
+ msr ICC_PMR_EL1, x0
+
+ //
+ // set global enable and wait for our interrupt to arrive
+ //
+ mov x0, #1
+ msr ICC_IGRPEN1_EL1, x0
+ isb
+
+loop_wfi:
+ dsb SY // Clear all pending data accesses
+ wfi // Go to sleep
+
+ //
+ // something woke us from our wait, was it the required interrupt?
+ //
+ mov w0, w20
+ mov w1, #15
+ bl GetPrivateIntPending
+ cbz w0, loop_wfi
+
+ //
+ // it was - there's no need to actually take the interrupt,
+ // so just clear it
+ //
+ mov w0, w20
+ mov w1, #15
+ bl ClearPrivateIntPending
+
+ //
+ // Enable the MMU and caches
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to thread start
+ //
+ //B MainApp
+
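
As a sanity check on the MAIR_EL1 value programmed above (mov #0xff44 followed by movk #4, lsl #16), here is a small C sketch that rebuilds it from the three attribute encodings listed in the comments; the macro and function names are illustrative only:

    #include <stdint.h>

    #define MAIR_ATTR(idx, enc)   ((uint64_t)(enc) << ((idx) * 8))

    static uint64_t build_mair_el1(void)
    {
        return MAIR_ATTR(0, 0x44)    /* Attr0: Normal, Inner/Outer Non-Cacheable  */
             | MAIR_ATTR(1, 0xFF)    /* Attr1: Normal, Inner/Outer WB RW-Allocate */
             | MAIR_ATTR(2, 0x04);   /* Attr2: Device-nGnRE                       */
    }                                /* == 0x000000000004FF44, as written by startup.S */
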
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/timer_interrupts.c b/ports/cortex_a53/gnu/example_build/sample_threadx/timer_interrupts.c
new file mode 100644
index 00000000..8f522217
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/timer_interrupts.c
@@ -0,0 +1,152 @@
+/* Bare-metal example for Armv8-A FVP Base model */
+
+/* Timer and interrupts */
+
+/* Copyright (c) 2016-2018 Arm Limited (or its affiliates). All rights reserved. */
+/* Use, modification and redistribution of this file is subject to your possession of a */
+/* valid End User License Agreement for the Arm Product of which these examples are part of */
+/* and your compliance with all applicable terms and conditions of such licence agreement. */
+
+#include <stdio.h>
+
+#include "GICv3.h"
+#include "GICv3_gicc.h"
+#include "sp804_timer.h"
+
+void _tx_timer_interrupt(void);
+
+// LED Base address
+#define LED_BASE (volatile unsigned int *)0x1C010008
+
+
+void nudge_leds(void) // Move LEDs along
+{
+ static int state = 1;
+ static int value = 1;
+
+ if (state)
+ {
+ int max = (1 << 7);
+ value <<= 1;
+ if (value == max)
+ state = 0;
+ }
+ else
+ {
+ value >>= 1;
+ if (value == 1)
+ state = 1;
+ }
+
+ *LED_BASE = value; // Update LEDs hardware
+}
+
+
+// Initialize Timer 0 and Interrupt Controller
+void init_timer(void)
+{
+ // Enable interrupts
+ __asm("MSR DAIFClr, #0xF");
+ setICC_IGRPEN1_EL1(igrpEnable);
+
+ // Configure the SP804 timer to generate an interrupt
+ setTimerBaseAddress(0x1C110000);
+ initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+ startTimer();
+
+ // The SP804 timer generates SPI INTID 34. Enable
+ // this ID, and route it to core 0.0.0.0 (this one!)
+ SetSPIRoute(34, 0, gicdirouter_ModeSpecific); // Route INTID 34 to 0.0.0.0 (this core)
+ SetSPIPriority(34, 0); // Set INTID 34 to priority to 0
+ ConfigureSPI(34, gicdicfgr_Level); // Set INTID 34 as level-sensitive
+ EnableSPI(34); // Enable INTID 34
+}
+
+
+// --------------------------------------------------------
+
+void irqHandler(void)
+{
+ unsigned int ID;
+
+ ID = getICC_IAR1(); // readIntAck();
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("irqHandler() - Reserved INTID %d\n\n", ID);
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("irqHandler() - External timer interrupt\n\n");
+ nudge_leds();
+ clearTimerIrq();
+
+ /* Call ThreadX timer interrupt processing. */
+ _tx_timer_interrupt();
+
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("irqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
+
+// --------------------------------------------------------
+
+// Not actually used in this example, but provided for completeness
+
+void fiqHandler(void)
+{
+ unsigned int ID;
+ unsigned int aliased = 0;
+
+ ID = getICC_IAR0(); // readIntAck();
+ //printf("fiqHandler() - Read %d from IAR0\n", ID);
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("fiqHandler() - Reserved INTID %d\n\n", ID);
+ ID = getICC_IAR1(); // readAliasedIntAck();
+ //printf("fiqHandler() - Read %d from AIAR\n", ID);
+ aliased = 1;
+
+ // If still spurious then simply return
+ if ((1020 <= ID) && (ID <= 1023))
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("fiqHandler() - External timer interrupt\n\n");
+ clearTimerIrq();
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("fiqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ // NOTE: If the ID was read from the Aliased IAR, then
+ // the aliased EOI register must be used
+ if (aliased == 0)
+ setICC_EOIR0(ID); // writeEOI(ID);
+ else
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/use_model_semihosting.ds b/ports/cortex_a53/gnu/example_build/sample_threadx/use_model_semihosting.ds
new file mode 100644
index 00000000..6fde52b2
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/use_model_semihosting.ds
@@ -0,0 +1 @@
+set semihosting enabled off
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.S
new file mode 100644
index 00000000..45445a98
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.S
@@ -0,0 +1,179 @@
+// ------------------------------------------------------------
+// Armv8-A AArch64 - Common helper functions
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global EnableCachesEL1
+ .global DisableCachesEL1
+ .global InvalidateUDCaches
+ .global GetMIDR
+ .global GetMPIDR
+ .global GetAffinity
+ .global GetCPUID
+
+// ------------------------------------------------------------
+
+//
+// void EnableCachesEL1(void)
+//
+// enable Instruction and Data caches
+//
+ .type EnableCachesEL1, "function"
+ .cfi_startproc
+EnableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_ELx_I
+ orr x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+ .type DisableCachesEL1, "function"
+ .cfi_startproc
+DisableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_ELx_I
+ bic x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// void InvalidateUDCaches(void)
+//
+// Invalidate data and unified caches
+//
+ .type InvalidateUDCaches, "function"
+ .cfi_startproc
+InvalidateUDCaches:
+ // From the Armv8-A Architecture Reference Manual
+
+ dmb ish // ensure all prior inner-shareable accesses have been observed
+
+ mrs x0, CLIDR_EL1
+ and w3, w0, #0x07000000 // get 2 x level of coherence
+ lsr w3, w3, #23
+ cbz w3, finished
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+loop_level:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt next_level // no data or unified cache at this level
+ msr CSSELR_EL1, x10 // select this cache level
+ isb // synchronize change of csselr
+ mrs x1, CCSIDR_EL1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop_way:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop_set:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc isw, x11 // do data cache invalidate by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop_set
+ subs x9, x9, x16 // decrement way number
+ b.ge loop_way
+next_level:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop_level
+ dsb sy // ensure completion of previous cache maintenance operation
+ isb
+finished:
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// ID Register functions
+//
+
+ .type GetMIDR, "function"
+ .cfi_startproc
+GetMIDR:
+
+ mrs x0, MIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetMPIDR, "function"
+ .cfi_startproc
+GetMPIDR:
+
+ mrs x0, MPIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetAffinity, "function"
+ .cfi_startproc
+GetAffinity:
+
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #32, #8
+ bfi w0, w1, #24, #8
+ ret
+ .cfi_endproc
+
+
+ .type GetCPUID, "function"
+ .cfi_startproc
+GetCPUID:
+
+ mrs x0, MIDR_EL1
+ ubfx x0, x0, #4, #12 // extract PartNum
+ cmp x0, #0xD0D // Cortex-A77
+ b.eq DynamIQ
+ cmp x0, #0xD0B // Cortex-A76
+ b.eq DynamIQ
+ cmp x0, #0xD0A // Cortex-A75
+ b.eq DynamIQ
+ cmp x0, #0xD05 // Cortex-A55
+ b.eq DynamIQ
+ b Others
+DynamIQ:
+ mrs x0, MPIDR_EL1
+ ubfx x0, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ ret
+
+Others:
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
+ ubfx x2, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ add x0, x1, x2, LSL #2
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.h b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.h
new file mode 100644
index 00000000..b09079a4
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_aarch64.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Armv8-A AArch64 common helper functions
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+
+#ifndef V8_AARCH64_H
+#define V8_AARCH64_H
+
+/*
+ * Parameters for data barriers
+ */
+#define OSHLD 1
+#define OSHST 2
+#define OSH 3
+#define NSHLD 5
+#define NSHST 6
+#define NSH 7
+#define ISHLD 9
+#define ISHST 10
+#define ISH 11
+#define LD 13
+#define ST 14
+#define SY 15
+
+/**********************************************************************/
+
+/*
+ * function prototypes
+ */
+
+/*
+ * void InvalidateUDCaches(void)
+ * invalidates all Unified and Data Caches
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * guarantees that all levels of cache will be invalidated before
+ * returning to caller
+ */
+void InvalidateUDCaches(void);
+
+/*
+ * unsigned long long EnableCachesEL1(void)
+ * enables I- and D- caches at EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * New value of SCTLR_EL1
+ *
+ * Side Effects
+ * context will be synchronised before returning to caller
+ */
+unsigned long long EnableCachesEL1(void);
+
+/*
+ * unsigned long long GetMIDR(void)
+ *    returns the contents of MIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *    MIDR_EL1
+ */
+unsigned long long GetMIDR(void);
+
+/*
+ * unsigned long long GetMPIDR(void)
+ *    returns the contents of MPIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *    MPIDR_EL1
+ */
+unsigned long long GetMPIDR(void);
+
+/*
+ * unsigned int GetCPUID(void)
+ *    returns a CPU number derived from the MPIDR_EL1 affinity fields
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *    CPU number (Aff1 on DynamIQ parts, otherwise Aff0 + 4 * Aff1)
+ */
+unsigned int GetCPUID(void);
+
+#endif
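
The helpers declared in this header are intended to be called from early start-up code, before the translation tables and caches are configured. The sketch below is a hedged illustration of that call order; the function name early_cache_init and the assumption that it runs at EL1 are not part of this port.

    /* Minimal sketch, assuming start-up code running at EL1.  The function
       name early_cache_init is hypothetical; only the three helpers come
       from v8_aarch64.h.                                                   */
    #include "v8_aarch64.h"

    void early_cache_init(void)
    {
        unsigned long long sctlr;
        unsigned int       cpu;

        InvalidateUDCaches();          /* no stale lines before enabling    */
        sctlr = EnableCachesEL1();     /* sets SCTLR_EL1.I and SCTLR_EL1.C  */
        cpu   = GetCPUID();            /* core number for per-CPU set-up    */

        (void)sctlr;
        (void)cpu;
    }
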
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_mmu.h
new file mode 100644
index 00000000..bce62b54
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_mmu.h
@@ -0,0 +1,128 @@
+//
+// Defines for v8 Memory Model
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_MMU_H
+#define V8_MMU_H
+
+//
+// Translation Control Register fields
+//
+// RGN field encodings
+//
+#define TCR_RGN_NC 0b00
+#define TCR_RGN_WBWA 0b01
+#define TCR_RGN_WT 0b10
+#define TCR_RGN_WBRA 0b11
+
+//
+// Shareability encodings
+//
+#define TCR_SHARE_NONE 0b00
+#define TCR_SHARE_OUTER 0b10
+#define TCR_SHARE_INNER 0b11
+
+//
+// Granule size encodings
+//
+#define TCR_GRANULE_4K 0b00
+#define TCR_GRANULE_64K 0b01
+#define TCR_GRANULE_16K 0b10
+
+//
+// Physical Address sizes
+//
+#define TCR_SIZE_4G 0b000
+#define TCR_SIZE_64G 0b001
+#define TCR_SIZE_1T 0b010
+#define TCR_SIZE_4T 0b011
+#define TCR_SIZE_16T 0b100
+#define TCR_SIZE_256T 0b101
+
+//
+// Translation Control Register fields
+//
+#define TCR_EL1_T0SZ_SHIFT 0
+#define TCR_EL1_EPD0 (1 << 7)
+#define TCR_EL1_IRGN0_SHIFT 8
+#define TCR_EL1_ORGN0_SHIFT 10
+#define TCR_EL1_SH0_SHIFT 12
+#define TCR_EL1_TG0_SHIFT 14
+
+#define TCR_EL1_T1SZ_SHIFT 16
+#define TCR_EL1_A1 (1 << 22)
+#define TCR_EL1_EPD1 (1 << 23)
+#define TCR_EL1_IRGN1_SHIFT 24
+#define TCR_EL1_ORGN1_SHIFT 26
+#define TCR_EL1_SH1_SHIFT 28
+#define TCR_EL1_TG1_SHIFT 30
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_EL1_AS (1 << 36)
+#define TCR_EL1_TBI0 (1 << 37)
+#define TCR_EL1_TBI1 (1 << 38)
+
+//
+// Stage 1 Translation Table descriptor fields
+//
+#define TT_S1_ATTR_FAULT (0b00 << 0)
+#define TT_S1_ATTR_BLOCK (0b01 << 0) // Level 1/2
+#define TT_S1_ATTR_TABLE (0b11 << 0) // Level 0/1/2
+#define TT_S1_ATTR_PAGE (0b11 << 0) // Level 3
+
+#define TT_S1_ATTR_MATTR_LSB 2
+
+#define TT_S1_ATTR_NS (1 << 5)
+
+#define TT_S1_ATTR_AP_RW_PL1 (0b00 << 6)
+#define TT_S1_ATTR_AP_RW_ANY (0b01 << 6)
+#define TT_S1_ATTR_AP_RO_PL1 (0b10 << 6)
+#define TT_S1_ATTR_AP_RO_ANY (0b11 << 6)
+
+#define TT_S1_ATTR_SH_NONE (0b00 << 8)
+#define TT_S1_ATTR_SH_OUTER (0b10 << 8)
+#define TT_S1_ATTR_SH_INNER (0b11 << 8)
+
+#define TT_S1_ATTR_AF (1 << 10)
+#define TT_S1_ATTR_nG (1 << 11)
+
+// OA bits [15:12] - If Armv8.2-LPA is implemented, bits[15:12] are bits[51:48]
+// and bits[47:16] are bits[47:16] of the output address for a page of memory
+
+#define TT_S1_ATTR_nT (1 << 16) // Present if Armv8.4-TTRem is implemented, otherwise RES0
+
+#define TT_S1_ATTR_DBM (1 << 51) // Present if Armv8.1-TTHM is implemented, otherwise RES0
+
+#define TT_S1_ATTR_CONTIG (1 << 52)
+#define TT_S1_ATTR_PXN (1 << 53)
+#define TT_S1_ATTR_UXN (1 << 54)
+
+// PBHA bits[62:59] - If Armv8.2-TTPBHA is implemented, hardware can use these bits
+// for IMPLEMENTATION DEFINED purposes, otherwise IGNORED
+
+#define TT_S1_MAIR_DEV_nGnRnE 0b00000000
+#define TT_S1_MAIR_DEV_nGnRE 0b00000100
+#define TT_S1_MAIR_DEV_nGRE 0b00001000
+#define TT_S1_MAIR_DEV_GRE 0b00001100
+
+//
+// Inner and Outer Normal memory attributes use the same bit patterns
+// Outer attributes just need to be shifted up
+//
+#define TT_S1_MAIR_OUTER_SHIFT 4
+
+#define TT_S1_MAIR_WT_TRANS_RA 0b0010
+
+#define TT_S1_MAIR_WB_TRANS_RA 0b0110
+#define TT_S1_MAIR_WB_TRANS_RWA 0b0111
+
+#define TT_S1_MAIR_WT_RA 0b1010
+
+#define TT_S1_MAIR_WB_RA 0b1110
+#define TT_S1_MAIR_WB_RWA 0b1111
+
+#endif // V8_MMU_H
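
The attribute macros above are meant to be OR-ed together when building MAIR_EL1 and stage-1 descriptors. The sketch below composes one Normal Write-Back Read/Write-Allocate MAIR byte and a matching block-descriptor attribute set; the EXAMPLE_* names, the use of MAIR index 0, and the inner-shareable, EL1 read/write permission choices are assumptions for illustration only.

    /* Hedged example of combining the v8_mmu.h macros; the EXAMPLE_* names
       and the choice of MAIR index 0 are illustrative assumptions.         */
    #include "v8_mmu.h"

    /* MAIR attribute byte 0: Normal memory, Write-Back RW-Allocate,
       same encoding for the inner and outer halves.                  */
    #define EXAMPLE_MAIR_ATTR0  ((TT_S1_MAIR_WB_RWA << TT_S1_MAIR_OUTER_SHIFT) | \
                                 TT_S1_MAIR_WB_RWA)

    /* Lower attributes of a level-1/2 block descriptor that indexes
       MAIR slot 0: privileged RW, inner-shareable, access flag set.  */
    #define EXAMPLE_BLOCK_ATTRS (TT_S1_ATTR_BLOCK            | \
                                 (0 << TT_S1_ATTR_MATTR_LSB) | \
                                 TT_S1_ATTR_AP_RW_PL1        | \
                                 TT_S1_ATTR_SH_INNER         | \
                                 TT_S1_ATTR_AF)
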
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/v8_system.h b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_system.h
new file mode 100644
index 00000000..a62d2a33
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_system.h
@@ -0,0 +1,115 @@
+//
+// Defines for v8 System Registers
+//
+// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_SYSTEM_H
+#define V8_SYSTEM_H
+
+//
+// AArch64 SPSR
+//
+#define AARCH64_SPSR_EL3h 0b1101
+#define AARCH64_SPSR_EL3t 0b1100
+#define AARCH64_SPSR_EL2h 0b1001
+#define AARCH64_SPSR_EL2t 0b1000
+#define AARCH64_SPSR_EL1h 0b0101
+#define AARCH64_SPSR_EL1t 0b0100
+#define AARCH64_SPSR_EL0t 0b0000
+#define AARCH64_SPSR_RW (1 << 4)
+#define AARCH64_SPSR_F (1 << 6)
+#define AARCH64_SPSR_I (1 << 7)
+#define AARCH64_SPSR_A (1 << 8)
+#define AARCH64_SPSR_D (1 << 9)
+#define AARCH64_SPSR_IL (1 << 20)
+#define AARCH64_SPSR_SS (1 << 21)
+#define AARCH64_SPSR_V (1 << 28)
+#define AARCH64_SPSR_C (1 << 29)
+#define AARCH64_SPSR_Z (1 << 30)
+#define AARCH64_SPSR_N (1 << 31)
+
+//
+// Multiprocessor Affinity Register
+//
+#define MPIDR_EL1_AFF3_LSB 32
+#define MPIDR_EL1_U (1 << 30)
+#define MPIDR_EL1_MT (1 << 24)
+#define MPIDR_EL1_AFF2_LSB 16
+#define MPIDR_EL1_AFF1_LSB 8
+#define MPIDR_EL1_AFF0_LSB 0
+#define MPIDR_EL1_AFF_WIDTH 8
+
+//
+// Data Cache Zero ID Register
+//
+#define DCZID_EL0_BS_LSB 0
+#define DCZID_EL0_BS_WIDTH 4
+#define DCZID_EL0_DZP_LSB 5
+#define DCZID_EL0_DZP (1 << 5)
+
+//
+// System Control Register
+//
+#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_EL1_E0E (1 << 24)
+#define SCTLR_ELx_WXN (1 << 19)
+#define SCTLR_EL1_nTWE (1 << 18)
+#define SCTLR_EL1_nTWI (1 << 16)
+#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_EL1_UMA (1 << 9)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
+#define SCTLR_EL1_THEE (1 << 6)
+#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M (1 << 0)
+
+//
+// Architectural Feature Access Control Register
+//
+#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_FPEN (3 << 20)
+
+//
+// Architectural Feature Trap Register
+//
+#define CPTR_ELx_TCPAC (1 << 31)
+#define CPTR_ELx_TTA (1 << 20)
+#define CPTR_ELx_TFP (1 << 10)
+
+//
+// Secure Configuration Register
+//
+#define SCR_EL3_TWE (1 << 13)
+#define SCR_EL3_TWI (1 << 12)
+#define SCR_EL3_ST (1 << 11)
+#define SCR_EL3_RW (1 << 10)
+#define SCR_EL3_SIF (1 << 9)
+#define SCR_EL3_HCE (1 << 8)
+#define SCR_EL3_SMD (1 << 7)
+#define SCR_EL3_EA (1 << 3)
+#define SCR_EL3_FIQ (1 << 2)
+#define SCR_EL3_IRQ (1 << 1)
+#define SCR_EL3_NS (1 << 0)
+
+//
+// Hypervisor Configuration Register
+//
+#define HCR_EL2_ID (1 << 33)
+#define HCR_EL2_CD (1 << 32)
+#define HCR_EL2_RW (1 << 31)
+#define HCR_EL2_TRVM (1 << 30)
+#define HCR_EL2_HVC (1 << 29)
+#define HCR_EL2_TDZ (1 << 28)
+
+#endif // V8_SYSTEM_H
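
These bit-field macros are likewise intended to be OR-ed together. As one hedged illustration, the definition below builds an SPSR image for an exception return to EL1 using SP_EL1 with all four DAIF masks set; the EXAMPLE_ name and this particular combination are assumptions.

    /* Illustrative composition of an SPSR value from the macros above:
       return to EL1h with Debug, SError, IRQ and FIQ masked.            */
    #include "v8_system.h"

    #define EXAMPLE_SPSR_EL1H_MASKED  (AARCH64_SPSR_EL1h | AARCH64_SPSR_D | \
                                       AARCH64_SPSR_A    | AARCH64_SPSR_I | \
                                       AARCH64_SPSR_F)
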
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/v8_utils.S b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_utils.S
new file mode 100644
index 00000000..888892a0
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/v8_utils.S
@@ -0,0 +1,69 @@
+//
+// Simple utility routines for baremetal v8 code
+//
+// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+//
+// void *ZeroBlock(void *blockPtr, unsigned int nBytes)
+//
+// Zero fill a block of memory
+// Fill memory pages or similar structures with zeros.
+// The byte count must be a multiple of the block fill size (16 bytes)
+//
+// Inputs:
+// blockPtr - base address of block to fill
+// nBytes - block size, in bytes
+//
+// Returns:
+// pointer to just filled block, NULL if nBytes is
+// incompatible with block fill size
+//
+ .global ZeroBlock
+ .type ZeroBlock, "function"
+ .cfi_startproc
+ZeroBlock:
+
+ //
+ // we fill data by steam, 16 bytes at a time: check that
+ // blocksize is a multiple of that
+ //
+ ubfx x2, x1, #0, #4
+ cbnz x2, incompatible
+
+ //
+ // we already have one register full of zeros, get another
+ //
+ mov x3, x2
+
+ //
+ // OK, set temporary pointer and away we go
+ //
+ add x0, x0, x1
+
+loop0:
+ subs x1, x1, #16
+ stp x2, x3, [x0, #-16]!
+ b.ne loop0
+
+ //
+ // that's all - x0 will be back to its start value
+ //
+ ret
+
+ //
+ // parameters are incompatible with block size - return
+ // an indication that this is so
+ //
+incompatible:
+ mov x0,#0
+ ret
+ .cfi_endproc
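
ZeroBlock is callable from C; the prototype in the sketch below is taken from the comment block above, while the buffer name and the 4 KB size are illustrative. The length passed must be a multiple of 16, otherwise the routine returns NULL without writing anything.

    /* Caller-side sketch for ZeroBlock.  page_buffer and its size are
       hypothetical; the prototype mirrors the comment block in this file. */
    extern void *ZeroBlock(void *blockPtr, unsigned int nBytes);

    static unsigned char page_buffer[4096] __attribute__((aligned(16)));

    void clear_page_buffer(void)
    {
        if (ZeroBlock(page_buffer, sizeof(page_buffer)) == 0)
        {
            /* nBytes was not a multiple of 16 - nothing was filled. */
        }
    }
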
diff --git a/ports/cortex_a53/gnu/example_build/sample_threadx/vectors.S b/ports/cortex_a53/gnu/example_build/sample_threadx/vectors.S
new file mode 100644
index 00000000..7784f98e
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/sample_threadx/vectors.S
@@ -0,0 +1,252 @@
+// ------------------------------------------------------------
+// Armv8-A Vector tables
+//
+// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+ .global c0sync1
+ .global irqHandler
+ .global fiqHandler
+ .global irqFirstLevelHandler
+ .global fiqFirstLevelHandler
+
+ .section EL1VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el1_vectors:
+c0sync1: B c0sync1
+
+ .balign 0x80
+c0irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr1: B c0serr1
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync1: B cxsync1
+
+ .balign 0x80
+cxirq1: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr1: B cxserr1
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync1: B l64sync1
+
+ .balign 0x80
+l64irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr1: B l64serr1
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync1: B l32sync1
+
+ .balign 0x80
+l32irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr1: B l32serr1
+
+//----------------------------------------------------------------
+
+ .section EL2VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el2_vectors:
+c0sync2: B c0sync2
+
+ .balign 0x80
+c0irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr2: B c0serr2
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync2: B cxsync2
+
+ .balign 0x80
+cxirq2: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr2: B cxserr2
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync2: B l64sync2
+
+ .balign 0x80
+l64irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr2: B l64serr2
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync2: B l32sync2
+
+ .balign 0x80
+l32irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr2: B l32serr2
+
+//----------------------------------------------------------------
+
+ .section EL3VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el3_vectors:
+c0sync3: B c0sync3
+
+ .balign 0x80
+c0irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr3: B c0serr3
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync3: B cxsync3
+
+ .balign 0x80
+cxirq3: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr3: B cxserr3
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync3: B l64sync3
+
+ .balign 0x80
+l64irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr3: B l64serr3
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync3: B l32sync3
+
+ .balign 0x80
+l32irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr3: B l32serr3
+
+
+ .section InterruptHandlers, "ax"
+ .balign 4
+
+ .type irqFirstLevelHandler, "function"
+irqFirstLevelHandler:
+ MSR SPSel, 0
+ STP x29, x30, [sp, #-16]!
+ BL _tx_thread_context_save
+ BL irqHandler
+ B _tx_thread_context_restore
+
+ .type fiqFirstLevelHandler, "function"
+fiqFirstLevelHandler:
+ STP x29, x30, [sp, #-16]!
+ STP x18, x19, [sp, #-16]!
+ STP x16, x17, [sp, #-16]!
+ STP x14, x15, [sp, #-16]!
+ STP x12, x13, [sp, #-16]!
+ STP x10, x11, [sp, #-16]!
+ STP x8, x9, [sp, #-16]!
+ STP x6, x7, [sp, #-16]!
+ STP x4, x5, [sp, #-16]!
+ STP x2, x3, [sp, #-16]!
+ STP x0, x1, [sp, #-16]!
+
+ BL fiqHandler
+
+ LDP x0, x1, [sp], #16
+ LDP x2, x3, [sp], #16
+ LDP x4, x5, [sp], #16
+ LDP x6, x7, [sp], #16
+ LDP x8, x9, [sp], #16
+ LDP x10, x11, [sp], #16
+ LDP x12, x13, [sp], #16
+ LDP x14, x15, [sp], #16
+ LDP x16, x17, [sp], #16
+ LDP x18, x19, [sp], #16
+ LDP x29, x30, [sp], #16
+ ERET
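
These tables only take effect once their base addresses are written to the corresponding VBAR_ELx registers (the .align 11 directives provide the required 2 KB alignment). The C sketch below shows one way to install the EL1 table; doing it from C with inline assembly, and the function name install_el1_vectors, are assumptions, since the example build may equally do this in its start-up assembly.

    /* Hedged sketch: point VBAR_EL1 at el1_vectors from vectors.S.  The
       helper name is hypothetical; only el1_vectors comes from this file. */
    extern char el1_vectors[];                  /* 2 KB aligned (.align 11) */

    static inline void install_el1_vectors(void)
    {
        __asm__ volatile("MSR VBAR_EL1, %0" : : "r" (el1_vectors));
        __asm__ volatile("ISB");
    }
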
diff --git a/ports/cortex_a53/gnu/example_build/tx/.cproject b/ports/cortex_a53/gnu/example_build/tx/.cproject
new file mode 100644
index 00000000..01bcd509
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/tx/.cproject
@@ -0,0 +1,162 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a53/gnu/example_build/tx/.project b/ports/cortex_a53/gnu/example_build/tx/.project
new file mode 100644
index 00000000..863ca5cb
--- /dev/null
+++ b/ports/cortex_a53/gnu/example_build/tx/.project
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>tx</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+			<triggers>clean,full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+			<triggers>full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.cdt.core.cnature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+	</natures>
+	<linkedResources>
+		<link>
+			<name>inc_generic</name>
+			<type>2</type>
+			<locationURI>$%7BPARENT-5-PROJECT_LOC%7D/common/inc</locationURI>
+		</link>
+		<link>
+			<name>inc_port</name>
+			<type>2</type>
+			<locationURI>$%7BPARENT-2-PROJECT_LOC%7D/inc</locationURI>
+		</link>
+		<link>
+			<name>src_generic</name>
+			<type>2</type>
+			<locationURI>$%7BPARENT-5-PROJECT_LOC%7D/common/src</locationURI>
+		</link>
+		<link>
+			<name>src_port</name>
+			<type>2</type>
+			<locationURI>$%7BPARENT-2-PROJECT_LOC%7D/src</locationURI>
+		</link>
+	</linkedResources>
+</projectDescription>
diff --git a/ports/cortex_a53/gnu/inc/tx_port.h b/ports/cortex_a53/gnu/inc/tx_port.h
new file mode 100644
index 00000000..33bccbf1
--- /dev/null
+++ b/ports/cortex_a53/gnu/inc/tx_port.h
@@ -0,0 +1,379 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef int LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef short SHORT;
+typedef unsigned short USHORT;
+#define ULONG64_DEFINED
+
+/* Override the alignment type to use 64-bit alignment and storage for pointers. */
+
+#define ALIGN_TYPE_DEFINED
+typedef unsigned long long ALIGN_TYPE;
+
+
+/* Override the free block marker for byte pools to be a 64-bit constant. */
+
+#define TX_BYTE_BLOCK_FREE ((ALIGN_TYPE) 0xFFFFEEEEFFFFEEEE)
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port-specific.
+   For example, if the time source is at the address 0x0a800024 and is 16 bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_MISRA_ENABLE
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
+#define TX_INLINE_INITIALIZATION
+#endif
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifndef TX_MISRA_ENABLE
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#ifndef TX_DISABLE_INLINE
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) b = (UINT) __builtin_ctz((unsigned int) m);
+
+#endif
+
+
+/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
+ can figure out what thread timeout to process. */
+
+#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define the thread timeout setup logic in _tx_thread_create. */
+
+#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
+ (t) -> tx_thread_timer.tx_timer_internal_timeout_param = 0; \
+ (t) -> tx_thread_timer.tx_timer_internal_thread_timeout_ptr = (VOID *) (t);
+
+
+/* Define the thread timeout pointer setup in _tx_thread_timeout. */
+
+#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+   restore the interrupt posture of the running thread to the value that was
+   present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifndef TX_DISABLE_INLINE
+
+/* Define macros, with in-line assembly for performance. */
+
+__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
+{
+
+unsigned long long daif_value;
+
+ __asm__ volatile (" MRS %0, DAIF ": "=r" (daif_value) );
+ __asm__ volatile (" MSR DAIFSet, 0x3" : : : "memory" );
+ return((unsigned int) daif_value);
+}
+
+__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int daif_value)
+{
+
+unsigned long long temp;
+
+ temp = (unsigned long long) daif_value;
+ __asm__ volatile (" MSR DAIF,%0": : "r" (temp): "memory" );
+}
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+#define TX_DISABLE interrupt_save = __disable_interrupts();
+#define TX_RESTORE __restore_interrupts(interrupt_save);
+
+#else
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+#endif
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
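
The lockout macros defined above follow the usual ThreadX pattern: declare the save area, disable interrupts around the critical section, then restore the saved posture. A minimal sketch is shown below; the function and the shared counter are illustrative placeholders.

    /* Minimal sketch of the TX_DISABLE/TX_RESTORE pattern; the function and
       shared_counter are illustrative placeholders.                         */
    #include "tx_api.h"

    static volatile ULONG   shared_counter;

    VOID  increment_shared_counter(VOID)
    {
    TX_INTERRUPT_SAVE_AREA

        TX_DISABLE                      /* save current DAIF, then mask IRQ/FIQ */
        shared_counter++;               /* update protected from interrupts     */
        TX_RESTORE                      /* put DAIF back to its saved value     */
    }
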
diff --git a/ports/cortex_a53/gnu/src/tx_initialize_low_level.S b/ports/cortex_a53/gnu/src/tx_initialize_low_level.S
new file mode 100644
index 00000000..bf04784e
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_initialize_low_level.S
@@ -0,0 +1,98 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_initialize_low_level(VOID)
+// {
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level, @function
+_tx_initialize_low_level:
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+
+ /* Save the system stack pointer. */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
+
+ LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
+ MOV x1, sp // Pickup SP
+    SUB     x1, x1, #15                         // Back off to get below the current SP
+    BIC     x1, x1, #0xF                        // Align down to a 16-byte boundary
+ STR x1, [x0] // Store system stack
+
+ /* Save the first available memory address. */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$ZI$$Limit;
+
+ LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
+ LDR x1, =__top_of_ram // Pickup unused memory address
+ LDR x1, [x1] //
+ STR x1, [x0] // Store unused memory address
+
+ /* Done, return to caller. */
+
+ RET // Return to caller
+// }
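
The unused-memory address stored above is what the kernel later passes to tx_application_define as first_unused_memory. The sketch below shows a conventional use of it; the byte pool object, its name, and its 16 KB size are illustrative assumptions, not part of this port.

    /* Hedged sketch: consuming the first_unused_memory value produced by
       _tx_initialize_low_level.  The pool and its size are illustrative. */
    #include "tx_api.h"

    static TX_BYTE_POOL    example_pool;

    VOID  tx_application_define(VOID *first_unused_memory)
    {
        /* Carve an example byte pool out of the first unused RAM. */
        tx_byte_pool_create(&example_pool, "example pool",
                            first_unused_memory, 16384);
    }
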
diff --git a/ports/cortex_a53/gnu/src/tx_thread_context_restore.S b/ports/cortex_a53/gnu/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..994c404d
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_context_restore.S
@@ -0,0 +1,287 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/*    nested interrupt. If not, it returns to the interrupted thread if no  */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_restore(VOID)
+// {
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore, @function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ // if (--_tx_thread_system_state)
+ // {
+
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ SUB w2, w2, #1 // Decrement the counter
+ STR w2, [x3, #0] // Store the counter
+ CMP w2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+    LDP     x4, x5, [sp], #16                   // Pickup saved SPSR/DAIF and ELR_ELx
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+    // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr))
+    //     || (_tx_thread_preempt_disable))
+ // {
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup actual current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR w2, [x3, #0] // Pickup actual preempt disable flag
+ CMP w2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR x2, [x3, #0] // Pickup actual execute thread pointer
+ CMP x0, x2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+ // else
+ // {
+__tx_thread_preempt_restore:
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ STP x20, x21, [sp, #-16]! // Save x20, x21
+ STP x22, x23, [sp, #-16]! // Save x22, x23
+ STP x24, x25, [sp, #-16]! // Save x24, x25
+ STP x26, x27, [sp, #-16]! // Save x26, x27
+ STP x28, x29, [sp, #-16]! // Save x28, x29
+#ifdef ENABLE_ARM_FP
+ LDR w3, [x0, #248] // Pickup FP enable flag
+ CMP w3, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q0, q1, [sp, #-32]! // Save q0, q1
+ STP q2, q3, [sp, #-32]! // Save q2, q3
+ STP q4, q5, [sp, #-32]! // Save q4, q5
+ STP q6, q7, [sp, #-32]! // Save q6, q7
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ STP q16, q17, [sp, #-32]! // Save q16, q17
+ STP q18, q19, [sp, #-32]! // Save q18, q19
+ STP q20, q21, [sp, #-32]! // Save q20, q21
+ STP q22, q23, [sp, #-32]! // Save q22, q23
+ STP q24, q25, [sp, #-32]! // Save q24, q25
+ STP q26, q27, [sp, #-32]! // Save q26, q27
+ STP q28, q29, [sp, #-32]! // Save q28, q29
+ STP q30, q31, [sp, #-32]! // Save q30, q31
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+    STP     x4, x5, [sp, #-16]!                 // Save x4 (SPSR_EL3), x5 (ELR_EL3)
+
+ MOV x3, sp // Move sp into x3
+ STR x3, [x0, #8] // Save stack pointer in thread control
+ // block
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+
+ /* Save the remaining time-slice and disable it. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w2, [x0, #36] // Save thread's time-slice
+ MOV w2, #0 // Clear value
+ STR w2, [x3, #0] // Disable global time-slice flag
+
+ // }
+__tx_thread_dont_save_ts:
+
+
+ /* Clear the current task pointer. */
+ /* _tx_thread_current_ptr = TX_NULL; */
+
+ MOV x0, #0 // NULL value
+ STR x0, [x1, #0] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ // _tx_thread_schedule();
+
+ // }
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
+#ifdef EL1
+ MSR ELR_EL1, x1 // Setup point of interrupt
+// MOV x1, #0x4 // Setup EL1 return
+// MSR spsr_el1, x1 // Move into SPSR
+#else
+#ifdef EL2
+ MSR ELR_EL2, x1 // Setup point of interrupt
+// MOV x1, #0x8 // Setup EL2 return
+// MSR spsr_el2, x1 // Move into SPSR
+#else
+ MSR ELR_EL3, x1 // Setup point of interrupt
+// MOV x1, #0xC // Setup EL3 return
+// MSR spsr_el3, x1 // Move into SPSR
+#endif
+#endif
+ ERET // Return to scheduler
+// }
diff --git a/ports/cortex_a53/gnu/src/tx_thread_context_save.S b/ports/cortex_a53/gnu/src/tx_thread_context_save.S
new file mode 100644
index 00000000..859a1e44
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_context_save.S
@@ -0,0 +1,216 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_save(VOID)
+// {
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save, @function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+ out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
+ and all other registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+ // if (_tx_thread_system_state++)
+ // {
+
+ STP x0, x1, [sp, #-16]! // Save x0, x1
+ STP x2, x3, [sp, #-16]! // Save x2, x3
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ CMP w2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD w2, w2, #1 // Increment the nested interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x0, SPSR_EL1 // Pickup SPSR
+ MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x0, SPSR_EL2 // Pickup SPSR
+ MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x0, SPSR_EL3 // Pickup SPSR
+ MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x0, x1, [sp, #-16]! // Save SPSR, ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ /* Return to the ISR. */
+
+ RET // Return to ISR
+
+__tx_thread_not_nested_save:
+ // }
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ // else if (_tx_thread_current_ptr)
+ // {
+
+ ADD w2, w2, #1 // Increment the interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x4, SPSR_EL1 // Pickup SPSR
+ MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x4, SPSR_EL2 // Pickup SPSR
+ MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x4, SPSR_EL3 // Pickup SPSR
+ MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x4, x5, [sp, #-16]! // Save SPSR, ELR
+
+ /* Save the current stack pointer in the thread's control block. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+
+ MOV x4, sp //
+ STR x4, [x0, #8] // Save thread stack pointer
+
+ /* Switch to the system stack. */
+ // sp = _tx_thread_system_stack_ptr;
+
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ RET // Return to caller
+
+ // }
+ // else
+ // {
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ ADD sp, sp, #48 // Recover saved registers
+ RET // Continue IRQ processing
+
+ // }
+// }
diff --git a/ports/cortex_a53/gnu/src/tx_thread_fp_disable.c b/ports/cortex_a53/gnu/src/tx_thread_fp_disable.c
new file mode 100644
index 00000000..3e5d7e21
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_fp_disable.c
@@ -0,0 +1,97 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_disable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_FALSE;
+ }
+ }
+}
+
diff --git a/ports/cortex_a53/gnu/src/tx_thread_fp_enable.c b/ports/cortex_a53/gnu/src/tx_thread_fp_enable.c
new file mode 100644
index 00000000..4e69205c
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_fp_enable.c
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/*    This function enables the FP for the currently executing thread.      */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_enable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now setup the FP enable flag in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_TRUE;
+ }
+ }
+}
+
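
Because the flag is stored in the calling thread's control block, tx_thread_fp_enable and tx_thread_fp_disable are meant to be called from the thread whose FP/NEON context should be preserved. The sketch below is illustrative; the thread entry function and its arithmetic are assumptions.

    /* Hedged sketch: a thread opting in to FP/NEON context preservation.
       math_thread_entry and its work are illustrative only.               */
    #include "tx_api.h"

    VOID  math_thread_entry(ULONG input)
    {
    float  result;

        tx_thread_fp_enable();          /* save/restore q0-q31, FPSR, FPCR for this thread */

        result = (float)input * 0.5f;   /* FP work now safe across preemption */
        (void)result;

        tx_thread_fp_disable();         /* drop the extra context-switch cost */
    }
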
diff --git a/ports/cortex_a53/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a53/gnu/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..6a5a7741
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_interrupt_control.S
@@ -0,0 +1,81 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control, @function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x1, DAIF // Pickup current interrupt posture
+
+ /* Apply the new interrupt posture. */
+
+ MSR DAIF, x0 // Set new interrupt posture
+ MOV x0, x1 // Setup return value
+ RET // Return to caller
+// }
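
Application code reaches this routine through the tx_interrupt_control service. The sketch below pairs it with the TX_INT_DISABLE value from tx_port.h in the usual save-and-restore fashion; the function name is a placeholder.

    /* Hedged sketch of the save/restore pairing for tx_interrupt_control;
       brief_critical_section is a placeholder name.                        */
    #include "tx_api.h"

    VOID  brief_critical_section(VOID)
    {
    UINT  old_posture;

        old_posture = tx_interrupt_control(TX_INT_DISABLE);    /* mask IRQ and FIQ */

        /* ... touch data shared with interrupt handlers ... */

        tx_interrupt_control(old_posture);                      /* restore previous posture */
    }
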
diff --git a/ports/cortex_a53/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a53/gnu/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..d0062ef8
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,79 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_disable(void)
+// {
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable, @function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x0, DAIF // Pickup current interrupt lockout posture
+
+ /* Mask interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a53/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a53/gnu/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..1b6261ba
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,77 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore, @function
+_tx_thread_interrupt_restore:
+
+ /* Restore the old interrupt posture. */
+
+ MSR DAIF, x0 // Setup the old posture
+ RET // Return to caller
+
+// }
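The disable/restore pair above is what the kernel uses for its own critical sections; on ports like this one the TX_DISABLE/TX_RESTORE macros in tx_port.h typically expand to these calls. A hedged sketch of the expected pattern (helper name hypothetical; prototypes normally come from tx_port.h):

    #include "tx_api.h"

    UINT _tx_thread_interrupt_disable(VOID);            /* normally declared in tx_port.h */
    VOID _tx_thread_interrupt_restore(UINT posture);

    static void kernel_style_critical_section(void)     /* hypothetical helper */
    {
        UINT posture;

        posture = _tx_thread_interrupt_disable();       /* prior DAIF returned, IRQ/FIQ masked */
        /* ... briefly touch data shared with ISRs ... */
        _tx_thread_interrupt_restore(posture);          /* DAIF put back exactly as it was */
    }
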
diff --git a/ports/cortex_a53/gnu/src/tx_thread_schedule.S b/ports/cortex_a53/gnu/src/tx_thread_schedule.S
new file mode 100644
index 00000000..9a7a7262
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_schedule.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_schedule(VOID)
+// {
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule, @function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+ MSR DAIFClr, 0x3 // Enable interrupts
+
+ /* Wait for a thread to execute. */
+ // do
+ // {
+
+ LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+#ifdef TX_ENABLE_WFI
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BNE _tx_thread_schedule_thread //
+ WFI //
+ B __tx_thread_schedule_loop // Keep looking for a thread
+_tx_thread_schedule_thread:
+#else
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+#endif
+
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Setup the current thread pointer. */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR x0, [x1, #0] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
+
+ LDR w2, [x0, #4] // Pickup run counter
+ LDR w3, [x0, #36] // Pickup time-slice for this thread
+ ADD w2, w2, #1 // Increment thread run-counter
+ STR w2, [x0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ // variable
+ LDR x4, [x0, #8] // Switch stack pointers
+ MOV sp, x4 //
+ STR w3, [x2, #0] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV x19, x0 // Save x0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV x0, x19 // Restore x0
+#endif
+
+ /* Switch to the thread's stack. */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ CMP x5, #0 // Check for synchronous context switch (ELR_EL1 = NULL)
+ BEQ _tx_solicited_return
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_interrupt_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q30, q31, [sp], #32 // Recover q30, q31
+ LDP q28, q29, [sp], #32 // Recover q28, q29
+ LDP q26, q27, [sp], #32 // Recover q26, q27
+ LDP q24, q25, [sp], #32 // Recover q24, q25
+ LDP q22, q23, [sp], #32 // Recover q22, q23
+ LDP q20, q21, [sp], #32 // Recover q20, q21
+ LDP q18, q19, [sp], #32 // Recover q18, q19
+ LDP q16, q17, [sp], #32 // Recover q16, q17
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+ LDP q6, q7, [sp], #32 // Recover q6, q7
+ LDP q4, q5, [sp], #32 // Recover q4, q5
+ LDP q2, q3, [sp], #32 // Recover q2, q3
+ LDP q0, q1, [sp], #32 // Recover q0, q1
+_skip_interrupt_fp_restore:
+#endif
+ LDP x28, x29, [sp], #16 // Recover x28
+ LDP x26, x27, [sp], #16 // Recover x26, x27
+ LDP x24, x25, [sp], #16 // Recover x24, x25
+ LDP x22, x23, [sp], #16 // Recover x22, x23
+ LDP x20, x21, [sp], #16 // Recover x20, x21
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+_tx_solicited_return:
+
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_solicited_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+_skip_solicited_fp_restore:
+#endif
+ LDP x27, x28, [sp], #16 // Recover x27, x28
+ LDP x25, x26, [sp], #16 // Recover x25, x26
+ LDP x23, x24, [sp], #16 // Recover x23, x24
+ LDP x21, x22, [sp], #16 // Recover x21, x22
+ LDP x19, x20, [sp], #16 // Recover x19, x20
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ MSR DAIF, x4 // Recover DAIF
+ RET // Return to caller
+// }
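For reference, the loop above has roughly the following C shape. This is an illustrative rendering only; the assembly is authoritative and additionally performs the stack switch and the ERET/RET return, and the kernel globals are shown with simplified declarations.

    #include "tx_api.h"

    extern TX_THREAD *_tx_thread_execute_ptr;           /* kernel globals referenced above */
    extern TX_THREAD *_tx_thread_current_ptr;
    extern ULONG      _tx_timer_time_slice;

    static void schedule_shape(void)                    /* hypothetical rendering, not the real scheduler */
    {
        TX_THREAD *next;

        do
        {
            next = _tx_thread_execute_ptr;              /* wait for a thread to become ready */
        } while (next == TX_NULL);

        _tx_thread_current_ptr = next;                  /* make it the current thread */
        next -> tx_thread_run_count++;                  /* bump its run counter */
        _tx_timer_time_slice = next -> tx_thread_time_slice;   /* arm its time-slice */
        /* ... switch to next -> tx_thread_stack_ptr and resume via ERET or RET ... */
    }
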
diff --git a/ports/cortex_a53/gnu/src/tx_thread_stack_build.S b/ports/cortex_a53/gnu/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..5b7e945a
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_stack_build.S
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build, @function
+_tx_thread_stack_build:
+
+
+ /* Build an interrupt frame. On Cortex-A53 it should look like this:
+
+ Stack Top: SPSR Initial SPSR
+ ELR Point of interrupt
+ x28 Initial value for x28
+ not used Not used
+ x26 Initial value for x26
+ x27 Initial value for x27
+ x24 Initial value for x24
+ x25 Initial value for x25
+ x22 Initial value for x22
+ x23 Initial value for x23
+ x20 Initial value for x20
+ x21 Initial value for x21
+ x18 Initial value for x18
+ x19 Initial value for x19
+ x16 Initial value for x16
+ x17 Initial value for x17
+ x14 Initial value for x14
+ x15 Initial value for x15
+ x12 Initial value for x12
+ x13 Initial value for x13
+ x10 Initial value for x10
+ x11 Initial value for x11
+ x8 Initial value for x8
+ x9 Initial value for x9
+ x6 Initial value for x6
+ x7 Initial value for x7
+ x4 Initial value for x4
+ x5 Initial value for x5
+ x2 Initial value for x2
+ x3 Initial value for x3
+ x0 Initial value for x0
+ x1 Initial value for x1
+ x29 Initial value for x29 (frame pointer)
+ x30 Initial value for x30 (link register)
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR x4, [x0, #24] // Pickup end of stack area
+ BIC x4, x4, #0xF // Ensure 16-byte alignment
+
+ /* Actually build the stack frame. */
+
+ MOV x2, #0 // Build clear value
+ MOV x3, #0 //
+
+ STP x2, x3, [x4, #-16]! // Set backtrace to 0
+ STP x2, x3, [x4, #-16]! // Set initial x29, x30
+ STP x2, x3, [x4, #-16]! // Set initial x0, x1
+ STP x2, x3, [x4, #-16]! // Set initial x2, x3
+ STP x2, x3, [x4, #-16]! // Set initial x4, x5
+ STP x2, x3, [x4, #-16]! // Set initial x6, x7
+ STP x2, x3, [x4, #-16]! // Set initial x8, x9
+ STP x2, x3, [x4, #-16]! // Set initial x10, x11
+ STP x2, x3, [x4, #-16]! // Set initial x12, x13
+ STP x2, x3, [x4, #-16]! // Set initial x14, x15
+ STP x2, x3, [x4, #-16]! // Set initial x16, x17
+ STP x2, x3, [x4, #-16]! // Set initial x18, x19
+ STP x2, x3, [x4, #-16]! // Set initial x20, x21
+ STP x2, x3, [x4, #-16]! // Set initial x22, x23
+ STP x2, x3, [x4, #-16]! // Set initial x24, x25
+ STP x2, x3, [x4, #-16]! // Set initial x26, x27
+ STP x2, x3, [x4, #-16]! // Set initial x28
+#ifdef EL1
+ MOV x2, #0x4 // Build initial SPSR (EL1)
+#else
+#ifdef EL2
+ MOV x2, #0x8 // Build initial SPSR (EL2)
+#else
+ MOV x2, #0xC // Build initial SPSR (EL3)
+#endif
+#endif
+ MOV x3, x1 // Build initial ELR
+ STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
+
+ /* Setup stack pointer. */
+ // thread_ptr -> tx_thread_stack_ptr = x4;
+
+ STR x4, [x0, #8] // Save stack pointer in thread's control block
+ RET // Return to caller
+
+// }
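Application code never calls _tx_thread_stack_build() directly; tx_thread_create() invokes it to lay down the frame shown above for the new thread. A hedged creation sketch (object names, stack size, and priorities below are hypothetical):

    #include "tx_api.h"

    static TX_THREAD worker_thread;                     /* hypothetical thread object */
    static UCHAR     worker_stack[1024];                /* hypothetical stack area */

    static UINT create_worker(void)
    {
        VOID worker_entry(ULONG);                       /* entry function defined elsewhere */

        return tx_thread_create(&worker_thread, "worker", worker_entry, 0,
                                worker_stack, sizeof(worker_stack),
                                16, 16, TX_NO_TIME_SLICE, TX_AUTO_START);
    }
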
diff --git a/ports/cortex_a53/gnu/src/tx_thread_system_return.S b/ports/cortex_a53/gnu/src/tx_thread_system_return.S
new file mode 100644
index 00000000..7d42b63d
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_thread_system_return.S
@@ -0,0 +1,151 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get clobbered by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_system_return(VOID)
+// {
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return, @function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ MRS x0, DAIF // Pickup DAIF
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+ STP x19, x20, [sp, #-16]! // Save x19, x20
+ STP x21, x22, [sp, #-16]! // Save x21, x22
+ STP x23, x24, [sp, #-16]! // Save x23, x24
+ STP x25, x26, [sp, #-16]! // Save x25, x26
+ STP x27, x28, [sp, #-16]! // Save x27, x28
+ LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR x6, [x5, #0] // Pickup current thread pointer
+
+#ifdef ENABLE_ARM_FP
+ LDR w7, [x6, #248] // Pickup FP enable flag
+ CMP w7, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+
+ MOV x1, #0 // Clear x1
+ STP x0, x1, [sp, #-16]! // Save DAIF and clear value for ELR_EL1
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ MOV x19, x5 // Save x5
+ MOV x20, x6 // Save x6
+ BL _tx_execution_thread_exit // Call the thread exit function
+ MOV x5, x19 // Restore x5
+ MOV x6, x20 // Restore x6
+#endif
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR w1, [x2, #0] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
+
+ MOV x4, sp //
+ STR x4, [x6, #8] // Save thread stack pointer
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+ /* Determine if the time-slice is active. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ MOV x4, #0 // Build clear value
+ CMP w1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save the current remaining time-slice. */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w4, [x2, #0] // Clear time-slice
+ STR w1, [x6, #36] // Store current time-slice
+
+ // }
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+ // _tx_thread_current_ptr = TX_NULL;
+
+ STR x4, [x5, #0] // Clear current thread pointer
+
+ B _tx_thread_schedule // Jump to scheduler!
+
+// }
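Application code never reaches _tx_thread_system_return() directly either; it runs whenever the current thread gives up the processor through a blocking or yielding service, for example (hedged sketch, helper name hypothetical):

    #include "tx_api.h"

    static void give_up_cpu_examples(void)              /* hypothetical helper */
    {
        tx_thread_sleep(10);                            /* suspend the caller for 10 ticks */
        tx_thread_relinquish();                         /* or yield to an equal-priority thread */
    }
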
diff --git a/ports/cortex_a53/gnu/src/tx_timer_interrupt.S b/ports/cortex_a53/gnu/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..5810b5c2
--- /dev/null
+++ b/ports/cortex_a53/gnu/src/tx_timer_interrupt.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_timer_interrupt(VOID)
+// {
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt, @function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+ // _tx_timer_system_clock++;
+
+ LDR x1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR w0, [x1, #0] // Pickup system clock
+ ADD w0, w0, #1 // Increment system clock
+ STR w0, [x1, #0] // Store new system clock
+
+ /* Test for time-slice expiration. */
+ /* if (_tx_timer_time_slice)
+ { */
+
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+ /* _tx_timer_time_slice--; */
+
+ SUB w2, w2, #1 // Decrement the time-slice
+ STR w2, [x3, #0] // Store new time-slice value
+
+ /* Check for expiration. */
+ /* if (_tx_timer_time_slice == 0) */
+
+ CMP w2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+ /* _tx_timer_expired_time_slice = TX_TRUE; */
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV w0, #1 // Build expired value
+ STR w0, [x3, #0] // Set time-slice expiration flag
+
+ /* } */
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+ // if (*_tx_timer_current_ptr)
+ // {
+
+ LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
+ LDR x0, [x1, #0] // Pickup current timer
+ LDR x2, [x0, #0] // Pickup timer list entry
+ CMP x2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+ // _tx_timer_expired = TX_TRUE;
+
+ LDR x3, =_tx_timer_expired // Pickup expiration flag address
+ MOV w2, #1 // Build expired value
+ STR w2, [x3, #0] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+ // }
+ // else
+ // {
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ // _tx_timer_current_ptr++;
+
+ ADD x0, x0, #8 // Move to next timer
+
+ /* Check for wrap-around. */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
+
+ LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
+ LDR x2, [x3, #0] // Pickup list end
+ CMP x0, x2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wrap-around logic
+
+ /* Wrap to beginning of list. */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
+
+ LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
+ LDR x0, [x3, #0] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR x0, [x1, #0] // Store new current timer pointer
+ // }
+
+__tx_timer_done:
+
+
+ /* See if anything has expired. */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
+ LDR w2, [x3, #0] // Pickup time-slice expired flag
+ CMP w2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR x1, =_tx_timer_expired // Pickup addr of other expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+
+ /* Did a timer expire? */
+ // if (_tx_timer_expired)
+ // {
+
+ LDR x1, =_tx_timer_expired // Pickup addr of expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ // _tx_timer_expiration_process();
+
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+ // }
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+ // if (_tx_timer_expired_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR w2, [x3, #0] // Pickup the actual flag
+ CMP w2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+ // _tx_thread_time_slice();
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+ // }
+
+__tx_timer_not_ts_expiration:
+
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ // }
+
+__tx_timer_nothing_expired:
+
+ RET // Return to caller
+
+// }
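The tick processing above is what ultimately drives application timers and time-slicing. A hedged sketch of a periodic application timer that depends on it (object and callback names are hypothetical):

    #include "tx_api.h"

    static TX_TIMER periodic_timer;                     /* hypothetical application timer */

    static VOID periodic_expiration(ULONG id)           /* hypothetical expiration callback */
    {
        TX_PARAMETER_NOT_USED(id);
        /* ... periodic work ... */
    }

    static UINT create_periodic_timer(void)
    {
        /* first expiration after 100 ticks, then every 100 ticks */
        return tx_timer_create(&periodic_timer, "periodic", periodic_expiration, 0,
                               100, 100, TX_AUTO_ACTIVATE);
    }
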
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a55/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..f4e329dc
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,158 @@
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/.project b/ports/cortex_a55/ac6/example_build/sample_threadx/.project
new file mode 100644
index 00000000..a1b15572
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/.project
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>sample_threadx</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+			<triggers>clean,full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+			<triggers>full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.cdt.core.cnature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+	</natures>
+</projectDescription>
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3.h b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..dfe37586
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - assign a shared peripheral interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - assign a block of 32 shared peripheral
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - assign all shared peripheral interrupts
+ * to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * The model for calling Redistributor functions is that, rather than
+ * identifying the target redistributor with every function call, the
+ * SelectRedistributor() function is used to identify which redistributor
+ * is to be used for all functions until a different redistributor is
+ * explicitly selected
+ */
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
+
+/*
+ * GetPrivateIntPriority - determine the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * Int priority
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * GetPrivateIntPending - query whether a private (SGI/PPI) interrupt is pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntSecurity - assign a private (SGI/PPI) interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetPrivateIntSecurityBlock - assign all 32 private (SGI/PPI)
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * group - the group for the interrupt
+ *
+ * Returns:
+ *
+ *
+ */
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group);
+
+#endif /* ndef GICV3_h */
+
+/* EOF GICv3.h */
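A hedged usage sketch of the distributor API declared above: configure, prioritize, route, and enable one SPI, then turn on Non-secure Group 1 forwarding. The interrupt ID, priority, and affinity values are placeholders.

    #include <stdint.h>
    #include "GICv3.h"

    static void setup_example_spi(void)
    {
        ConfigureSPI(42u, gicdicfgr_Level);                    /* level-triggered */
        SetSPIPriority(42u, 0xA0u);                            /* mid-range priority */
        SetSPIRoute(42u, gicv3PackAffinity(0u, 0u, 0u, 0u),    /* route to core 0.0.0.0 */
                    gicdirouter_ModeSpecific);
        EnableSPI(42u);
        EnableGICD(gicdctlr_ARE_NS | gicdctlr_EnableGrp1NS);   /* forward NS Group 1 interrupts */
    }
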
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_aliases.h b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_aliases.h
new file mode 100644
index 00000000..826ba973
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_aliases.h
@@ -0,0 +1,113 @@
+//
+// Aliases for GICv3 registers
+//
+// Copyright (c) 2016-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef GICV3_ALIASES_H
+#define GICV3_ALIASES_H
+
+#ifndef __clang__
+
+/*
+ * Mapping of MSR and MRS to physical and virtual CPU interface registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-5
+ */
+#define ICC_AP0R0_EL1 S3_0_C12_C8_4
+#define ICC_AP0R1_EL1 S3_0_C12_C8_5
+#define ICC_AP0R2_EL1 S3_0_C12_C8_6
+#define ICC_AP0R3_EL1 S3_0_C12_C8_7
+
+#define ICC_AP1R0_EL1 S3_0_C12_C9_0
+#define ICC_AP1R1_EL1 S3_0_C12_C9_1
+#define ICC_AP1R2_EL1 S3_0_C12_C9_2
+#define ICC_AP1R3_EL1 S3_0_C12_C9_3
+
+#define ICC_ASGI1R_EL1 S3_0_C12_C11_6
+
+#define ICC_BPR0_EL1 S3_0_C12_C8_3
+#define ICC_BPR1_EL1 S3_0_C12_C12_3
+
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+
+#define ICC_DIR_EL1 S3_0_C12_C11_1
+
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+
+/*
+ * Mapping of MSR and MRS to virtual interface control registers
+ *
+ * Arm Generic Interrupt Controller Architecture Specification
+ * GIC architecture version 3.0 and version 4.0
+ * Table 8-6
+ */
+#define ICH_AP0R0_EL2 S3_4_C12_C8_0
+#define ICH_AP0R1_EL2 S3_4_C12_C8_1
+#define ICH_AP0R2_EL2 S3_4_C12_C8_2
+#define ICH_AP0R3_EL2 S3_4_C12_C8_3
+
+#define ICH_AP1R0_EL2 S3_4_C12_C9_0
+#define ICH_AP1R1_EL2 S3_4_C12_C9_1
+#define ICH_AP1R2_EL2 S3_4_C12_C9_2
+#define ICH_AP1R3_EL2 S3_4_C12_C9_3
+
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+
+#define ICH_ELRSR_EL2 S3_4_C12_C11_5
+
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define ICH_LR0_EL2 S3_4_C12_C12_0
+#define ICH_LR1_EL2 S3_4_C12_C12_1
+#define ICH_LR2_EL2 S3_4_C12_C12_2
+#define ICH_LR3_EL2 S3_4_C12_C12_3
+#define ICH_LR4_EL2 S3_4_C12_C12_4
+#define ICH_LR5_EL2 S3_4_C12_C12_5
+#define ICH_LR6_EL2 S3_4_C12_C12_6
+#define ICH_LR7_EL2 S3_4_C12_C12_7
+#define ICH_LR8_EL2 S3_4_C12_C13_0
+#define ICH_LR9_EL2 S3_4_C12_C13_1
+#define ICH_LR10_EL2 S3_4_C12_C13_2
+#define ICH_LR11_EL2 S3_4_C12_C13_3
+#define ICH_LR12_EL2 S3_4_C12_C13_4
+#define ICH_LR13_EL2 S3_4_C12_C13_5
+#define ICH_LR14_EL2 S3_4_C12_C13_6
+#define ICH_LR15_EL2 S3_4_C12_C13_7
+
+#endif /* not __clang__ */
+
+#endif /* GICV3_ALIASES */
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicc.h b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicc.h
new file mode 100644
index 00000000..998d92b5
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicc.h
@@ -0,0 +1,254 @@
+/*
+ * GICv3_gicc.h - prototypes and inline functions for GICC system register operations
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_gicc_h
+#define GICV3_gicc_h
+
+#include "GICv3_aliases.h"
+
+#define stringify_no_expansion(x) #x
+#define stringify(x) stringify_no_expansion(x)
+
+/**********************************************************************/
+
+typedef enum
+{
+ sreSRE = (1 << 0),
+ sreDFB = (1 << 1),
+ sreDIB = (1 << 2),
+ sreEnable = (1 << 3)
+} ICC_SREBits_t;
+
+static inline void setICC_SRE_EL1(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL2(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL2)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL2(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL2)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_SRE_EL3(ICC_SREBits_t mode)
+{
+ asm("msr "stringify(ICC_SRE_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_SRE_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_SRE_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ igrpEnable = (1 << 0),
+ igrpEnableGrp1NS = (1 << 0),
+ igrpEnableGrp1S = (1 << 2)
+} ICC_IGRPBits_t;
+
+static inline void setICC_IGRPEN0_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN0_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL1(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline void setICC_IGRPEN1_EL3(ICC_IGRPBits_t mode)
+{
+ asm("msr "stringify(ICC_IGRPEN1_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ ctlrCBPR = (1 << 0),
+ ctlrCBPR_EL1S = (1 << 0),
+ ctlrEOImode = (1 << 1),
+ ctlrCBPR_EL1NS = (1 << 1),
+ ctlrEOImode_EL3 = (1 << 2),
+ ctlrEOImode_EL1S = (1 << 3),
+ ctlrEOImode_EL1NS = (1 << 4),
+ ctlrRM = (1 << 5),
+ ctlrPMHE = (1 << 6)
+} ICC_CTLRBits_t;
+
+static inline void setICC_CTLR_EL1(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL1)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_CTLR_EL3(ICC_CTLRBits_t mode)
+{
+ asm("msr "stringify(ICC_CTLR_EL3)", %0\n; isb" :: "r" ((uint64_t)mode));
+}
+
+static inline uint64_t getICC_CTLR_EL3(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_CTLR_EL3)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+static inline uint64_t getICC_IAR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_IAR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_IAR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline void setICC_EOIR0(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR0_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_EOIR1(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_EOIR1_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_DIR(uint32_t interrupt)
+{
+ asm("msr "stringify(ICC_DIR_EL1)", %0\n; isb" :: "r" ((uint64_t)interrupt));
+}
+
+static inline void setICC_PMR(uint32_t priority)
+{
+ asm("msr "stringify(ICC_PMR_EL1)", %0\n; isb" :: "r" ((uint64_t)priority));
+}
+
+static inline void setICC_BPR0(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR0_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline void setICC_BPR1(uint32_t binarypoint)
+{
+ asm("msr "stringify(ICC_BPR1_EL1)", %0\n; isb" :: "r" ((uint64_t)binarypoint));
+}
+
+static inline uint64_t getICC_BPR0(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR0_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_BPR1(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_BPR1_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+static inline uint64_t getICC_RPR(void)
+{
+ uint64_t retc;
+
+ asm("mrs %0, "stringify(ICC_RPR_EL1)"\n" : "=r" (retc));
+
+ return retc;
+}
+
+/**********************************************************************/
+
+typedef enum
+{
+ sgirIRMTarget = 0,
+ sgirIRMAll = (1ull << 40)
+} ICC_SGIRBits_t;
+
+static inline void setICC_SGI0R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI0R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_SGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_SGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+static inline void setICC_ASGI1R(uint8_t aff3, uint8_t aff2,
+ uint8_t aff1, ICC_SGIRBits_t irm,
+ uint16_t targetlist, uint8_t intid)
+{
+ uint64_t packedbits = (((uint64_t)aff3 << 48) | ((uint64_t)aff2 << 32) | \
+ ((uint64_t)aff1 << 16) | irm | targetlist | \
+ ((uint64_t)(intid & 0x0f) << 24));
+
+ asm("msr "stringify(ICC_ASGI1R_EL1)", %0\n; isb" :: "r" (packedbits));
+}
+
+#endif /* ndef GICV3_gicc_h */
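A hedged sketch of a Group 1 acknowledge/end-of-interrupt sequence built from the accessors above; the 1020-1023 check filters out the architecture's special INTIDs. The handler name is hypothetical.

    #include <stdint.h>
    #include "GICv3_gicc.h"

    static void handle_group1_interrupt(void)            /* hypothetical handler body */
    {
        uint32_t intid = (uint32_t)getICC_IAR1();         /* acknowledge: read the active INTID */

        if (intid < 1020u)                                /* 1020-1023 are special INTIDs */
        {
            /* ... service the interrupt ... */
            setICC_EOIR1(intid);                          /* signal end of interrupt */
        }
    }
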
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicd.c b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicd.c
new file mode 100644
index 00000000..2cf1553b
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicd.c
@@ -0,0 +1,339 @@
+/*
+ * GICv3_gicd.c - generic driver code for GICv3 distributor
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include <stdint.h>
+
+#include "GICv3.h"
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000
+ const volatile uint32_t GICD_TYPER; // +0x0004
+ const volatile uint32_t GICD_IIDR; // +0x0008
+
+ const volatile uint32_t padding0; // +0x000c
+
+ volatile uint32_t GICD_STATUSR; // +0x0010
+
+ const volatile uint32_t padding1[3]; // +0x0014
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040
+ const volatile uint32_t padding2; // +0x0044
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048
+ const volatile uint32_t padding3; // +0x004c
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050
+ const volatile uint32_t padding4; // +0x0054
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058
+
+ const volatile uint32_t padding5[3]; // +0x005c
+
+ volatile uint32_t GICD_SEIR; // +0x0068
+
+ const volatile uint32_t padding6[5]; // +0x006c
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400
+ volatile uint8_t GICD_ITARGETSR[1024]; // +0x0800
+ volatile uint32_t GICD_ICFGR[64]; // +0x0c00
+ volatile uint32_t GICD_IGRPMODR[32]; // +0x0d00
+ const volatile uint32_t padding7[32]; // +0x0d80
+ volatile uint32_t GICD_NSACR[64]; // +0x0e00
+
+ volatile uint32_t GICD_SGIR; // +0x0f00
+
+ const volatile uint32_t padding8[3]; // +0x0f04
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0f10
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0f20
+
+ const volatile uint32_t padding9[52]; // +0x0f30
+ const volatile uint32_t padding10[5120]; // +0x1000
+
+ volatile uint64_t GICD_IROUTER[1024]; // +0x6000
+} GICv3_distributor;
+
+/*
+ * use the scatter file to place GICD
+ */
+static GICv3_distributor __attribute__((section(".bss.distributor"))) gicd;
+
+void ConfigGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR |= flags;
+}
+
+void DisableGICD(GICDCTLRFlags_t flags)
+{
+ gicd.GICD_CTLR &= ~flags;
+}
+
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync)
+{
+ if (dosync)
+ {
+ const uint32_t tmask = gicdctlr_ARE_S | gicdctlr_ARE_NS;
+ const uint32_t tval = flags & tmask;
+
+ while ((gicd.GICD_CTLR & tmask) != tval)
+ continue;
+ }
+ else
+ gicd.GICD_CTLR = flags;
+}
+
+void EnableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ISENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void DisableSPI(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISENABLER has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICENABLER);
+ id &= 32 - 1;
+
+ gicd.GICD_ICENABLER[bank] = 1 << id;
+
+ return;
+}
+
+void SetSPIPriority(uint32_t id, uint32_t priority)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ gicd.GICD_IPRIORITYR[bank] = priority;
+}
+
+uint32_t GetSPIPriority(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IPRIORITYR);
+
+ return (uint32_t)(gicd.GICD_IPRIORITYR[bank]);
+}
+
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ gicd.GICD_IROUTER[bank] = affinity | (uint64_t)mode;
+}
+
+uint64_t GetSPIRoute(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_IROUTER has one doubleword-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_IROUTER);
+
+ return gicd.GICD_IROUTER[bank];
+}
+
+void SetSPITarget(uint32_t id, uint32_t target)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ gicd.GICD_ITARGETSR[bank] = target;
+}
+
+uint32_t GetSPITarget(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ITARGETSR has one byte-wide entry per interrupt
+ */
+ /*
+ * GICD_ITARGETSR has 4 interrupts per register, i.e. 8-bits of
+ * target bitmap per register
+ */
+ bank = id & RANGE_LIMIT(gicd.GICD_ITARGETSR);
+
+ return (uint32_t)(gicd.GICD_ITARGETSR[bank]);
+}
+
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config)
+{
+ uint32_t bank, tmp;
+
+ /*
+ * GICD_ICFGR has 16 interrupts per register, i.e. 2-bits of
+ * configuration per register
+ */
+ bank = (id >> 4) & RANGE_LIMIT(gicd.GICD_ICFGR);
+ config &= 3;
+
+ id = (id & 0xf) << 1;
+
+ tmp = gicd.GICD_ICFGR[bank];
+ tmp &= ~(3 << id);
+ tmp |= config << id;
+ gicd.GICD_ICFGR[bank] = tmp;
+}
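+
+/*
+ * Illustrative sketch (not part of the original sources) of a typical bring-up
+ * sequence for one SPI. INTID 34 and the priority value are placeholders
+ * chosen only for illustration; a configuration value of 0 selects the
+ * level-sensitive encoding in GICD_ICFGR, and routing mode 0 targets the
+ * given affinity:
+ *
+ *     SetSPIPriority(34, 0xA0);
+ *     ConfigureSPI(34, 0);
+ *     SetSPIRoute(34, 0, 0);
+ *     EnableSPI(34);
+ */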
+
+void SetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ISPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ISPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ISPENDR[bank] = 1 << id;
+}
+
+void ClearSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ gicd.GICD_ICPENDR[bank] = 1 << id;
+}
+
+uint32_t GetSPIPending(uint32_t id)
+{
+ uint32_t bank;
+
+ /*
+ * GICD_ICPENDR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_ICPENDR);
+ id &= 0x1f;
+
+ return (gicd.GICD_ICPENDR[bank] >> id) & 1;
+}
+
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group)
+{
+ uint32_t bank, groupmod;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ bank = (id >> 5) & RANGE_LIMIT(gicd.GICD_IGROUPR);
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicd.GICD_IGROUPR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGROUPR[bank] &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicd.GICD_IGRPMODR[bank] |= 1 << id;
+ else
+ gicd.GICD_IGRPMODR[bank] &= ~(1 << id);
+}
+
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group)
+{
+ uint32_t groupmod;
+ const uint32_t nbits = (sizeof group * 8) - 1;
+
+ /*
+ * GICD_IGROUPR has 32 interrupts per register
+ */
+ block &= RANGE_LIMIT(gicd.GICD_IGROUPR);
+
+ /*
+ * get each bit of group config duplicated over all 32-bits in a word
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+ * set the security state for this block of SPIs
+ */
+ gicd.GICD_IGROUPR[block] = group;
+ gicd.GICD_IGRPMODR[block] = groupmod;
+}
+
+void SetSPISecurityAll(GICIGROUPRBits_t group)
+{
+ uint32_t block;
+
+ /*
+ * GICD_TYPER.ITLinesNumber gives (No. SPIS / 32) - 1, and we
+ * want to iterate over all blocks excluding 0 (which are the
+ * SGI/PPI interrupts, and not relevant here)
+ */
+ for (block = (gicd.GICD_TYPER & ((1 << 5) - 1)); block > 0; --block)
+ SetSPISecurityBlock(block, group);
+}
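+
+/*
+ * Illustrative note (not part of the original sources): the EL3 startup code
+ * in startup.S calls this once on the primary core with gicigroupr_G1NS
+ * (defined in GICv3.h) so that every SPI is presented to the Non-secure world
+ * as a Group 1 interrupt:
+ *
+ *     SetSPISecurityAll(gicigroupr_G1NS);
+ */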
+
+/* EOF GICv3_gicd.c */
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicr.c b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicr.c
new file mode 100644
index 00000000..d91aeb27
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/GICv3_gicr.c
@@ -0,0 +1,308 @@
+/*
+ * GICv3_gicr.c - generic driver code for GICv3 redistributor
+ *
+ * Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#include "GICv3.h"
+
+/*
+ * physical LPI Redistributor register map
+ */
+typedef struct
+{
+ volatile uint32_t GICR_CTLR; // +0x0000 - RW - Redistributor Control Register
+ const volatile uint32_t GICR_IIDR; // +0x0004 - RO - Implementer Identification Register
+ const volatile uint32_t GICR_TYPER[2]; // +0x0008 - RO - Redistributor Type Register
+ volatile uint32_t GICR_STATUSR; // +0x0010 - RW - Error Reporting Status Register, optional
+ volatile uint32_t GICR_WAKER; // +0x0014 - RW - Redistributor Wake Register
+ const volatile uint32_t padding1[2]; // +0x0018 - RESERVED
+#ifndef USE_GIC600
+ volatile uint32_t IMPDEF1[8]; // +0x0020 - ?? - IMPLEMENTATION DEFINED
+#else
+ volatile uint32_t GICR_FCTLR; // +0x0020 - RW - Function Control Register
+ volatile uint32_t GICR_PWRR; // +0x0024 - RW - Power Management Control Register
+ volatile uint32_t GICR_CLASS; // +0x0028 - RW - Class Register
+ const volatile uint32_t padding2[5]; // +0x002C - RESERVED
+#endif
+ volatile uint64_t GICR_SETLPIR; // +0x0040 - WO - Set LPI Pending Register
+ volatile uint64_t GICR_CLRLPIR; // +0x0048 - WO - Clear LPI Pending Register
+ const volatile uint32_t padding3[8]; // +0x0050 - RESERVED
+ volatile uint64_t GICR_PROPBASER; // +0x0070 - RW - Redistributor Properties Base Address Register
+ volatile uint64_t GICR_PENDBASER; // +0x0078 - RW - Redistributor LPI Pending Table Base Address Register
+ const volatile uint32_t padding4[8]; // +0x0080 - RESERVED
+ volatile uint64_t GICR_INVLPIR; // +0x00A0 - WO - Redistributor Invalidate LPI Register
+ const volatile uint32_t padding5[2]; // +0x00A8 - RESERVED
+ volatile uint64_t GICR_INVALLR; // +0x00B0 - WO - Redistributor Invalidate All Register
+ const volatile uint32_t padding6[2]; // +0x00B8 - RESERVED
+ volatile uint64_t GICR_SYNCR; // +0x00C0 - RO - Redistributor Synchronize Register
+ const volatile uint32_t padding7[2]; // +0x00C8 - RESERVED
+ const volatile uint32_t padding8[12]; // +0x00D0 - RESERVED
+ volatile uint64_t IMPDEF2; // +0x0100 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding9[2]; // +0x0108 - RESERVED
+ volatile uint64_t IMPDEF3; // +0x0110 - WO - IMPLEMENTATION DEFINED
+ const volatile uint32_t padding10[2]; // +0x0118 - RESERVED
+} GICv3_redistributor_RD;
+
+/*
+ * SGI and PPI Redistributor register map
+ */
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR0; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+        const volatile uint32_t padding8[31];    // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+        volatile uint32_t GICR_ICFGR[2];         // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+        volatile uint32_t GICR_IGRPMODR0;        // +0x0D00 - RW - Interrupt Group Modifier Register
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+} GICv3_redistributor_SGI;
+
+/*
+ * We have a multiplicity of GIC Redistributors; on the GIC-AEM and
+ * GIC-500 they are arranged as one 128KB region per redistributor: one
+ * 64KB page of GICR LPI registers, and one 64KB page of GICR Private
+ * Int registers
+ */
+typedef struct
+{
+ union
+ {
+ GICv3_redistributor_RD RD_base;
+ uint8_t padding[64 * 1024];
+ } RDblock;
+
+ union
+ {
+ GICv3_redistributor_SGI SGI_base;
+ uint8_t padding[64 * 1024];
+ } SGIblock;
+} GICv3_GICR;
+
+/*
+ * use the scatter file to place GIC Redistributor base address
+ *
+ * although this code doesn't know how many Redistributor banks
+ * a particular system will have, we declare gicrbase as an array
+ * to avoid unwanted compiler optimisations when calculating the
+ * base of a particular Redistributor bank
+ */
+static const GICv3_GICR gicrbase[2] __attribute__((section (".bss.redistributor")));
+
+/**********************************************************************/
+
+/*
+ * utility functions to calculate base of a particular
+ * Redistributor bank
+ */
+
+static inline GICv3_redistributor_RD *const getgicrRD(uint32_t gicr)
+{
+ GICv3_GICR *const arraybase = (GICv3_GICR *const)&gicrbase;
+
+ return &((arraybase + gicr)->RDblock.RD_base);
+}
+
+static inline GICv3_redistributor_SGI *const getgicrSGI(uint32_t gicr)
+{
+ GICv3_GICR *arraybase = (GICv3_GICR *)(&gicrbase);
+
+ return &(arraybase[gicr].SGIblock.SGI_base);
+}
+
+/**********************************************************************/
+
+// This function walks a block of RDs to find one with the matching affinity
+uint32_t GetGICR(uint32_t affinity)
+{
+ GICv3_redistributor_RD* gicr;
+ uint32_t index = 0;
+
+ do
+ {
+ gicr = getgicrRD(index);
+ if (gicr->GICR_TYPER[1] == affinity)
+ return index;
+
+ index++;
+ }
+ while((gicr->GICR_TYPER[0] & (1<<4)) == 0); // Keep looking until GICR_TYPER.Last reports no more RDs in block
+
+  return 0xFFFFFFFF;  // return -1 to signal that no matching RD was found
+}
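+
+/*
+ * Illustrative sketch (not part of the original sources): startup.S performs
+ * this walk during early boot, roughly equivalent to the C below, where
+ * GetAffinity() stands for the assembly helper that returns this core's
+ * MPIDR affinity fields:
+ *
+ *     uint32_t rd = GetGICR(GetAffinity());
+ *     if (rd != 0xFFFFFFFF)
+ *         WakeupGICR(rd);
+ */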
+
+void WakeupGICR(uint32_t gicr)
+{
+ GICv3_redistributor_RD *const gicrRD = getgicrRD(gicr);
+#ifdef USE_GIC600
+ //Power up Re-distributor for GIC-600
+ gicrRD->GICR_PWRR = 0x2;
+#endif
+
+ /*
+ * step 1 - ensure GICR_WAKER.ProcessorSleep is off
+ */
+ gicrRD->GICR_WAKER &= ~gicrwaker_ProcessorSleep;
+
+ /*
+ * step 2 - wait for children asleep to be cleared
+ */
+ while ((gicrRD->GICR_WAKER & gicrwaker_ChildrenAsleep) != 0)
+ continue;
+
+ /*
+ * OK, GICR is go
+ */
+ return;
+}
+
+void EnablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISENABLER = 1 << id;
+}
+
+void DisablePrivateInt(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICENABLER = 1 << id;
+}
+
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ gicrSGI->GICR_IPRIORITYR[id] = priority;
+}
+
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICD_IPRIORITYR has one byte-wide entry per interrupt
+ */
+ id &= RANGE_LIMIT(gicrSGI->GICR_IPRIORITYR);
+
+ return (uint32_t)(gicrSGI->GICR_IPRIORITYR[id]);
+}
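+
+/*
+ * Illustrative sketch (not part of the original sources): enabling a per-core
+ * private interrupt on redistributor 'rd'. INTID 30 (commonly the EL1 physical
+ * timer PPI on Arm reference platforms) and the priority value are assumptions
+ * used only for illustration:
+ *
+ *     SetPrivateIntPriority(rd, 30, 0xA0);
+ *     EnablePrivateInt(rd, 30);
+ */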
+
+void SetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ISPENDR = 1 << id;
+}
+
+void ClearPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ICPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ gicrSGI->GICR_ICPENDR = 1 << id;
+}
+
+uint32_t GetPrivateIntPending(uint32_t gicr, uint32_t id)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+
+ /*
+ * GICR_ISPENDR is one 32-bit register
+ */
+ id &= 0x1f;
+
+ return (gicrSGI->GICR_ISPENDR >> id) & 0x01;
+}
+
+void SetPrivateIntSecurity(uint32_t gicr, uint32_t id, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ uint32_t groupmod;
+
+ /*
+ * GICR_IGROUPR0 is one 32-bit register
+ */
+ id &= 0x1f;
+
+ /*
+ * the single group argument is split into two separate
+ * registers, so filter out and remove the (new to gicv3)
+ * group modifier bit
+ */
+ groupmod = (group >> 1) & 1;
+ group &= 1;
+
+ /*
+ * either set or clear the Group bit for the interrupt as appropriate
+ */
+ if (group)
+ gicrSGI->GICR_IGROUPR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGROUPR0 &= ~(1 << id);
+
+ /*
+ * now deal with groupmod
+ */
+ if (groupmod)
+ gicrSGI->GICR_IGRPMODR0 |= 1 << id;
+ else
+ gicrSGI->GICR_IGRPMODR0 &= ~(1 << id);
+}
+
+void SetPrivateIntSecurityBlock(uint32_t gicr, GICIGROUPRBits_t group)
+{
+ GICv3_redistributor_SGI *const gicrSGI = getgicrSGI(gicr);
+ const uint32_t nbits = (sizeof group * 8) - 1;
+ uint32_t groupmod;
+
+ /*
+ * get each bit of group config duplicated over all 32-bits
+ */
+ groupmod = (uint32_t)(((int32_t)group << (nbits - 1)) >> 31);
+ group = (uint32_t)(((int32_t)group << nbits) >> 31);
+
+ /*
+     * set the security state for this core's SGIs and PPIs
+ */
+ gicrSGI->GICR_IGROUPR0 = group;
+ gicrSGI->GICR_IGRPMODR0 = groupmod;
+}
+
+/* EOF GICv3_gicr.c */
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.S b/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.S
new file mode 100644
index 00000000..c787c3f5
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.S
@@ -0,0 +1,133 @@
+//
+// Armv8-A AArch64 - Basic Mutex Example
+// Includes the option (USE_LSE_ATOMIC) to use Large System Extension (LSE) atomics introduced in Armv8.1-A
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+ .global _mutex_initialize
+ .global _mutex_acquire
+ .global _mutex_release
+
+//
+// These routines implement the mutex management functions required for running
+// the Arm C library in a multi-threaded environment.
+//
+// They use a value of 0 to represent an unlocked mutex, and 1 for a locked mutex
+//
+// **********************************************************************
+//
+
+ .type _mutex_initialize, "function"
+ .cfi_startproc
+_mutex_initialize:
+
+ //
+ // mark the mutex as unlocked
+ //
+ mov w1, #0
+ str w1, [x0]
+
+ //
+ // we are running multi-threaded, so set a non-zero return
+ // value (function prototype says use 1)
+ //
+ mov w0, #1
+ ret
+ .cfi_endproc
+
+#if !defined(USE_LSE_ATOMIC)
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+
+ //
+ // send ourselves an event, so we don't stick on the wfe at the
+ // top of the loop
+ //
+ sevl
+
+ //
+ // wait until the mutex is available
+ //
+loop:
+ wfe
+ ldaxr w1, [x0]
+ cbnz w1, loop
+
+ //
+ // mutex is (at least, it was) available - try to claim it
+ //
+ mov w1, #1
+ stxr w2, w1, [x0]
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+
+ mov w1, #0
+ stlr w1, [x0]
+ ret
+ .cfi_endproc
+
+#else // LSE version
+
+ .type _mutex_acquire, "function"
+ .cfi_startproc
+_mutex_acquire:
+ // This uses a "ticket lock". The lock is stored as a 32-bit value:
+ // - the upper 16-bits record the thread's ticket number ("take a ticket")
+ // - the lower 16-bits record the ticket being served ("now serving")
+
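+    // Worked example (illustrative, not part of the original sources): the lock
+    // word starts at 0x00000000. Thread A's LDADDA returns 0x00000000 and leaves
+    // 0x00010000 in memory, so A's ticket (0) matches "now serving" (0) and A
+    // owns the lock. Thread B's LDADDA then returns 0x00010000 and leaves
+    // 0x00020000; B spins below until _mutex_release increments "now serving"
+    // to 1, matching B's ticket.
+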
+ // atomically load then increment the thread's ticket number ("take a ticket")
+ mov w3, #(1 << 16)
+ ldadda w3, w1, [x0]
+
+ // is the ticket now being served?
+ eor w2, w1, w1, ror #16
+ cbz w2, loop_exit
+
+ // no, so wait for the ticket to be served
+
+ // send a local event to avoid missing an unlock before the exclusive load
+ sevl
+
+loop:
+ wfe
+ ldaxrh w3, [x0]
+ eor w2, w3, w1, lsr #16
+ cbnz w2, loop
+
+ //
+ // OK, we have the mutex, our work is done here
+ //
+loop_exit:
+ ret
+ .cfi_endproc
+
+
+ .type _mutex_release, "function"
+ .cfi_startproc
+_mutex_release:
+ mov w1, #1
+ staddlh w1, [x0]
+ ret
+ .cfi_endproc
+#endif
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.h b/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.h
new file mode 100644
index 00000000..ec1a1d28
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/MP_Mutexes.h
@@ -0,0 +1,66 @@
+/*
+ * Armv8-A AArch64 - Basic Mutex Example
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef MP_MUTEX_H
+#define MP_MUTEX_H
+
+/*
+ * The Arm C library calls-out to these functions to manage multithreading.
+ * They can also be called by user application code.
+ *
+ * Mutex type is specified by the Arm C library
+ *
+ * Declare function prototypes for libc mutex routines
+ */
+typedef signed int *mutex;
+
+/*
+ * int _mutex_initialize(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ * 0 - application is non-threaded
+ * 1 - application is threaded
+ * The C library uses the return result to indicate whether it is being used in a multithreaded environment.
+ */
+int _mutex_initialize(mutex *m);
+
+/*
+ * void _mutex_acquire(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * Routine does not return until the mutex has been claimed. A load-acquire
+ * is used to guarantee that the mutex claim is properly ordered with
+ * respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_acquire(mutex *m);
+
+/*
+ * void _mutex_release(mutex *m)
+ *
+ * Inputs
+ * mutex *m - pointer to the 32-bit word associated with the mutex
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * A store-release is used to guarantee that the mutex release is properly
+ *      ordered with respect to any accesses to the resource protected by the mutex
+ */
+void _mutex_release(mutex *m);
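+
+/*
+ * Illustrative usage sketch (not part of the original sources). The Arm C
+ * library invokes these routines itself, but application code may also call
+ * them directly; my_lock and shared_counter below are placeholder names:
+ *
+ *     static mutex my_lock;
+ *     static int shared_counter;
+ *
+ *     _mutex_initialize(&my_lock);
+ *
+ *     _mutex_acquire(&my_lock);
+ *     shared_counter++;                // access protected by the mutex
+ *     _mutex_release(&my_lock);
+ */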
+
+#endif
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/PPM_AEM.h b/ports/cortex_a55/ac6/example_build/sample_threadx/PPM_AEM.h
new file mode 100644
index 00000000..f7501eeb
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/PPM_AEM.h
@@ -0,0 +1,66 @@
+//
+// Private Peripheral Map for the v8 Architecture Envelope Model
+//
+// Copyright (c) 2012-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef PPM_AEM_H
+#define PPM_AEM_H
+
+//
+// Distributor layout
+//
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_IGROUP 0x0080
+#define GICD_ISENABLE 0x0100
+#define GICD_ICENABLE 0x0180
+#define GICD_ISPEND 0x0200
+#define GICD_ICPEND 0x0280
+#define GICD_ISACTIVE 0x0300
+#define GICD_ICACTIVE 0x0380
+#define GICD_IPRIORITY 0x0400
+#define GICD_ITARGETS 0x0800
+#define GICD_ICFG 0x0c00
+#define GICD_PPISR 0x0d00
+#define GICD_SPISR 0x0d04
+#define GICD_SGIR 0x0f00
+#define GICD_CPENDSGI 0x0f10
+#define GICD_SPENDSGI 0x0f20
+#define GICD_PIDR4 0x0fd0
+#define GICD_PIDR5 0x0fd4
+#define GICD_PIDR6 0x0fd8
+#define GICD_PIDR7 0x0fdc
+#define GICD_PIDR0 0x0fe0
+#define GICD_PIDR1 0x0fe4
+#define GICD_PIDR2 0x0fe8
+#define GICD_PIDR3 0x0fec
+#define GICD_CIDR0 0x0ff0
+#define GICD_CIDR1 0x0ff4
+#define GICD_CIDR2 0x0ff8
+#define GICD_CIDR3 0x0ffc
+
+//
+// CPU Interface layout
+//
+#define GICC_CTLR 0x0000
+#define GICC_PMR 0x0004
+#define GICC_BPR 0x0008
+#define GICC_IAR 0x000c
+#define GICC_EOIR 0x0010
+#define GICC_RPR 0x0014
+#define GICC_HPPIR 0x0018
+#define GICC_ABPR 0x001c
+#define GICC_AIAR 0x0020
+#define GICC_AEOIR 0x0024
+#define GICC_AHPPIR 0x0028
+#define GICC_APR0 0x00d0
+#define GICC_NSAPR0 0x00e0
+#define GICC_IIDR 0x00fc
+#define GICC_DIR 0x1000
+
+#endif // PPM_AEM_H
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..17cceb01
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,393 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+
+extern void init_timer(void); /* in timer_interrupts.c */
+
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define byte pool memory. */
+
+UCHAR byte_pool_memory[DEMO_BYTE_POOL_SIZE];
+
+
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_TIMER timer_0;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+UCHAR event_buffer[65536];
+
+#endif
+
+
+/* Define main entry point. */
+
+int main(void)
+{
+
+ /* Initialize timer. */
+ init_timer();
+
+ /* Enter ThreadX. */
+ tx_kernel_enter();
+
+ return 0;
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+ tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
+#endif
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", byte_pool_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This thread simply sits in a while-forever-sleep loop.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..c5238ecd
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,325 @@
[sample_threadx.launch body: 325 lines of debug launch configuration XML; the markup was stripped during extraction and cannot be reconstructed here]
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..e5783c7c
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,103 @@
+;********************************************************
+; Scatter file for Armv8-A Startup code on FVP Base model
+; Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;********************************************************
+
+LOAD 0x80000000
+{
+ EXEC +0
+ {
+ startup.o (StartUp, +FIRST)
+ * (+RO, +RW, +ZI)
+ }
+
+ ;
+ ; App stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ ARM_LIB_STACK +0 ALIGN 64 EMPTY 8 * 0x4000 {}
+
+ ;
+ ; Separate heap - import symbol __use_two_region_memory
+ ; in source code for this to work correctly
+ ;
+ ARM_LIB_HEAP +0 ALIGN 64 EMPTY 0xA0000 {}
+
+ ;
+ ; Handler stacks for all CPUs
+ ; All stacks and heap are aligned to a cache-line boundary
+ ;
+ HANDLER_STACK +0 ALIGN 64 EMPTY 4 * 0x4000 {}
+
+ ;
+ ; Stacks for EL3
+ ;
+ EL3_STACKS +0 ALIGN 64 EMPTY 8 * 0x1000 {}
+ ;
+ ; Strictly speaking, the L1 tables do not need to
+ ; be so strongly aligned, but no matter
+ ;
+ TTB0_L1 +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; Various sets of L2 tables
+ ;
+ ; Alignment is 4KB, since the code uses a 4K page
+ ; granularity - larger granularities would require
+ ; correspondingly stricter alignment
+ ;
+ TTB0_L2_RAM +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PRIVATE +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ TTB0_L2_PERIPH +0 ALIGN 4096 EMPTY 0x1000 {}
+
+ ;
+ ; The startup code uses the end of this region to calculate
+ ; the top of memory - do not place any RAM regions after it
+ ;
+ TOP_OF_RAM +0 EMPTY 4 {}
+
+ ;
+ ; CS3 Peripherals is a 64MB region from 0x1c000000
+ ; that includes the following:
+ ; System Registers at 0x1C010000
+ ; UART0 (PL011) at 0x1C090000
+ ; Color LCD Controller (PL111) at 0x1C1F0000
+ ; plus a number of others.
+ ; CS3_PERIPHERALS is used by the startup code for page-table generation
+ ; This region is not truly empty, but we have no
+ ; predefined objects that live within it
+ ;
+ CS3_PERIPHERALS 0x1c000000 EMPTY 0x90000 {}
+
+ ;
+ ; Place the UART peripheral registers data structure
+ ; This is only really needed if USE_SERIAL_PORT is defined, but
+ ; the linker will remove unused sections if not needed
+; PL011 0x1c090000 UNINIT 0x1000
+; {
+; uart.o (+ZI)
+; }
+ ; Note that some other CS3_PERIPHERALS follow this
+
+ ;
+ ; GICv3 distributor
+ ;
+ GICD 0x2f000000 UNINIT 0x8000
+ {
+ GICv3_gicd.o (.bss.distributor)
+ }
+
+ ;
+ ; GICv3 redistributors
+ ; 128KB for each redistributor in the system
+ ;
+ GICR 0x2f100000 UNINIT 0x80000
+ {
+ GICv3_gicr.o (.bss.redistributor)
+ }
+}
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.c b/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.c
new file mode 100644
index 00000000..c2ce6faa
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.c
@@ -0,0 +1,122 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "sp804_timer.h"
+
+#define TIMER_SP804_CTRL_TIMEREN (1 << 7)
+#define TIMER_SP804_CTRL_TIMERMODE (1 << 6) // Bit 6: 0=free-running, 1=periodic
+#define TIMER_SP804_CTRL_INTENABLE (1 << 5)
+#define TIMER_SP804_CTRL_TIMERSIZE (1 << 1) // Bit 1: 0=16-bit, 1=32-bit
+#define TIMER_SP804_CTRL_ONESHOT (1 << 0) // Bit 0: 0=wrapping, 1=one-shot
+
+#define TIMER_SP804_CTRL_PRESCALE_1 (0 << 2) // clk/1
+#define TIMER_SP804_CTRL_PRESCALE_4 (1 << 2) // clk/4
+#define TIMER_SP804_CTRL_PRESCALE_8 (2 << 2) // clk/8
+
+struct sp804_timer
+{
+ volatile uint32_t Time1Load; // +0x00
+ const volatile uint32_t Time1Value; // +0x04 - RO
+ volatile uint32_t Timer1Control; // +0x08
+ volatile uint32_t Timer1IntClr; // +0x0C - WO
+ const volatile uint32_t Timer1RIS; // +0x10 - RO
+ const volatile uint32_t Timer1MIS; // +0x14 - RO
+ volatile uint32_t Timer1BGLoad; // +0x18
+
+  const volatile uint32_t reserved;       // +0x1C - RESERVED (keeps the Timer 2 registers at their documented offsets)
+
+  volatile uint32_t Time2Load;            // +0x20
+  volatile uint32_t Time2Value;           // +0x24
+  volatile uint32_t Timer2Control;        // +0x28
+ volatile uint32_t Timer2IntClr; // +0x2C - WO
+ const volatile uint32_t Timer2RIS; // +0x30 - RO
+ const volatile uint32_t Timer2MIS; // +0x34 - RO
+ volatile uint32_t Timer2BGLoad; // +0x38
+
+ // Not including ID registers
+
+};
+
+// Pointer to the dual timer block; its base address is set at run time via setTimerBaseAddress()
+struct sp804_timer* dual_timer;
+
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address)
+{
+ dual_timer = (struct sp804_timer*)address;
+ return;
+}
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt)
+{
+ uint32_t tmp = 0;
+
+ dual_timer->Time1Load = load_value;
+
+  // Fixed settings: 32-bit counter, no prescaling, periodic mode
+ tmp = TIMER_SP804_CTRL_TIMERSIZE | TIMER_SP804_CTRL_PRESCALE_1 | TIMER_SP804_CTRL_TIMERMODE;
+
+ // Settings from parameters: interrupt generation & reload
+ tmp = tmp | interrupt | auto_reload;
+
+ // Write control register
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Starts the timer
+void startTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp | TIMER_SP804_CTRL_TIMEREN; // Set TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Stops the timer
+void stopTimer(void)
+{
+ uint32_t tmp;
+
+ tmp = dual_timer->Timer1Control;
+ tmp = tmp & ~TIMER_SP804_CTRL_TIMEREN; // Clear TimerEn (bit 7)
+ dual_timer->Timer1Control = tmp;
+
+ return;
+}
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void)
+{
+ return dual_timer->Time1Value;
+}
+
+
+void clearTimerIrq(void)
+{
+ // A write to this register, of any value, clears the interrupt
+ dual_timer->Timer1IntClr = 1;
+}
+
+
+// ------------------------------------------------------------
+// End of sp804_timer.c
+// ------------------------------------------------------------
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.h b/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.h
new file mode 100644
index 00000000..4d423904
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/sp804_timer.h
@@ -0,0 +1,53 @@
+// ------------------------------------------------------------
+// SP804 Dual Timer
+// Header File
+//
+// Copyright (c) 2009-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#ifndef _SP804_TIMER_
+#define _SP804_TIMER_
+
+#include <stdint.h>
+
+// Set base address of timer
+// address - virtual address of SP804 timer
+void setTimerBaseAddress(uint64_t address);
+
+
+// Sets up the private timer
+// load_value - Initial value of timer
+// auto_reload - Periodic (SP804_AUTORELOAD) or one shot (SP804_SINGLESHOT)
+// interrupt - Whether to generate an interrupt
+
+#define SP804_AUTORELOAD (0)
+#define SP804_SINGLESHOT (1)
+#define SP804_GENERATE_IRQ (1 << 5)
+#define SP804_NO_IRQ (0)
+
+void initTimer(uint32_t load_value, uint32_t auto_reload, uint32_t interrupt);
+
+
+// Starts the timer
+void startTimer(void);
+
+
+// Stops the timer
+void stopTimer(void);
+
+
+// Returns the current timer count
+uint32_t getTimerCount(void);
+
+
+// Clears the timer interrupt
+void clearTimerIrq(void);
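+
+// Illustrative usage sketch (not part of the original sources). The base
+// address is an assumption for an SP804 instance on an Arm FVP/VE platform and
+// the load value is arbitrary; substitute the values your platform actually uses:
+//
+//     setTimerBaseAddress(0x1C110000);
+//     initTimer(10000, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+//     startTimer();
+//     ...
+//     clearTimerIrq();    // typically called from the timer interrupt handler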
+
+#endif
+
+// ------------------------------------------------------------
+// End of sp804_timer.h
+// ------------------------------------------------------------
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a55/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..3952a200
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,779 @@
+// ------------------------------------------------------------
+// Armv8-A MPCore EL3 AArch64 Startup Code
+//
+// Basic Vectors, MMU, caches and GICv3 initialization
+//
+// Exits in EL1 AArch64
+//
+// Copyright (c) 2014-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_mmu.h"
+#include "v8_system.h"
+#include "GICv3_aliases.h"
+
+ .section StartUp, "ax"
+ .balign 4
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+
+ .global InvalidateUDCaches
+ .global ZeroBlock
+
+ .global SetPrivateIntSecurityBlock
+ .global SetSPISecurityAll
+ .global SetPrivateIntPriority
+
+ .global GetGICR
+ .global WakeupGICR
+ .global SyncAREinGICD
+ .global EnableGICD
+ .global EnablePrivateInt
+ .global GetPrivateIntPending
+ .global ClearPrivateIntPending
+
+ .global __main
+ //.global MainApp
+
+ .global Image$$EXEC$$RO$$Base
+ .global Image$$TTB0_L1$$ZI$$Base
+ .global Image$$TTB0_L2_RAM$$ZI$$Base
+ .global Image$$TTB0_L2_PERIPH$$ZI$$Base
+ .global Image$$TOP_OF_RAM$$ZI$$Base
+ .global Image$$GICD$$ZI$$Base
+ .global Image$$ARM_LIB_STACK$$ZI$$Limit
+ .global Image$$EL3_STACKS$$ZI$$Limit
+ .global Image$$CS3_PERIPHERALS$$ZI$$Base
+ // use separate stack and heap, as anticipated by scatter.scat
+ .global __use_two_region_memory
+
+
+// ------------------------------------------------------------
+
+ .global start64
+ .type start64, "function"
+start64:
+
+ //
+ // program the VBARs
+ //
+ ldr x1, =el1_vectors
+ msr VBAR_EL1, x1
+
+ ldr x1, =el2_vectors
+ msr VBAR_EL2, x1
+
+ ldr x1, =el3_vectors
+ msr VBAR_EL3, x1
+
+
+ // GIC-500 comes out of reset in GICv2 compatibility mode - first set
+ // system register enables for all relevant exception levels, and
+ // select GICv3 operating mode
+ //
+ msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL3, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
+
+ //
+ // set lower exception levels as non-secure, with no access
+    // back to EL2 or EL3, and make them AArch64 capable
+ //
+ mov x3, #(SCR_EL3_RW | \
+ SCR_EL3_SMD | \
+ SCR_EL3_NS) // Set NS bit, to access Non-secure registers
+ msr SCR_EL3, x3
+ isb
+
+ mov x0, #15
+ msr ICC_SRE_EL2, x0
+ isb
+ msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
+
+
+ //
+ // no traps or VM modifications from the Hypervisor, EL1 is AArch64
+ //
+ mov x2, #HCR_EL2_RW
+ msr HCR_EL2, x2
+
+ //
+ // VMID is still significant, even when virtualisation is not
+ // being used, so ensure VTTBR_EL2 is properly initialised
+ //
+ msr VTTBR_EL2, xzr
+
+ //
+ // VMPIDR_EL2 holds the value of the Virtualization Multiprocessor ID. This is the value returned by Non-secure EL1 reads of MPIDR_EL1.
+ // VPIDR_EL2 holds the value of the Virtualization Processor ID. This is the value returned by Non-secure EL1 reads of MIDR_EL1.
+ // Both of these registers are architecturally UNKNOWN at reset, and so they must be set to the correct value
+ // (even if EL2/virtualization is not being used), otherwise non-secure EL1 reads of MPIDR_EL1/MIDR_EL1 will return garbage values.
+ // This guarantees that any future reads of MPIDR_EL1 and MIDR_EL1 from Non-secure EL1 will return the correct value.
+ //
+ mrs x0, MPIDR_EL1
+ msr VMPIDR_EL2, x0
+ mrs x0, MIDR_EL1
+ msr VPIDR_EL2, x0
+
+ // extract the core number from MPIDR_EL1 and store it in
+ // x19 (defined by the AAPCS as callee-saved), so we can re-use
+ // the number later
+ //
+ bl GetCPUID
+ mov x19, x0
+
+ //
+ // neither EL3 nor EL2 trap floating point or accesses to CPACR
+ //
+ msr CPTR_EL3, xzr
+ msr CPTR_EL2, xzr
+
+ //
+ // SCTLR_ELx may come out of reset with UNKNOWN values so we will
+    // set the fields to 0 except, possibly, the endianness field(s).
+ // Note that setting SCTLR_EL2 or the EL0 related fields of SCTLR_EL1
+ // is not strictly needed, since we're never in EL2 or EL0
+ //
+#ifdef __ARM_BIG_ENDIAN
+ mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
+#else
+ mov x0, #0
+#endif
+ msr SCTLR_EL3, x0
+ msr SCTLR_EL2, x0
+ msr SCTLR_EL1, x0
+
+#ifdef CORTEXA
+ //
+ // Configure ACTLR_EL[23]
+ // ----------------------
+ //
+ // These bits are IMPLEMENTATION DEFINED, so are different for
+ // different processors
+ //
+ // For Cortex-A57, the controls we set are:
+ //
+ // Enable lower level access to CPUACTLR_EL1
+ // Enable lower level access to CPUECTLR_EL1
+ // Enable lower level access to L2CTLR_EL1
+ // Enable lower level access to L2ECTLR_EL1
+ // Enable lower level access to L2ACTLR_EL1
+ //
+ mov x0, #((1 << 0) | \
+ (1 << 1) | \
+ (1 << 4) | \
+ (1 << 5) | \
+ (1 << 6))
+
+ msr ACTLR_EL3, x0
+ msr ACTLR_EL2, x0
+
+ //
+ // configure CPUECTLR_EL1
+ //
+    // These bits are IMP DEF, so need to be different for different
+ // processors
+ //
+ // SMPEN - bit 6 - Enables the processor to receive cache
+ // and TLB maintenance operations
+ //
+ // Note: For Cortex-A57/53 SMPEN should be set before enabling
+ // the caches and MMU, or performing any cache and TLB
+ // maintenance operations.
+ //
+ // This register has a defined reset value, so we use a
+ // read-modify-write sequence to set SMPEN
+ //
+ mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
+ orr x0, x0, #(1 << 6) // Set the SMPEN bit
+ msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
+
+ isb
+#endif
+
+ //
+ // That's the last of the control settings for now
+ //
+ // Note: no ISB after all these changes, as registers won't be
+ // accessed until after an exception return, which is itself a
+ // context synchronisation event
+ //
+
+ //
+ // Setup some EL3 stack space, ready for calling some subroutines, below.
+ //
+ // Stack space allocation is CPU-specific, so use CPU
+ // number already held in x19
+ //
+ // 2^12 bytes per CPU for the EL3 stacks
+ //
+ ldr x0, =Image$$EL3_STACKS$$ZI$$Limit
+ sub x0, x0, x19, lsl #12
+ mov sp, x0
+
+ //
+ // we need to configure the GIC while still in secure mode, specifically
+ // all PPIs and SPIs have to be programmed as Group1 interrupts
+ //
+
+ //
+ // Before the GIC can be reliably programmed, we need to
+ // enable Affinity Routing, as this affects where the configuration
+ // registers are (with Affinity Routing enabled, some registers are
+ // in the Redistributor, whereas those same registers are in the
+ // Distributor with Affinity Routing disabled (i.e. when in GICv2
+    // compatibility mode)).
+ //
+ mov x0, #(1 << 4) | (1 << 5) // gicdctlr_ARE_S | gicdctlr_ARE_NS
+ mov x1, x19
+ bl SyncAREinGICD
+
+ //
+ // The Redistributor comes out of reset assuming the processor is
+ // asleep - correct that assumption
+ //
+ bl GetAffinity
+ bl GetGICR
+ mov w20, w0 // Keep a copy for later
+ bl WakeupGICR
+
+ //
+ // Now we're ready to set security and other initialisations
+ //
+ // This is a per-CPU configuration for these interrupts
+ //
+ // for the first cluster, CPU number is the redistributor index
+ //
+ mov w0, w20
+ mov w1, #1 // gicigroupr_G1NS
+ bl SetPrivateIntSecurityBlock
+
+ //
+ // While we're in the Secure World, set the priority mask low enough
+ // for it to be writable in the Non-Secure World
+ //
+ //mov x0, #16 << 3 // 5 bits of priority in the Secure world
+ mov x0, #0xFF // for Non-Secure interrupts
+ msr ICC_PMR_EL1, x0
+
+ //
+ // there's more GIC setup to do, but only for the primary CPU
+ //
+ cbnz x19, drop_to_el1
+
+ //
+ // There's more to do to the GIC - call the utility routine to set
+ // all SPIs to Group1
+ //
+ mov w0, #1 // gicigroupr_G1NS
+ bl SetSPISecurityAll
+
+ //
+ // Set up EL1 entry point and "dummy" exception return information,
+ // then perform exception return to enter EL1
+ //
+ .global drop_to_el1
+drop_to_el1:
+ adr x1, el1_entry_aarch64
+ msr ELR_EL3, x1
+ mov x1, #(AARCH64_SPSR_EL1h | \
+ AARCH64_SPSR_F | \
+ AARCH64_SPSR_I | \
+ AARCH64_SPSR_A)
+ msr SPSR_EL3, x1
+ eret
+
+
+
+// ------------------------------------------------------------
+// EL1 - Common start-up code
+// ------------------------------------------------------------
+
+ .global el1_entry_aarch64
+ .type el1_entry_aarch64, "function"
+el1_entry_aarch64:
+
+ //
+ // Now we're in EL1, setup the application stack
+ // the scatter file allocates 2^14 bytes per app stack
+ //
+ ldr x0, =Image$$HANDLER_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+ MSR SPSel, #0
+ ISB
+ ldr x0, =Image$$ARM_LIB_STACK$$ZI$$Limit
+ sub x0, x0, x19, lsl #14
+ mov sp, x0
+
+ //
+ // Enable floating point
+ //
+ mov x0, #CPACR_EL1_FPEN
+ msr CPACR_EL1, x0
+
+ //
+ // Invalidate caches and TLBs for all stage 1
+ // translations used at EL1
+ //
+ // Cortex-A processors automatically invalidate their caches on reset
+ // (unless suppressed with the DBGL1RSTDISABLE or L2RSTDISABLE pins).
+ // It is therefore not necessary for software to invalidate the caches
+ // on startup, however, this is done here in case of a warm reset.
+ bl InvalidateUDCaches
+ tlbi VMALLE1
+
+
+ //
+ // Set TTBR0 Base address
+ //
+ // The CPUs share one set of translation tables that are
+ // generated by CPU0 at run-time
+ //
+ // TTBR1_EL1 is not used in this example
+ //
+ ldr x1, =Image$$TTB0_L1$$ZI$$Base
+ msr TTBR0_EL1, x1
+
+
+ //
+ // Set up memory attributes
+ //
+ // These equate to:
+ //
+ // 0 -> 0b01000100 = 0x00000044 = Normal, Inner/Outer Non-Cacheable
+ // 1 -> 0b11111111 = 0x0000ff00 = Normal, Inner/Outer WriteBack Read/Write Allocate
+ // 2 -> 0b00000100 = 0x00040000 = Device-nGnRE
+ //
+ mov x1, #0xff44
+ movk x1, #4, LSL #16 // equiv to: movk x1, #0x0000000000040000
+ msr MAIR_EL1, x1
+
+
+ //
+ // Set up TCR_EL1
+ //
+ // We're using only TTBR0 (EPD1 = 1), and the page table entries:
+ // - are using an 8-bit ASID from TTBR0
+ // - have a 4K granularity (TG0 = 0b00)
+ // - are outer-shareable (SH0 = 0b10)
+ // - are using Inner & Outer WBWA Normal memory ([IO]RGN0 = 0b01)
+ // - map
+ // + 32 bits of VA space (T0SZ = 0x20)
+ // + into a 32-bit PA space (IPS = 0b000)
+ //
+ // 36 32 28 24 20 16 12 8 4 0
+ // -----+----+----+----+----+----+----+----+----+----+
+ // | | |OOII| | | |OOII| | |
+ // TT | | |RRRR|E T | T| |RRRR|E T | T|
+ // BB | I I|TTSS|GGGG|P 1 | 1|TTSS|GGGG|P 0 | 0|
+ // IIA| P P|GGHH|NNNN|DAS | S|GGHH|NNNN|D S | S|
+ // 10S| S-S|1111|1111|11Z-|---Z|0000|0000|0 Z-|---Z|
+ //
+ // 000 0000 0000 0000 1000 0000 0010 0101 0010 0000
+ //
+ // 0x 8 0 2 5 2 0
+ //
+ // Note: the ISB is needed to ensure the changes to system
+ // context are before the write of SCTLR_EL1.M to enable
+ // the MMU. It is likely on a "real" implementation that
+ // this setup would work without an ISB, due to the
+ // amount of code that gets executed before enabling the
+ // MMU, but that would not be architecturally correct.
+ //
+ ldr x1, =0x0000000000802520
+ msr TCR_EL1, x1
+ isb
+
+ //
+ // x19 already contains the CPU number, so branch to secondary
+ // code if we're not on CPU0
+ //
+ cbnz x19, el1_secondary
+
+ //
+ // Fall through to primary code
+ //
+
+
+//
+// ------------------------------------------------------------
+//
+// EL1 - primary CPU init code
+//
+// This code is run on CPU0, while the other CPUs are in the
+// holding pen
+//
+
+ .global el1_primary
+ .type el1_primary, "function"
+el1_primary:
+
+ //
+ // Turn on the banked GIC distributor enable,
+ // ready for individual CPU enables later
+ //
+ mov w0, #(1 << 1) // gicdctlr_EnableGrp1A
+ bl EnableGICD
+
+ //
+ // Generate TTBR0 L1
+ //
+ // at 4KB granularity, 32-bit VA space, table lookup starts at
+ // L1, with 1GB regions
+ //
+ // we are going to create entries pointing to L2 tables for a
+ // couple of these 1GB regions, the first of which is the
+ // RAM on the VE board model - get the table addresses and
+ // start by emptying out the L1 page tables (4 entries at L1
+ // for a 4K granularity)
+ //
+ // x21 = address of L1 tables
+ //
+ ldr x21, =Image$$TTB0_L1$$ZI$$Base
+ mov x0, x21
+ mov x1, #(4 << 3)
+ bl ZeroBlock
+
+ //
+ // time to start mapping the RAM regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // x22 = address of L2 tables, needs to be remembered in case
+ // we want to re-use the tables for mapping peripherals
+ //
+ ldr x22, =Image$$TTB0_L2_RAM$$ZI$$Base
+ mov x1, #(512 << 3)
+ mov x0, x22
+ bl ZeroBlock
+
+ //
+ // Get the start address of RAM (the EXEC region) into x4
+ // and calculate the offset into the L1 table (1GB per region,
+ // max 4GB)
+ //
+ // x23 = L1 table offset, saved for later comparison against
+ // peripheral offset
+ //
+ ldr x4, =Image$$EXEC$$RO$$Base
+ ubfx x23, x4, #30, #2
+
+ orr x1, x22, #TT_S1_ATTR_PAGE
+ str x1, [x21, x23, lsl #3]
+
+ //
+ // we've already used the RAM start address in x4 - we now need
+ // to get this in terms of an offset into the L2 page tables,
+ // where each entry covers 2MB
+ //
+ ubfx x2, x4, #21, #9
+
+ //
+ // TOP_OF_RAM in the scatter file marks the end of the
+ // Execute region in RAM: convert the end of this region to an
+ // offset too, being careful to round up, then calculate the
+ // number of entries to write
+ //
+ ldr x5, =Image$$TOP_OF_RAM$$ZI$$Base
+ sub x3, x5, #1
+ ubfx x3, x3, #21, #9
+ add x3, x3, #1
+ sub x3, x3, x2
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as Shared, Normal WBWA (MAIR[1]) with a flat
+ // VA->PA translation
+ //
+ bic x4, x4, #((1 << 21) - 1)
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (1 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_SH_INNER | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // factor the offset into the page table address and then write
+ // the entries
+ //
+ add x0, x22, x2, lsl #3
+
+loop1:
+ subs x3, x3, #1
+ str x1, [x0], #8
+ add x1, x1, #0x200, LSL #12 // equiv to add x1, x1, #(1 << 21) // 2MB per entry
+ bne loop1
+
+
+ //
+ // now mapping the Peripheral regions - clear out the
+ // L2 tables and point to them from the L1 tables
+ //
+ // The assumption here is that all peripherals live within
+ // a common 1GB region (i.e. that there's a single set of
+ // L2 pages for all the peripherals). We only use a UART
+ // and the GIC in this example, so the assumption is sound
+ //
+ // x24 = address of L2 peripheral tables
+ //
+ ldr x24, =Image$$TTB0_L2_PERIPH$$ZI$$Base
+
+ //
+ // get the GICD address into x4 and calculate
+ // the offset into the L1 table
+ //
+ // x25 = L1 table offset
+ //
+ ldr x4, =Image$$GICD$$ZI$$Base
+ ubfx x25, x4, #30, #2
+
+ //
+ // here's the tricky bit: it's possible that the peripherals are
+ // in the same 1GB region as the RAM, in which case we don't need
+ // to prime a separate set of L2 page tables, nor add them to the
+ // L1 tables
+ //
+ // if we're going to re-use the TTB0_L2_RAM tables, get their
+ // address into x24, which is used later on to write the PTEs
+ //
+ cmp x25, x23
+ csel x24, x22, x24, EQ
+ b.eq nol2setup
+
+ //
+ // Peripherals are in a separate 1GB region, and so have their own
+ // set of L2 tables - clean out the tables and add them to the L1
+ // table
+ //
+ mov x0, x24
+ mov x1, #512 << 3
+ bl ZeroBlock
+
+ orr x1, x24, #TT_S1_ATTR_PAGE
+ str x1, [x21, x25, lsl #3]
+
+ //
+ // there's only going to be a single 2MB region for GICD (in
+ // x4) - get this in terms of an offset into the L2 page tables
+ //
+ // with larger systems, it is possible that the GIC redistributor
+ // registers require extra 2MB pages, in which case extra code
+ // would be required here
+ //
+nol2setup:
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (modulo 2MB)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // start address mod 2MB
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry for this, so no loop as we have for RAM, above
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // we have CS3_PERIPHERALS that include the UART controller
+ //
+ // Again, the code is making assumptions - this time that the CS3_PERIPHERALS
+ // region uses the same 1GB portion of the address space as the GICD,
+ // and thus shares the same set of L2 page tables
+ //
+ // Get CS3_PERIPHERALS address into x4 and calculate the offset into the
+ // L2 tables
+ //
+ ldr x4, =Image$$CS3_PERIPHERALS$$ZI$$Base
+ ubfx x2, x4, #21, #9
+
+ //
+ // set x1 to the required page table attributes, then orr
+ // in the start address (rounded down to a 2MB boundary)
+ //
+ // L2 tables in our configuration cover 2MB per entry - map
+ // memory as NS Device-nGnRE (MAIR[2]) with a flat VA->PA
+ // translation
+ //
+ bic x4, x4, #((1 << 21) - 1) // round start address down to a 2MB boundary
+ ldr x1, =(TT_S1_ATTR_BLOCK | \
+ (2 << TT_S1_ATTR_MATTR_LSB) | \
+ TT_S1_ATTR_NS | \
+ TT_S1_ATTR_AP_RW_PL1 | \
+ TT_S1_ATTR_AF | \
+ TT_S1_ATTR_nG)
+ orr x1, x1, x4
+
+ //
+ // only a single L2 entry again - write it
+ //
+ str x1, [x24, x2, lsl #3]
+
+ //
+ // issue a barrier to ensure all table entry writes are complete
+ //
+ dsb ish
+
+ //
+ // Enable the MMU. Caches will be enabled later, after scatterloading.
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
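+
+ //
+ // from this point the flat VA->PA mapping is live; the I and D caches
+ // remain disabled and are turned on in _platform_pre_stackheap_init
+ // once scatter-loading has completed
+ //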
+
+ //
+ // Branch to C library init code
+ //
+ b __main
+
+
+// ------------------------------------------------------------
+
+// AArch64 Arm C library startup add-in:
+
+// The Arm Architecture Reference Manual for Armv8-A states:
+//
+// Instruction accesses to Non-cacheable Normal memory can be held in instruction caches.
+// Correspondingly, the sequence for ensuring that modifications to instructions are available
+// for execution must include invalidation of the modified locations from the instruction cache,
+// even if the instructions are held in Normal Non-cacheable memory.
+// This includes cases where the instruction cache is disabled.
+//
+// To invalidate the AArch64 instruction cache after scatter-loading and before initialization of the stack and heap,
+// it is necessary for the user to:
+//
+// * Implement instruction cache invalidation code in _platform_pre_stackheap_init.
+// * Ensure all code on the path from the program entry up to and including _platform_pre_stackheap_init is located in a root region.
+//
+// In this example, this function is only called once, by the primary core
+
+ .global _platform_pre_stackheap_init
+ .type _platform_pre_stackheap_init, "function"
+ .cfi_startproc
+_platform_pre_stackheap_init:
+ dsb ish // ensure all previous stores have completed before invalidating
+ ic ialluis // I cache invalidate all inner shareable to PoU (which includes secondary cores)
+ dsb ish // ensure completion on inner shareable domain (which includes secondary cores)
+ isb
+
+ // Scatter-loading is complete, so enable the caches here, so that the C-library's mutex initialization later will work
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ msr SCTLR_EL1, x1
+ isb
+
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+// EL1 - secondary CPU init code
+//
+// This code is run on CPUs 1, 2, 3 etc....
+// ------------------------------------------------------------
+
+ .global el1_secondary
+ .type el1_secondary, "function"
+el1_secondary:
+
+ //
+ // the primary CPU is going to use SGI 15 as a wakeup event
+ // to let us know when it is OK to proceed, so prepare for
+ // receiving that interrupt
+ //
+ // NS interrupt priorities run from 0 to 15, with 15 being
+ // too low a priority to ever raise an interrupt, so let's
+ // use 14
+ //
+ mov w0, w20
+ mov w1, #15
+ mov w2, #14 << 4 // in the NS world only 4 of the 8 priority bits
+ // are implemented (the upper ones), hence the shift left by 4
+ bl SetPrivateIntPriority
+
+ mov w0, w20
+ mov w1, #15
+ bl EnablePrivateInt
+
+ //
+ // open the priority mask as far as possible; being in the
+ // NS world we cannot set bit[7] of the priority, but we still
+ // write all 8 bits of the priority to the ICC register
+ //
+ mov x0, #31 << 3
+ msr ICC_PMR_EL1, x0
+
+ //
+ // set global enable and wait for our interrupt to arrive
+ //
+ mov x0, #1
+ msr ICC_IGRPEN1_EL1, x0
+ isb
+
+loop_wfi:
+ dsb SY // Clear all pending data accesses
+ wfi // Go to sleep
+
+ //
+ // something woke us from our wait, was it the required interrupt?
+ //
+ mov w0, w20
+ mov w1, #15
+ bl GetPrivateIntPending
+ cbz w0, loop_wfi
+
+ //
+ // it was - there's no need to actually take the interrupt,
+ // so just clear it
+ //
+ mov w0, w20
+ mov w1, #15
+ bl ClearPrivateIntPending
+
+ //
+ // Enable the MMU and caches
+ //
+ mrs x1, SCTLR_EL1
+ orr x1, x1, #SCTLR_ELx_M
+ orr x1, x1, #SCTLR_ELx_C
+ orr x1, x1, #SCTLR_ELx_I
+ bic x1, x1, #SCTLR_ELx_A // Disable alignment fault checking. To enable, change bic to orr
+ msr SCTLR_EL1, x1
+ isb
+
+ //
+ // Branch to thread start
+ //
+ //B MainApp
+ b __main
+
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/timer_interrupts.c b/ports/cortex_a55/ac6/example_build/sample_threadx/timer_interrupts.c
new file mode 100644
index 00000000..8f522217
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/timer_interrupts.c
@@ -0,0 +1,152 @@
+/* Bare-metal example for Armv8-A FVP Base model */
+
+/* Timer and interrupts */
+
+/* Copyright (c) 2016-2018 Arm Limited (or its affiliates). All rights reserved. */
+/* Use, modification and redistribution of this file is subject to your possession of a */
+/* valid End User License Agreement for the Arm Product of which these examples are part of */
+/* and your compliance with all applicable terms and conditions of such licence agreement. */
+
+#include <stdio.h>
+
+#include "GICv3.h"
+#include "GICv3_gicc.h"
+#include "sp804_timer.h"
+
+void _tx_timer_interrupt(void);
+
+// LED Base address
+#define LED_BASE (volatile unsigned int *)0x1C010008
+
+
+void nudge_leds(void) // Move LEDs along
+{
+ static int state = 1;
+ static int value = 1;
+
+ if (state)
+ {
+ int max = (1 << 7);
+ value <<= 1;
+ if (value == max)
+ state = 0;
+ }
+ else
+ {
+ value >>= 1;
+ if (value == 1)
+ state = 1;
+ }
+
+ *LED_BASE = value; // Update LEDs hardware
+}
+
+
+// Initialize Timer 0 and Interrupt Controller
+void init_timer(void)
+{
+ // Enable interrupts
+ __asm("MSR DAIFClr, #0xF");
+ setICC_IGRPEN1_EL1(igrpEnable);
+
+ // Configure the SP804 timer to generate an interrupt
+ setTimerBaseAddress(0x1C110000);
+ initTimer(0x200, SP804_AUTORELOAD, SP804_GENERATE_IRQ);
+ startTimer();
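+ // Note: the 0x200 load value with SP804_AUTORELOAD gives a periodic
+ // interrupt; the resulting ThreadX tick rate therefore depends on the
+ // SP804 input clock of the FVP Base model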
+
+ // The SP804 timer generates SPI INTID 34. Enable
+ // this ID, and route it to core 0.0.0.0 (this one!)
+ SetSPIRoute(34, 0, gicdirouter_ModeSpecific); // Route INTID 34 to 0.0.0.0 (this core)
+ SetSPIPriority(34, 0); // Set INTID 34 priority to 0
+ ConfigureSPI(34, gicdicfgr_Level); // Set INTID 34 as level-sensitive
+ EnableSPI(34); // Enable INTID 34
+}
+
+
+// --------------------------------------------------------
+
+void irqHandler(void)
+{
+ unsigned int ID;
+
+ ID = getICC_IAR1(); // readIntAck();
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("irqHandler() - Reserved INTID %d\n\n", ID);
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("irqHandler() - External timer interrupt\n\n");
+ nudge_leds();
+ clearTimerIrq();
+
+ /* Call ThreadX timer interrupt processing. */
+ _tx_timer_interrupt();
+
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("irqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
+
+// --------------------------------------------------------
+
+// Not actually used in this example, but provided for completeness
+
+void fiqHandler(void)
+{
+ unsigned int ID;
+ unsigned int aliased = 0;
+
+ ID = getICC_IAR0(); // readIntAck();
+ //printf("fiqHandler() - Read %d from IAR0\n", ID);
+
+ // Check for reserved IDs
+ if ((1020 <= ID) && (ID <= 1023))
+ {
+ //printf("fiqHandler() - Reserved INTID %d\n\n", ID);
+ ID = getICC_IAR1(); // readAliasedIntAck();
+ //printf("fiqHandler() - Read %d from AIAR\n", ID);
+ aliased = 1;
+
+ // If still spurious then simply return
+ if ((1020 <= ID) && (ID <= 1023))
+ return;
+ }
+
+ switch(ID)
+ {
+ case 34:
+ // Dual-Timer 0 (SP804)
+ //printf("fiqHandler() - External timer interrupt\n\n");
+ clearTimerIrq();
+ break;
+
+ default:
+ // Unexpected ID value
+ //printf("fiqHandler() - Unexpected INTID %d\n\n", ID);
+ break;
+ }
+
+ // Write the End of Interrupt register to tell the GIC
+ // we've finished handling the interrupt
+ // NOTE: If the ID was read from the Aliased IAR, then
+ // the aliased EOI register must be used
+ if (aliased == 0)
+ setICC_EOIR0(ID); // writeEOI(ID);
+ else
+ setICC_EOIR1(ID); // writeAliasedEOI(ID);
+}
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/use_model_semihosting.ds b/ports/cortex_a55/ac6/example_build/sample_threadx/use_model_semihosting.ds
new file mode 100644
index 00000000..6fde52b2
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/use_model_semihosting.ds
@@ -0,0 +1 @@
+set semihosting enabled off
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.S b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.S
new file mode 100644
index 00000000..45445a98
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.S
@@ -0,0 +1,179 @@
+// ------------------------------------------------------------
+// Armv8-A AArch64 - Common helper functions
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+ .global EnableCachesEL1
+ .global DisableCachesEL1
+ .global InvalidateUDCaches
+ .global GetMIDR
+ .global GetMPIDR
+ .global GetAffinity
+ .global GetCPUID
+
+// ------------------------------------------------------------
+
+//
+// void EnableCachesEL1(void)
+//
+// enable Instruction and Data caches
+//
+ .type EnableCachesEL1, "function"
+ .cfi_startproc
+EnableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_ELx_I
+ orr x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+ .type DisableCachesEL1, "function"
+ .cfi_startproc
+DisableCachesEL1:
+
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_ELx_I
+ bic x0, x0, #SCTLR_ELx_C
+ msr SCTLR_EL1, x0
+
+ isb
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// void InvalidateUDCaches(void)
+//
+// Invalidate data and unified caches
+//
+ .type InvalidateUDCaches, "function"
+ .cfi_startproc
+InvalidateUDCaches:
+ // From the Armv8-A Architecture Reference Manual
+
+ dmb ish // ensure all prior inner-shareable accesses have been observed
+
+ mrs x0, CLIDR_EL1
+ and w3, w0, #0x07000000 // get 2 x level of coherence
+ lsr w3, w3, #23
+ cbz w3, finished
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+loop_level:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt next_level // no data or unified cache at this level
+ msr CSSELR_EL1, x10 // select this cache level
+ isb // synchronize change of csselr
+ mrs x1, CCSIDR_EL1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop_way:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop_set:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc isw, x11 // do data cache invalidate by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop_set
+ subs x9, x9, x16 // decrement way number
+ b.ge loop_way
+next_level:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop_level
+ dsb sy // ensure completion of previous cache maintenance operation
+ isb
+finished:
+ ret
+ .cfi_endproc
+
+
+// ------------------------------------------------------------
+
+//
+// ID Register functions
+//
+
+ .type GetMIDR, "function"
+ .cfi_startproc
+GetMIDR:
+
+ mrs x0, MIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetMPIDR, "function"
+ .cfi_startproc
+GetMPIDR:
+
+ mrs x0, MPIDR_EL1
+ ret
+ .cfi_endproc
+
+
+ .type GetAffinity, "function"
+ .cfi_startproc
+GetAffinity:
+
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #32, #8
+ bfi w0, w1, #24, #8
+ ret
+ .cfi_endproc
+
+
+ .type GetCPUID, "function"
+ .cfi_startproc
+GetCPUID:
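+ // return a 0-based linear core number: Aff1 for DynamIQ parts
+ // (A55/A75/A76/A77), otherwise Aff0 + 4 * Aff1, which assumes
+ // no more than four cores per cluster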
+
+ mrs x0, MIDR_EL1
+ ubfx x0, x0, #4, #12 // extract PartNum
+ cmp x0, #0xD0D // Cortex-A77
+ b.eq DynamIQ
+ cmp x0, #0xD0B // Cortex-A76
+ b.eq DynamIQ
+ cmp x0, #0xD0A // Cortex-A75
+ b.eq DynamIQ
+ cmp x0, #0xD05 // Cortex-A55
+ b.eq DynamIQ
+ b Others
+DynamIQ:
+ mrs x0, MPIDR_EL1
+ ubfx x0, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ ret
+
+Others:
+ mrs x0, MPIDR_EL1
+ ubfx x1, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
+ ubfx x2, x0, #MPIDR_EL1_AFF1_LSB, #MPIDR_EL1_AFF_WIDTH
+ add x0, x1, x2, LSL #2
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.h b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.h
new file mode 100644
index 00000000..b09079a4
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_aarch64.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Armv8-A AArch64 common helper functions
+ *
+ * Copyright (c) 2012-2014 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+
+#ifndef V8_AARCH64_H
+#define V8_AARCH64_H
+
+/*
+ * Parameters for data barriers
+ */
+#define OSHLD 1
+#define OSHST 2
+#define OSH 3
+#define NSHLD 5
+#define NSHST 6
+#define NSH 7
+#define ISHLD 9
+#define ISHST 10
+#define ISH 11
+#define LD 13
+#define ST 14
+#define SY 15
+
+/**********************************************************************/
+
+/*
+ * function prototypes
+ */
+
+/*
+ * void InvalidateUDCaches(void)
+ * invalidates all Unified and Data Caches
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ *
+ *
+ * Side Effects
+ * guarantees that all levels of cache will be invalidated before
+ * returning to caller
+ */
+void InvalidateUDCaches(void);
+
+/*
+ * unsigned long long EnableCachesEL1(void)
+ * enables I- and D- caches at EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * New value of SCTLR_EL1
+ *
+ * Side Effects
+ * context will be synchronised before returning to caller
+ */
+unsigned long long EnableCachesEL1(void);
+
+/*
+ * unsigned long long GetMIDR(void)
+ * returns the contents of MIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MIDR_EL1
+ */
+unsigned long long GetMIDR(void);
+
+/*
+ * unsigned long long GetMPIDR(void)
+ * returns the contents of MPIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * MPIDR_EL1
+ */
+unsigned long long GetMPIDR(void);
+
+/*
+ * unsigned int GetCPUID(void)
+ * returns a linear CPU number derived from the affinity fields of MPIDR_EL1
+ *
+ * Inputs
+ *
+ *
+ * Returns
+ * linear CPU number (Aff1 for DynamIQ parts, otherwise Aff0 + 4 * Aff1)
+ */
+unsigned int GetCPUID(void);
+
+#endif
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/v8_mmu.h b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_mmu.h
new file mode 100644
index 00000000..bce62b54
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_mmu.h
@@ -0,0 +1,128 @@
+//
+// Defines for v8 Memory Model
+//
+// Copyright (c) 2012-2019 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_MMU_H
+#define V8_MMU_H
+
+//
+// Translation Control Register fields
+//
+// RGN field encodings
+//
+#define TCR_RGN_NC 0b00
+#define TCR_RGN_WBWA 0b01
+#define TCR_RGN_WT 0b10
+#define TCR_RGN_WBRA 0b11
+
+//
+// Shareability encodings
+//
+#define TCR_SHARE_NONE 0b00
+#define TCR_SHARE_OUTER 0b10
+#define TCR_SHARE_INNER 0b11
+
+//
+// Granule size encodings
+//
+#define TCR_GRANULE_4K 0b00
+#define TCR_GRANULE_64K 0b01
+#define TCR_GRANULE_16K 0b10
+
+//
+// Physical Address sizes
+//
+#define TCR_SIZE_4G 0b000
+#define TCR_SIZE_64G 0b001
+#define TCR_SIZE_1T 0b010
+#define TCR_SIZE_4T 0b011
+#define TCR_SIZE_16T 0b100
+#define TCR_SIZE_256T 0b101
+
+//
+// Translation Control Register fields
+//
+#define TCR_EL1_T0SZ_SHIFT 0
+#define TCR_EL1_EPD0 (1 << 7)
+#define TCR_EL1_IRGN0_SHIFT 8
+#define TCR_EL1_ORGN0_SHIFT 10
+#define TCR_EL1_SH0_SHIFT 12
+#define TCR_EL1_TG0_SHIFT 14
+
+#define TCR_EL1_T1SZ_SHIFT 16
+#define TCR_EL1_A1 (1 << 22)
+#define TCR_EL1_EPD1 (1 << 23)
+#define TCR_EL1_IRGN1_SHIFT 24
+#define TCR_EL1_ORGN1_SHIFT 26
+#define TCR_EL1_SH1_SHIFT 28
+#define TCR_EL1_TG1_SHIFT 30
+#define TCR_EL1_IPS_SHIFT 32
+#define TCR_EL1_AS (1 << 36)
+#define TCR_EL1_TBI0 (1 << 37)
+#define TCR_EL1_TBI1 (1 << 38)
+
+//
+// Stage 1 Translation Table descriptor fields
+//
+#define TT_S1_ATTR_FAULT (0b00 << 0)
+#define TT_S1_ATTR_BLOCK (0b01 << 0) // Level 1/2
+#define TT_S1_ATTR_TABLE (0b11 << 0) // Level 0/1/2
+#define TT_S1_ATTR_PAGE (0b11 << 0) // Level 3
+
+#define TT_S1_ATTR_MATTR_LSB 2
+
+#define TT_S1_ATTR_NS (1 << 5)
+
+#define TT_S1_ATTR_AP_RW_PL1 (0b00 << 6)
+#define TT_S1_ATTR_AP_RW_ANY (0b01 << 6)
+#define TT_S1_ATTR_AP_RO_PL1 (0b10 << 6)
+#define TT_S1_ATTR_AP_RO_ANY (0b11 << 6)
+
+#define TT_S1_ATTR_SH_NONE (0b00 << 8)
+#define TT_S1_ATTR_SH_OUTER (0b10 << 8)
+#define TT_S1_ATTR_SH_INNER (0b11 << 8)
+
+#define TT_S1_ATTR_AF (1 << 10)
+#define TT_S1_ATTR_nG (1 << 11)
+
+// OA bits [15:12] - If Armv8.2-LPA is implemented, bits[15:12] are bits[51:48]
+// and bits[47:16] are bits[47:16] of the output address for a page of memory
+
+#define TT_S1_ATTR_nT (1 << 16) // Present if Armv8.4-TTRem is implemented, otherwise RES0
+
+#define TT_S1_ATTR_DBM (1 << 51) // Present if Armv8.1-TTHM is implemented, otherwise RES0
+
+#define TT_S1_ATTR_CONTIG (1 << 52)
+#define TT_S1_ATTR_PXN (1 << 53)
+#define TT_S1_ATTR_UXN (1 << 54)
+
+// PBHA bits[62:59] - If Armv8.2-TTPBHA is implemented, hardware can use these bits
+// for IMPLEMENTATION DEFINED purposes, otherwise IGNORED
+
+#define TT_S1_MAIR_DEV_nGnRnE 0b00000000
+#define TT_S1_MAIR_DEV_nGnRE 0b00000100
+#define TT_S1_MAIR_DEV_nGRE 0b00001000
+#define TT_S1_MAIR_DEV_GRE 0b00001100
+
+//
+// Inner and Outer Normal memory attributes use the same bit patterns
+// Outer attributes just need to be shifted up
+//
+#define TT_S1_MAIR_OUTER_SHIFT 4
+
+#define TT_S1_MAIR_WT_TRANS_RA 0b0010
+
+#define TT_S1_MAIR_WB_TRANS_RA 0b0110
+#define TT_S1_MAIR_WB_TRANS_RWA 0b0111
+
+#define TT_S1_MAIR_WT_RA 0b1010
+
+#define TT_S1_MAIR_WB_RA 0b1110
+#define TT_S1_MAIR_WB_RWA 0b1111
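+
+//
+// Note: the example startup code builds its page tables with attribute
+// index 1 for Normal WBWA RAM and index 2 for Device-nGnRE peripherals,
+// so MAIR_EL1 is assumed to be programmed with matching entries elsewhere
+//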
+
+#endif // V8_MMU_H
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/v8_system.h b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_system.h
new file mode 100644
index 00000000..a62d2a33
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_system.h
@@ -0,0 +1,115 @@
+//
+// Defines for v8 System Registers
+//
+// Copyright (c) 2012-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#ifndef V8_SYSTEM_H
+#define V8_SYSTEM_H
+
+//
+// AArch64 SPSR
+//
+#define AARCH64_SPSR_EL3h 0b1101
+#define AARCH64_SPSR_EL3t 0b1100
+#define AARCH64_SPSR_EL2h 0b1001
+#define AARCH64_SPSR_EL2t 0b1000
+#define AARCH64_SPSR_EL1h 0b0101
+#define AARCH64_SPSR_EL1t 0b0100
+#define AARCH64_SPSR_EL0t 0b0000
+#define AARCH64_SPSR_RW (1 << 4)
+#define AARCH64_SPSR_F (1 << 6)
+#define AARCH64_SPSR_I (1 << 7)
+#define AARCH64_SPSR_A (1 << 8)
+#define AARCH64_SPSR_D (1 << 9)
+#define AARCH64_SPSR_IL (1 << 20)
+#define AARCH64_SPSR_SS (1 << 21)
+#define AARCH64_SPSR_V (1 << 28)
+#define AARCH64_SPSR_C (1 << 29)
+#define AARCH64_SPSR_Z (1 << 30)
+#define AARCH64_SPSR_N (1 << 31)
+
+//
+// Multiprocessor Affinity Register
+//
+#define MPIDR_EL1_AFF3_LSB 32
+#define MPIDR_EL1_U (1 << 30)
+#define MPIDR_EL1_MT (1 << 24)
+#define MPIDR_EL1_AFF2_LSB 16
+#define MPIDR_EL1_AFF1_LSB 8
+#define MPIDR_EL1_AFF0_LSB 0
+#define MPIDR_EL1_AFF_WIDTH 8
+
+//
+// Data Cache Zero ID Register
+//
+#define DCZID_EL0_BS_LSB 0
+#define DCZID_EL0_BS_WIDTH 4
+#define DCZID_EL0_DZP_LSB 5
+#define DCZID_EL0_DZP (1 << 5)
+
+//
+// System Control Register
+//
+#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_EL1_E0E (1 << 24)
+#define SCTLR_ELx_WXN (1 << 19)
+#define SCTLR_EL1_nTWE (1 << 18)
+#define SCTLR_EL1_nTWI (1 << 16)
+#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_EL1_UMA (1 << 9)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
+#define SCTLR_EL1_THEE (1 << 6)
+#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M (1 << 0)
+
+//
+// Architectural Feature Access Control Register
+//
+#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_FPEN (3 << 20)
+
+//
+// Architectural Feature Trap Register
+//
+#define CPTR_ELx_TCPAC (1 << 31)
+#define CPTR_ELx_TTA (1 << 20)
+#define CPTR_ELx_TFP (1 << 10)
+
+//
+// Secure Configuration Register
+//
+#define SCR_EL3_TWE (1 << 13)
+#define SCR_EL3_TWI (1 << 12)
+#define SCR_EL3_ST (1 << 11)
+#define SCR_EL3_RW (1 << 10)
+#define SCR_EL3_SIF (1 << 9)
+#define SCR_EL3_HCE (1 << 8)
+#define SCR_EL3_SMD (1 << 7)
+#define SCR_EL3_EA (1 << 3)
+#define SCR_EL3_FIQ (1 << 2)
+#define SCR_EL3_IRQ (1 << 1)
+#define SCR_EL3_NS (1 << 0)
+
+//
+// Hypervisor Configuration Register
+//
+#define HCR_EL2_ID (1 << 33)
+#define HCR_EL2_CD (1 << 32)
+#define HCR_EL2_RW (1 << 31)
+#define HCR_EL2_TRVM (1 << 30)
+#define HCR_EL2_HVC (1 << 29)
+#define HCR_EL2_TDZ (1 << 28)
+
+#endif // V8_SYSTEM_H
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/v8_utils.S b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_utils.S
new file mode 100644
index 00000000..888892a0
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/v8_utils.S
@@ -0,0 +1,69 @@
+//
+// Simple utility routines for baremetal v8 code
+//
+// Copyright (c) 2013-2017 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//
+
+#include "v8_system.h"
+
+ .text
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+//
+// void *ZeroBlock(void *blockPtr, unsigned int nBytes)
+//
+// Zero fill a block of memory
+// Fill memory pages or similar structures with zeros.
+// The byte count must be a multiple of the block fill size (16 bytes)
+//
+// Inputs:
+// blockPtr - base address of block to fill
+// nBytes - block size, in bytes
+//
+// Returns:
+// pointer to just filled block, NULL if nBytes is
+// incompatible with block fill size
+//
+ .global ZeroBlock
+ .type ZeroBlock, "function"
+ .cfi_startproc
+ZeroBlock:
+
+ //
+ // we fill the block 16 bytes at a time: check that
+ // blocksize is a multiple of that
+ //
+ ubfx x2, x1, #0, #4
+ cbnz x2, incompatible
+
+ //
+ // we already have one register full of zeros, get another
+ //
+ mov x3, x2
+
+ //
+ // OK, set temporary pointer and away we go
+ //
+ add x0, x0, x1
+
+loop0:
+ subs x1, x1, #16
+ stp x2, x3, [x0, #-16]!
+ b.ne loop0
+
+ //
+ // that's all - x0 will be back to its start value
+ //
+ ret
+
+ //
+ // parameters are incompatible with block size - return
+ // an indication that this is so
+ //
+incompatible:
+ mov x0,#0
+ ret
+ .cfi_endproc
diff --git a/ports/cortex_a55/ac6/example_build/sample_threadx/vectors.S b/ports/cortex_a55/ac6/example_build/sample_threadx/vectors.S
new file mode 100644
index 00000000..7784f98e
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/sample_threadx/vectors.S
@@ -0,0 +1,252 @@
+// ------------------------------------------------------------
+// Armv8-A Vector tables
+//
+// Copyright (c) 2014-2016 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+// ------------------------------------------------------------
+
+
+ .global el1_vectors
+ .global el2_vectors
+ .global el3_vectors
+ .global c0sync1
+ .global irqHandler
+ .global fiqHandler
+ .global irqFirstLevelHandler
+ .global fiqFirstLevelHandler
+
+ .section EL1VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el1_vectors:
+c0sync1: B c0sync1
+
+ .balign 0x80
+c0irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr1: B c0serr1
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync1: B cxsync1
+
+ .balign 0x80
+cxirq1: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr1: B cxserr1
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync1: B l64sync1
+
+ .balign 0x80
+l64irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr1: B l64serr1
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync1: B l32sync1
+
+ .balign 0x80
+l32irq1: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq1: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr1: B l32serr1
+
+//----------------------------------------------------------------
+
+ .section EL2VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el2_vectors:
+c0sync2: B c0sync2
+
+ .balign 0x80
+c0irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr2: B c0serr2
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync2: B cxsync2
+
+ .balign 0x80
+cxirq2: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr2: B cxserr2
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync2: B l64sync2
+
+ .balign 0x80
+l64irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr2: B l64serr2
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync2: B l32sync2
+
+ .balign 0x80
+l32irq2: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq2: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr2: B l32serr2
+
+//----------------------------------------------------------------
+
+ .section EL3VECTORS, "ax"
+ .align 11
+
+//
+// Current EL with SP0
+//
+el3_vectors:
+c0sync3: B c0sync3
+
+ .balign 0x80
+c0irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+c0fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+c0serr3: B c0serr3
+
+//
+// Current EL with SPx
+//
+ .balign 0x80
+cxsync3: B cxsync3
+
+ .balign 0x80
+cxirq3: B irqFirstLevelHandler
+
+ .balign 0x80
+cxfiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+cxserr3: B cxserr3
+
+//
+// Lower EL using AArch64
+//
+ .balign 0x80
+l64sync3: B l64sync3
+
+ .balign 0x80
+l64irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l64fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l64serr3: B l64serr3
+
+//
+// Lower EL using AArch32
+//
+ .balign 0x80
+l32sync3: B l32sync3
+
+ .balign 0x80
+l32irq3: B irqFirstLevelHandler
+
+ .balign 0x80
+l32fiq3: B fiqFirstLevelHandler
+
+ .balign 0x80
+l32serr3: B l32serr3
+
+
+ .section InterruptHandlers, "ax"
+ .balign 4
+
+ .type irqFirstLevelHandler, "function"
+irqFirstLevelHandler:
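+ // _tx_thread_context_save preserves the interrupted context and
+ // _tx_thread_context_restore either resumes it or transfers to the
+ // ThreadX scheduler; context_restore exits via ERET, so it is entered
+ // with a plain B rather than a BL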
+ MSR SPSel, 0
+ STP x29, x30, [sp, #-16]!
+ BL _tx_thread_context_save
+ BL irqHandler
+ B _tx_thread_context_restore
+
+ .type fiqFirstLevelHandler, "function"
+fiqFirstLevelHandler:
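+ // the FIQ path does not interact with the ThreadX scheduler: it saves
+ // the volatile registers, calls fiqHandler, then returns directly to
+ // the point of interrupt with ERET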
+ STP x29, x30, [sp, #-16]!
+ STP x18, x19, [sp, #-16]!
+ STP x16, x17, [sp, #-16]!
+ STP x14, x15, [sp, #-16]!
+ STP x12, x13, [sp, #-16]!
+ STP x10, x11, [sp, #-16]!
+ STP x8, x9, [sp, #-16]!
+ STP x6, x7, [sp, #-16]!
+ STP x4, x5, [sp, #-16]!
+ STP x2, x3, [sp, #-16]!
+ STP x0, x1, [sp, #-16]!
+
+ BL fiqHandler
+
+ LDP x0, x1, [sp], #16
+ LDP x2, x3, [sp], #16
+ LDP x4, x5, [sp], #16
+ LDP x6, x7, [sp], #16
+ LDP x8, x9, [sp], #16
+ LDP x10, x11, [sp], #16
+ LDP x12, x13, [sp], #16
+ LDP x14, x15, [sp], #16
+ LDP x16, x17, [sp], #16
+ LDP x18, x19, [sp], #16
+ LDP x29, x30, [sp], #16
+ ERET
diff --git a/ports/cortex_a55/ac6/example_build/tx/.cproject b/ports/cortex_a55/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..0d6f5a10
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/tx/.cproject
@@ -0,0 +1,148 @@
diff --git a/ports/cortex_a55/ac6/example_build/tx/.project b/ports/cortex_a55/ac6/example_build/tx/.project
new file mode 100644
index 00000000..863ca5cb
--- /dev/null
+++ b/ports/cortex_a55/ac6/example_build/tx/.project
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+    <name>tx</name>
+    <comment></comment>
+    <projects>
+    </projects>
+    <buildSpec>
+        <buildCommand>
+            <name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+            <triggers>clean,full,incremental,</triggers>
+            <arguments>
+            </arguments>
+        </buildCommand>
+        <buildCommand>
+            <name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+            <triggers>full,incremental,</triggers>
+            <arguments>
+            </arguments>
+        </buildCommand>
+    </buildSpec>
+    <natures>
+        <nature>org.eclipse.cdt.core.cnature</nature>
+        <nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+        <nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+    </natures>
+    <linkedResources>
+        <link>
+            <name>inc_generic</name>
+            <type>2</type>
+            <locationURI>$%7BPARENT-5-PROJECT_LOC%7D/common/inc</locationURI>
+        </link>
+        <link>
+            <name>inc_port</name>
+            <type>2</type>
+            <locationURI>$%7BPARENT-2-PROJECT_LOC%7D/inc</locationURI>
+        </link>
+        <link>
+            <name>src_generic</name>
+            <type>2</type>
+            <locationURI>$%7BPARENT-5-PROJECT_LOC%7D/common/src</locationURI>
+        </link>
+        <link>
+            <name>src_port</name>
+            <type>2</type>
+            <locationURI>$%7BPARENT-2-PROJECT_LOC%7D/src</locationURI>
+        </link>
+    </linkedResources>
+</projectDescription>
diff --git a/ports/cortex_a55/ac6/inc/tx_port.h b/ports/cortex_a55/ac6/inc/tx_port.h
new file mode 100644
index 00000000..33bccbf1
--- /dev/null
+++ b/ports/cortex_a55/ac6/inc/tx_port.h
@@ -0,0 +1,379 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv8-A */
+/* 6.1.10 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternatively be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef int LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef short SHORT;
+typedef unsigned short USHORT;
+#define ULONG64_DEFINED
+
+/* Override the alignment type to use 64-bit alignment and storage for pointers. */
+
+#define ALIGN_TYPE_DEFINED
+typedef unsigned long long ALIGN_TYPE;
+
+
+/* Override the free block marker for byte pools to be a 64-bit constant. */
+
+#define TX_BYTE_BLOCK_FREE ((ALIGN_TYPE) 0xFFFFEEEEFFFFEEEE)
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 4096 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#define TX_INT_ENABLE 0x00 /* Enable IRQ & FIQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port-specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_MISRA_ENABLE
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
+#endif
+#else
+#ifndef TX_TRACE_TIME_SOURCE
+ULONG _tx_misra_time_stamp_get(VOID);
+#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
+#endif
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#ifdef TX_MISRA_ENABLE
+#define TX_DISABLE_INLINE
+#else
+#define TX_INLINE_INITIALIZATION
+#endif
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifndef TX_MISRA_ENABLE
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_fp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#ifndef TX_DISABLE_INLINE
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) b = (UINT) __builtin_ctz((unsigned int) m);
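+
+/* Note: __builtin_ctz() yields the bit index of the lowest set bit and is
+   undefined for m == 0; on AArch64 compilers typically lower it to an
+   RBIT/CLZ pair, so no architecture-specific macro is needed here. */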
+
+#endif
+
+
+/* Define the internal timer extension to also hold the thread pointer such that _tx_thread_timeout
+ can figure out what thread timeout to process. */
+
+#define TX_TIMER_INTERNAL_EXTENSION VOID *tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define the thread timeout setup logic in _tx_thread_create. */
+
+#define TX_THREAD_CREATE_TIMEOUT_SETUP(t) (t) -> tx_thread_timer.tx_timer_internal_timeout_function = &(_tx_thread_timeout); \
+ (t) -> tx_thread_timer.tx_timer_internal_timeout_param = 0; \
+ (t) -> tx_thread_timer.tx_timer_internal_thread_timeout_ptr = (VOID *) (t);
+
+
+/* Define the thread timeout pointer setup in _tx_thread_timeout. */
+
+#define TX_THREAD_TIMEOUT_POINTER_SETUP(t) (t) = (TX_THREAD *) _tx_timer_expired_timer_ptr -> tx_timer_internal_thread_timeout_ptr;
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifndef TX_DISABLE_INLINE
+
+/* Define macros, with in-line assembly for performance. */
+
+__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
+{
+
+unsigned long long daif_value;
+
+ __asm__ volatile (" MRS %0, DAIF ": "=r" (daif_value) );
+ __asm__ volatile (" MSR DAIFSet, 0x3" : : : "memory" );
+ return((unsigned int) daif_value);
+}
+
+__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int daif_value)
+{
+
+unsigned long long temp;
+
+ temp = (unsigned long long) daif_value;
+ __asm__ volatile (" MSR DAIF,%0": : "r" (temp): "memory" );
+}
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+#define TX_DISABLE interrupt_save = __disable_interrupts();
+#define TX_RESTORE __restore_interrupts(interrupt_save);
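+
+/* Typical usage pattern inside the ThreadX sources (sketch):
+
+       TX_INTERRUPT_SAVE_AREA
+
+       TX_DISABLE
+       ... access protected kernel data ...
+       TX_RESTORE
+
+   The DAIFSet/DAIF accesses mask and then restore both IRQ and FIQ around
+   the critical section. */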
+
+#else
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+#endif
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define FP extension for ARMv8. Each is assumed to be called in the context of the executing thread. */
+
+#ifndef TX_SOURCE_CODE
+#define tx_thread_fp_enable _tx_thread_fp_enable
+#define tx_thread_fp_disable _tx_thread_fp_disable
+#endif
+
+VOID tx_thread_fp_enable(VOID);
+VOID tx_thread_fp_disable(VOID);
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv8-A Version 6.1.10 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
diff --git a/ports/cortex_a55/ac6/src/tx_initialize_low_level.S b/ports/cortex_a55/ac6/src/tx_initialize_low_level.S
new file mode 100644
index 00000000..d0b541f1
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_initialize_low_level.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_initialize_low_level(VOID)
+// {
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level, @function
+_tx_initialize_low_level:
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+
+ /* Save the system stack pointer. */
+ // _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
+
+ LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
+ MOV x1, sp // Pickup SP
+ SUB x1, x1, #15 //
+ BIC x1, x1, #0xF // Force 16-byte alignment (AArch64 SP must be 16-byte aligned)
+ STR x1, [x0] // Store system stack
+
+ /* Save the first available memory address. */
+ // _tx_initialize_unused_memory = (VOID_PTR) Image$$TOP_OF_RAM$$Base;
+
+ LDR x0, =_tx_initialize_unused_memory // Pickup address of unused memory ptr
+ LDR x1, =zi_limit // Pickup unused memory address
+ LDR x1, [x1] //
+ STR x1, [x0] // Store unused memory address
+
+ /* Done, return to caller. */
+
+ RET // Return to caller
+// }
+
+
+zi_limit:
+ .quad (Image$$TOP_OF_RAM$$Base)
+
diff --git a/ports/cortex_a55/ac6/src/tx_thread_context_restore.S b/ports/cortex_a55/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..994c404d
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,287 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupted thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_restore(VOID)
+// {
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore, @function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ // if (--_tx_thread_system_state)
+ // {
+
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ SUB w2, w2, #1 // Decrement the counter
+ STR w2, [x3, #0] // Store the counter
+ CMP w2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+ // else if ((_tx_thread_current_ptr) && ((_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable)))
+ // {
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup actual current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR w2, [x3, #0] // Pickup actual preempt disable flag
+ CMP w2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR x2, [x3, #0] // Pickup actual execute thread pointer
+ CMP x0, x2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+ // }
+ // else
+ // {
+__tx_thread_preempt_restore:
+
+ LDR x4, [x0, #8] // Switch to thread stack pointer
+ MOV sp, x4 //
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ STP x20, x21, [sp, #-16]! // Save x20, x21
+ STP x22, x23, [sp, #-16]! // Save x22, x23
+ STP x24, x25, [sp, #-16]! // Save x24, x25
+ STP x26, x27, [sp, #-16]! // Save x26, x27
+ STP x28, x29, [sp, #-16]! // Save x28, x29
+#ifdef ENABLE_ARM_FP
+ LDR w3, [x0, #248] // Pickup FP enable flag
+ CMP w3, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q0, q1, [sp, #-32]! // Save q0, q1
+ STP q2, q3, [sp, #-32]! // Save q2, q3
+ STP q4, q5, [sp, #-32]! // Save q4, q5
+ STP q6, q7, [sp, #-32]! // Save q6, q7
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ STP q16, q17, [sp, #-32]! // Save q16, q17
+ STP q18, q19, [sp, #-32]! // Save q18, q19
+ STP q20, q21, [sp, #-32]! // Save q20, q21
+ STP q22, q23, [sp, #-32]! // Save q22, q23
+ STP q24, q25, [sp, #-32]! // Save q24, q25
+ STP q26, q27, [sp, #-32]! // Save q26, q27
+ STP q28, q29, [sp, #-32]! // Save q28, q29
+ STP q30, q31, [sp, #-32]! // Save q30, q31
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+ STP x4, x5, [sp, #-16]! // Save x4 (SPSR) and x5 (ELR)
+
+ MOV x3, sp // Move sp into x3
+ STR x3, [x0, #8] // Save stack pointer in thread control
+ // block
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
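+
+ // the thread's full context now lives on its own stack and that stack
+ // pointer has been saved in its control block; execution continues on
+ // the shared system stack until the scheduler selects the next thread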
+
+
+ /* Save the remaining time-slice and disable it. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w2, [x0, #36] // Save thread's time-slice
+ MOV w2, #0 // Clear value
+ STR w2, [x3, #0] // Disable global time-slice flag
+
+ // }
+__tx_thread_dont_save_ts:
+
+
+ /* Clear the current task pointer. */
+ /* _tx_thread_current_ptr = TX_NULL; */
+
+ MOV x0, #0 // NULL value
+ STR x0, [x1, #0] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ // _tx_thread_schedule();
+
+ // }
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
+#ifdef EL1
+ MSR ELR_EL1, x1 // Setup point of interrupt
+// MOV x1, #0x4 // Setup EL1 return
+// MSR spsr_el1, x1 // Move into SPSR
+#else
+#ifdef EL2
+ MSR ELR_EL2, x1 // Setup point of interrupt
+// MOV x1, #0x8 // Setup EL2 return
+// MSR spsr_el2, x1 // Move into SPSR
+#else
+ MSR ELR_EL3, x1 // Setup point of interrupt
+// MOV x1, #0xC // Setup EL3 return
+// MSR spsr_el3, x1 // Move into SPSR
+#endif
+#endif
+ ERET // Return to scheduler
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_thread_context_save.S b/ports/cortex_a55/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..859a1e44
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,216 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_context_save(VOID)
+// {
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save, @function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+ out, x29 (frame pointer), x30 (link register) are saved, we are in EL1,
+ and all other registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+ // if (_tx_thread_system_state++)
+ // {
+
+ STP x0, x1, [sp, #-16]! // Save x0, x1
+ STP x2, x3, [sp, #-16]! // Save x2, x3
+ LDR x3, =_tx_thread_system_state // Pickup address of system state var
+ LDR w2, [x3, #0] // Pickup system state
+ CMP w2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD w2, w2, #1 // Increment the nested interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x0, SPSR_EL1 // Pickup SPSR
+ MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x0, SPSR_EL2 // Pickup SPSR
+ MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x0, SPSR_EL3 // Pickup SPSR
+ MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x0, x1, [sp, #-16]! // Save SPSR, ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ /* Return to the ISR. */
+
+ RET // Return to ISR
+
+__tx_thread_not_nested_save:
+ // }
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ // else if (_tx_thread_current_ptr)
+ // {
+
+ ADD w2, w2, #1 // Increment the interrupt counter
+ STR w2, [x3, #0] // Store it back in the variable
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR x0, [x1, #0] // Pickup current thread pointer
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ STP x4, x5, [sp, #-16]! // Save x4, x5
+ STP x6, x7, [sp, #-16]! // Save x6, x7
+ STP x8, x9, [sp, #-16]! // Save x8, x9
+ STP x10, x11, [sp, #-16]! // Save x10, x11
+ STP x12, x13, [sp, #-16]! // Save x12, x13
+ STP x14, x15, [sp, #-16]! // Save x14, x15
+ STP x16, x17, [sp, #-16]! // Save x16, x17
+ STP x18, x19, [sp, #-16]! // Save x18, x19
+#ifdef EL1
+ MRS x4, SPSR_EL1 // Pickup SPSR
+ MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
+#else
+#ifdef EL2
+ MRS x4, SPSR_EL2 // Pickup SPSR
+ MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
+#else
+ MRS x4, SPSR_EL3 // Pickup SPSR
+ MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
+#endif
+#endif
+ STP x4, x5, [sp, #-16]! // Save SPSR, ELR
+
+ /* Save the current stack pointer in the thread's control block. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+
+ MOV x4, sp //
+ STR x4, [x0, #8] // Save thread stack pointer
+
+ /* Switch to the system stack. */
+ // sp = _tx_thread_system_stack_ptr;
+
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ RET // Return to caller
+
+ // }
+ // else
+ // {
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ STP x29, x30, [sp, #-16]! // Save x29, x30
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+#endif
+
+ ADD sp, sp, #48 // Recover saved registers
+ RET // Continue IRQ processing
+
+ // }
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_thread_fp_disable.c b/ports/cortex_a55/ac6/src/tx_thread_fp_disable.c
new file mode 100644
index 00000000..3e5d7e21
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_fp_disable.c
@@ -0,0 +1,97 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function disables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_disable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now set the FP enable flag to false in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_FALSE;
+ }
+ }
+}
+
diff --git a/ports/cortex_a55/ac6/src/tx_thread_fp_enable.c b/ports/cortex_a55/ac6/src/tx_thread_fp_enable.c
new file mode 100644
index 00000000..4e69205c
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_fp_enable.c
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#define TX_SOURCE_CODE
+
+
+/* Include necessary system files. */
+
+#include "tx_api.h"
+#include "tx_thread.h"
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fp_enable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function enables the FP for the currently executing thread. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+VOID _tx_thread_fp_enable(VOID)
+{
+
+TX_THREAD *thread_ptr;
+ULONG system_state;
+
+
+ /* Pickup the current thread pointer. */
+ TX_THREAD_GET_CURRENT(thread_ptr);
+
+ /* Get the system state. */
+ system_state = TX_THREAD_GET_SYSTEM_STATE();
+
+ /* Make sure it is not NULL. */
+ if (thread_ptr != TX_NULL)
+ {
+
+ /* Thread is running... make sure the call is from the thread context. */
+ if (system_state == 0)
+ {
+
+ /* Yes, now setup the FP enable flag in the TX_THREAD structure. */
+ thread_ptr -> tx_thread_fp_enable = TX_TRUE;
+ }
+ }
+}
+
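Usage note: when this port is built with ENABLE_ARM_FP, a thread must opt in before touching the FP/NEON registers so the scheduler saves and restores the FP context (q registers, FPSR, FPCR) for it. A minimal sketch that calls the two port services defined above directly; the thread entry name and the work inside it are illustrative only:

#include "tx_api.h"

VOID _tx_thread_fp_enable(VOID);     /* Provided by this port (see above) */
VOID _tx_thread_fp_disable(VOID);    /* Provided by this port (see above) */

VOID filter_thread_entry(ULONG input)
{
    (void)input;

    /* Ask the port to preserve the FP/SIMD context for this thread from now on. */
    _tx_thread_fp_enable();

    /* ... NEON/floating-point processing ... */

    /* Drop the FP context again to keep later context switches cheap. */
    _tx_thread_fp_disable();
}

As the C sources above show, both calls only take effect when made from thread context (system state 0) with a valid current thread pointer.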
diff --git a/ports/cortex_a55/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a55/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..6a5a7741
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,81 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_control(UINT new_posture)
+// {
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control, @function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x1, DAIF // Pickup current interrupt posture
+
+ /* Apply the new interrupt posture. */
+
+ MSR DAIF, x0 // Set new interrupt posture
+ MOV x0, x1 // Setup return value
+ RET // Return to caller
+// }
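For reference, application code normally reaches this routine through the standard tx_interrupt_control service name mapped in tx_api.h; the value passed in and handed back is this port's DAIF-based interrupt posture. A hedged sketch of the usual save/modify/restore pattern, assuming the TX_INT_DISABLE constant from the port's tx_port.h:

#include "tx_api.h"

static volatile ULONG shared_event_count;

void count_event_from_thread(void)
{
UINT old_posture;

    /* Lock out interrupts; the previous posture (DAIF) is returned. */
    old_posture = tx_interrupt_control(TX_INT_DISABLE);

    shared_event_count++;                   /* Short critical section */

    /* Put the interrupt posture back exactly as it was. */
    tx_interrupt_control(old_posture);
}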
diff --git a/ports/cortex_a55/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a55/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..d0062ef8
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,79 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_disable(void)
+// {
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable, @function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS x0, DAIF // Pickup current interrupt lockout posture
+
+ /* Mask interrupts. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a55/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..1b6261ba
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,77 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// UINT _tx_thread_interrupt_restore(UINT old_posture)
+// {
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore, @function
+_tx_thread_interrupt_restore:
+
+ /* Restore the old interrupt posture. */
+
+ MSR DAIF, x0 // Setup the old posture
+ RET // Return to caller
+
+// }
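The disable/restore pair above is what the kernel itself uses for its critical sections; if called directly (for example from BSP code that cannot use the higher-level services), the returned posture must always be handed back to the restore routine. A minimal sketch, declaring the routines as documented in the function headers above:

#include "tx_api.h"

UINT _tx_thread_interrupt_disable(VOID);              /* Provided by this port */
UINT _tx_thread_interrupt_restore(UINT old_posture);  /* Provided by this port */

void bsp_update_tick_count(volatile ULONG *tick_count)
{
UINT old_posture;

    old_posture = _tx_thread_interrupt_disable();     /* Mask IRQ/FIQ, return prior DAIF */

    (*tick_count)++;                                  /* Protected update */

    _tx_thread_interrupt_restore(old_posture);        /* Re-apply the saved DAIF value */
}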
diff --git a/ports/cortex_a55/ac6/src/tx_thread_schedule.S b/ports/cortex_a55/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..9a7a7262
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_schedule(VOID)
+// {
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule, @function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+ MSR DAIFClr, 0x3 // Enable interrupts
+
+ /* Wait for a thread to execute. */
+ // do
+ // {
+
+ LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+#ifdef TX_ENABLE_WFI
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BNE _tx_thread_schedule_thread //
+ WFI //
+ B __tx_thread_schedule_loop // Keep looking for a thread
+_tx_thread_schedule_thread:
+#else
+__tx_thread_schedule_loop:
+ LDR x0, [x1, #0] // Pickup next thread to execute
+ CMP x0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+#endif
+
+ // }
+ // while(_tx_thread_execute_ptr == TX_NULL);
+
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+ MSR DAIFSet, 0x3 // Lockout interrupts
+
+ /* Setup the current thread pointer. */
+ // _tx_thread_current_ptr = _tx_thread_execute_ptr;
+
+ LDR x1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR x0, [x1, #0] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+ // _tx_thread_current_ptr -> tx_thread_run_count++;
+
+ LDR w2, [x0, #4] // Pickup run counter
+ LDR w3, [x0, #36] // Pickup time-slice for this thread
+ ADD w2, w2, #1 // Increment thread run-counter
+ STR w2, [x0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+ // _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ // variable
+ LDR x4, [x0, #8] // Switch stack pointers
+ MOV sp, x4 //
+ STR w3, [x2, #0] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV x19, x0 // Save x0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV x0, x19 // Restore x0
+#endif
+
+ /* Switch to the thread's stack. */
+ // sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL1
+ CMP x5, #0 // Check for synchronous context switch (ELR_EL1 = NULL)
+ BEQ _tx_solicited_return
+#ifdef EL1
+ MSR SPSR_EL1, x4 // Setup SPSR for return
+ MSR ELR_EL1, x5 // Setup point of interrupt
+#else
+#ifdef EL2
+ MSR SPSR_EL2, x4 // Setup SPSR for return
+ MSR ELR_EL2, x5 // Setup point of interrupt
+#else
+ MSR SPSR_EL3, x4 // Setup SPSR for return
+ MSR ELR_EL3, x5 // Setup point of interrupt
+#endif
+#endif
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_interrupt_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q30, q31, [sp], #32 // Recover q30, q31
+ LDP q28, q29, [sp], #32 // Recover q28, q29
+ LDP q26, q27, [sp], #32 // Recover q26, q27
+ LDP q24, q25, [sp], #32 // Recover q24, q25
+ LDP q22, q23, [sp], #32 // Recover q22, q23
+ LDP q20, q21, [sp], #32 // Recover q20, q21
+ LDP q18, q19, [sp], #32 // Recover q18, q19
+ LDP q16, q17, [sp], #32 // Recover q16, q17
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+ LDP q6, q7, [sp], #32 // Recover q6, q7
+ LDP q4, q5, [sp], #32 // Recover q4, q5
+ LDP q2, q3, [sp], #32 // Recover q2, q3
+ LDP q0, q1, [sp], #32 // Recover q0, q1
+_skip_interrupt_fp_restore:
+#endif
+ LDP x28, x29, [sp], #16 // Recover x28
+ LDP x26, x27, [sp], #16 // Recover x26, x27
+ LDP x24, x25, [sp], #16 // Recover x24, x25
+ LDP x22, x23, [sp], #16 // Recover x22, x23
+ LDP x20, x21, [sp], #16 // Recover x20, x21
+ LDP x18, x19, [sp], #16 // Recover x18, x19
+ LDP x16, x17, [sp], #16 // Recover x16, x17
+ LDP x14, x15, [sp], #16 // Recover x14, x15
+ LDP x12, x13, [sp], #16 // Recover x12, x13
+ LDP x10, x11, [sp], #16 // Recover x10, x11
+ LDP x8, x9, [sp], #16 // Recover x8, x9
+ LDP x6, x7, [sp], #16 // Recover x6, x7
+ LDP x4, x5, [sp], #16 // Recover x4, x5
+ LDP x2, x3, [sp], #16 // Recover x2, x3
+ LDP x0, x1, [sp], #16 // Recover x0, x1
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ ERET // Return to point of interrupt
+
+_tx_solicited_return:
+
+#ifdef ENABLE_ARM_FP
+ LDR w1, [x0, #248] // Pickup FP enable flag
+ CMP w1, #0 // Is FP enabled?
+ BEQ _skip_solicited_fp_restore // No, skip FP restore
+ LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
+ MSR FPSR, x0 // Recover FPSR
+ MSR FPCR, x1 // Recover FPCR
+ LDP q14, q15, [sp], #32 // Recover q14, q15
+ LDP q12, q13, [sp], #32 // Recover q12, q13
+ LDP q10, q11, [sp], #32 // Recover q10, q11
+ LDP q8, q9, [sp], #32 // Recover q8, q9
+_skip_solicited_fp_restore:
+#endif
+ LDP x27, x28, [sp], #16 // Recover x27, x28
+ LDP x25, x26, [sp], #16 // Recover x25, x26
+ LDP x23, x24, [sp], #16 // Recover x23, x24
+ LDP x21, x22, [sp], #16 // Recover x21, x22
+ LDP x19, x20, [sp], #16 // Recover x19, x20
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ MSR DAIF, x4 // Recover DAIF
+ RET // Return to caller
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_thread_stack_build.S b/ports/cortex_a55/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..5b7e945a
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread */
+/* function_ptr Pointer to entry function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.10 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
+// {
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build, @function
+_tx_thread_stack_build:
+
+
+ /* Build an interrupt frame. On Cortex-A55 it should look like this:
+
+ Stack Top: SPSR Initial SPSR
+ ELR Point of interrupt
+ x28 Initial value for x28
+ not used Not used
+ x26 Initial value for x26
+ x27 Initial value for x27
+ x24 Initial value for x24
+ x25 Initial value for x25
+ x22 Initial value for x22
+ x23 Initial value for x23
+ x20 Initial value for x20
+ x21 Initial value for x21
+ x18 Initial value for x18
+ x19 Initial value for x19
+ x16 Initial value for x16
+ x17 Initial value for x17
+ x14 Initial value for x14
+ x15 Initial value for x15
+ x12 Initial value for x12
+ x13 Initial value for x13
+ x10 Initial value for x10
+ x11 Initial value for x11
+ x8 Initial value for x8
+ x9 Initial value for x9
+ x6 Initial value for x6
+ x7 Initial value for x7
+ x4 Initial value for x4
+ x5 Initial value for x5
+ x2 Initial value for x2
+ x3 Initial value for x3
+ x0 Initial value for x0
+ x1 Initial value for x1
+ x29 Initial value for x29 (frame pointer)
+ x30 Initial value for x30 (link register)
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR x4, [x0, #24] // Pickup end of stack area
+ BIC x4, x4, #0xF // Ensure 16-byte alignment
+
+ /* Actually build the stack frame. */
+
+ MOV x2, #0 // Build clear value
+ MOV x3, #0 //
+
+ STP x2, x3, [x4, #-16]! // Set backtrace to 0
+ STP x2, x3, [x4, #-16]! // Set initial x29, x30
+ STP x2, x3, [x4, #-16]! // Set initial x0, x1
+ STP x2, x3, [x4, #-16]! // Set initial x2, x3
+ STP x2, x3, [x4, #-16]! // Set initial x4, x5
+ STP x2, x3, [x4, #-16]! // Set initial x6, x7
+ STP x2, x3, [x4, #-16]! // Set initial x8, x9
+ STP x2, x3, [x4, #-16]! // Set initial x10, x11
+ STP x2, x3, [x4, #-16]! // Set initial x12, x13
+ STP x2, x3, [x4, #-16]! // Set initial x14, x15
+ STP x2, x3, [x4, #-16]! // Set initial x16, x17
+ STP x2, x3, [x4, #-16]! // Set initial x18, x19
+ STP x2, x3, [x4, #-16]! // Set initial x20, x21
+ STP x2, x3, [x4, #-16]! // Set initial x22, x23
+ STP x2, x3, [x4, #-16]! // Set initial x24, x25
+ STP x2, x3, [x4, #-16]! // Set initial x26, x27
+ STP x2, x3, [x4, #-16]! // Set initial x28
+#ifdef EL1
+ MOV x2, #0x4 // Build initial SPSR (EL1)
+#else
+#ifdef EL2
+ MOV x2, #0x8 // Build initial SPSR (EL2)
+#else
+ MOV x2, #0xC // Build initial SPSR (EL3)
+#endif
+#endif
+ MOV x3, x1 // Build initial ELR
+ STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
+
+ /* Setup stack pointer. */
+ // thread_ptr -> tx_thread_stack_ptr = x4;
+
+ STR x4, [x0, #8] // Save stack pointer in thread's control block
+ RET // Return to caller
+
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_thread_system_return.S b/ports/cortex_a55/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..7d42b63d
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,151 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get clobbered by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_thread_system_return(VOID)
+// {
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return, @function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ MRS x0, DAIF // Pickup DAIF
+ MSR DAIFSet, 0x3 // Lockout interrupts
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+ STP x19, x20, [sp, #-16]! // Save x19, x20
+ STP x21, x22, [sp, #-16]! // Save x21, x22
+ STP x23, x24, [sp, #-16]! // Save x23, x24
+ STP x25, x26, [sp, #-16]! // Save x25, x26
+ STP x27, x28, [sp, #-16]! // Save x27, x28
+ LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR x6, [x5, #0] // Pickup current thread pointer
+
+#ifdef ENABLE_ARM_FP
+ LDR w7, [x6, #248] // Pickup FP enable flag
+ CMP w7, #0 // Is FP enabled?
+ BEQ _skip_fp_save // No, skip FP save
+ STP q8, q9, [sp, #-32]! // Save q8, q9
+ STP q10, q11, [sp, #-32]! // Save q10, q11
+ STP q12, q13, [sp, #-32]! // Save q12, q13
+ STP q14, q15, [sp, #-32]! // Save q14, q15
+ MRS x2, FPSR // Pickup FPSR
+ MRS x3, FPCR // Pickup FPCR
+ STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
+_skip_fp_save:
+#endif
+
+ MOV x1, #0 // Clear x1
+ STP x0, x1, [sp, #-16]! // Save DAIF and clear value for ELR
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ MOV x19, x5 // Save x5
+ MOV x20, x6 // Save x6
+ BL _tx_execution_thread_exit // Call the thread exit function
+ MOV x5, x19 // Restore x5
+ MOV x6, x20 // Restore x6
+#endif
+
+ LDR x2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR w1, [x2, #0] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+ // _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
+ // sp = _tx_thread_system_stack_ptr;
+
+ MOV x4, sp //
+ STR x4, [x6, #8] // Save thread stack pointer
+ LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
+ LDR x4, [x3, #0] // Pickup system stack pointer
+ MOV sp, x4 // Setup system stack pointer
+
+ /* Determine if the time-slice is active. */
+ // if (_tx_timer_time_slice)
+ // {
+
+ MOV x4, #0 // Build clear value
+ CMP w1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save the current remaining time-slice. */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
+
+ STR w4, [x2, #0] // Clear time-slice
+ STR w1, [x6, #36] // Store current time-slice
+
+ // }
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+ // _tx_thread_current_ptr = TX_NULL;
+
+ STR x4, [x5, #0] // Clear current thread pointer
+
+ B _tx_thread_schedule // Jump to scheduler!
+
+// }
diff --git a/ports/cortex_a55/ac6/src/tx_timer_interrupt.S b/ports/cortex_a55/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..5810b5c2
--- /dev/null
+++ b/ports/cortex_a55/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,228 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+ .text
+ .align 3
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv8-A */
+/* 6.1.10 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* */
+/**************************************************************************/
+// VOID _tx_timer_interrupt(VOID)
+// {
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt, @function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+ // _tx_timer_system_clock++;
+
+ LDR x1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR w0, [x1, #0] // Pickup system clock
+ ADD w0, w0, #1 // Increment system clock
+ STR w0, [x1, #0] // Store new system clock
+
+ /* Test for time-slice expiration. */
+ /* if (_tx_timer_time_slice)
+ { */
+
+ LDR x3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR w2, [x3, #0] // Pickup time-slice
+ CMP w2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+ /* _tx_timer_time_slice--; */
+
+ SUB w2, w2, #1 // Decrement the time-slice
+ STR w2, [x3, #0] // Store new time-slice value
+
+ /* Check for expiration. */
+ /* if (_tx_timer_time_slice == 0) */
+
+ CMP w2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+ /* _tx_timer_expired_time_slice = TX_TRUE; */
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV w0, #1 // Build expired value
+ STR w0, [x3, #0] // Set time-slice expiration flag
+
+ /* } */
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+ // if (*_tx_timer_current_ptr)
+ // {
+
+ LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
+ LDR x0, [x1, #0] // Pickup current timer
+ LDR x2, [x0, #0] // Pickup timer list entry
+ CMP x2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+ // _tx_timer_expired = TX_TRUE;
+
+ LDR x3, =_tx_timer_expired // Pickup expiration flag address
+ MOV w2, #1 // Build expired value
+ STR w2, [x3, #0] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+ // }
+ // else
+ // {
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ // _tx_timer_current_ptr++;
+
+ ADD x0, x0, #8 // Move to next timer
+
+ /* Check for wrap-around. */
+ // if (_tx_timer_current_ptr == _tx_timer_list_end)
+
+ LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
+ LDR x2, [x3, #0] // Pickup list end
+ CMP x0, x2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wrap-around logic
+
+ /* Wrap to beginning of list. */
+ // _tx_timer_current_ptr = _tx_timer_list_start;
+
+ LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
+ LDR x0, [x3, #0] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR x0, [x1, #0] // Store new current timer pointer
+ // }
+
+__tx_timer_done:
+
+
+ /* See if anything has expired. */
+ // if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
+ //{
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of expired flag
+ LDR w2, [x3, #0] // Pickup time-slice expired flag
+ CMP w2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR x1, =_tx_timer_expired // Pickup addr of other expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+
+ STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
+
+ /* Did a timer expire? */
+ // if (_tx_timer_expired)
+ // {
+
+ LDR x1, =_tx_timer_expired // Pickup addr of expired flag
+ LDR w0, [x1, #0] // Pickup timer expired flag
+ CMP w0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ // _tx_timer_expiration_process();
+
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+ // }
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+ // if (_tx_timer_expired_time_slice)
+ // {
+
+ LDR x3, =_tx_timer_expired_time_slice // Pickup addr of time-slice expired
+ LDR w2, [x3, #0] // Pickup the actual flag
+ CMP w2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+ // _tx_thread_time_slice();
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+ // }
+
+__tx_timer_not_ts_expiration:
+
+ LDP x29, x30, [sp], #16 // Recover x29, x30
+ // }
+
+__tx_timer_nothing_expired:
+
+ RET // Return to caller
+
+// }
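Integration note: this routine performs no hardware access itself; the platform's periodic tick ISR is expected to acknowledge its timer source and then call it once per tick, after the ISR prologue has gone through _tx_thread_context_save. A hedged sketch with a hypothetical BSP handler and acknowledge helper:

#include "tx_api.h"

VOID _tx_timer_interrupt(VOID);          /* Provided by this port (above) */

void bsp_timer_acknowledge(void);        /* Hypothetical BSP helper */

/* Hypothetical C-level handler dispatched for the periodic timer interrupt. */
void bsp_tick_handler(void)
{
    bsp_timer_acknowledge();             /* Clear/reload the hardware timer */
    _tx_timer_interrupt();               /* Advance ThreadX timers and time-slicing */
}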
diff --git a/ports/cortex_a55/gnu/example_build/sample_threadx/.cproject b/ports/cortex_a55/gnu/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..1c32cb32
--- /dev/null
+++ b/ports/cortex_a55/gnu/example_build/sample_threadx/.cproject
@@ -0,0 +1,170 @@
diff --git a/ports/cortex_a55/gnu/example_build/sample_threadx/.project b/ports/cortex_a55/gnu/example_build/sample_threadx/.project
new file mode 100644
index 00000000..a1b15572
--- /dev/null
+++ b/ports/cortex_a55/gnu/example_build/sample_threadx/.project
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>sample_threadx</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+  <buildCommand>
+   <name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+   <triggers>clean,full,incremental,</triggers>
+   <arguments>
+   </arguments>
+  </buildCommand>
+  <buildCommand>
+   <name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+   <triggers>full,incremental,</triggers>
+   <arguments>
+   </arguments>
+  </buildCommand>
+ </buildSpec>
+ <natures>
+  <nature>org.eclipse.cdt.core.cnature</nature>
+  <nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+  <nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+ </natures>
+</projectDescription>
diff --git a/ports/cortex_a55/gnu/example_build/sample_threadx/GICv3.h b/ports/cortex_a55/gnu/example_build/sample_threadx/GICv3.h
new file mode 100644
index 00000000..dfe37586
--- /dev/null
+++ b/ports/cortex_a55/gnu/example_build/sample_threadx/GICv3.h
@@ -0,0 +1,561 @@
+/*
+ * GICv3.h - data types and function prototypes for GICv3 utility routines
+ *
+ * Copyright (c) 2014-2017 Arm Limited (or its affiliates). All rights reserved.
+ * Use, modification and redistribution of this file is subject to your possession of a
+ * valid End User License Agreement for the Arm Product of which these examples are part of
+ * and your compliance with all applicable terms and conditions of such licence agreement.
+ */
+#ifndef GICV3_h
+#define GICV3_h
+
+#include <stdint.h>
+
+/*
+ * extra flags for GICD enable
+ */
+typedef enum
+{
+ gicdctlr_EnableGrp0 = (1 << 0),
+ gicdctlr_EnableGrp1NS = (1 << 1),
+ gicdctlr_EnableGrp1A = (1 << 1),
+ gicdctlr_EnableGrp1S = (1 << 2),
+ gicdctlr_EnableAll = (1 << 2) | (1 << 1) | (1 << 0),
+ gicdctlr_ARE_S = (1 << 4), /* Enable Secure state affinity routing */
+ gicdctlr_ARE_NS = (1 << 5), /* Enable Non-Secure state affinity routing */
+ gicdctlr_DS = (1 << 6), /* Disable Security support */
+ gicdctlr_E1NWF = (1 << 7) /* Enable "1-of-N" wakeup model */
+} GICDCTLRFlags_t;
+
+/*
+ * modes for SPI routing
+ */
+typedef enum
+{
+ gicdirouter_ModeSpecific = 0,
+ gicdirouter_ModeAny = (1 << 31)
+} GICDIROUTERBits_t;
+
+typedef enum
+{
+ gicdicfgr_Level = 0,
+ gicdicfgr_Edge = (1 << 1)
+} GICDICFGRBits_t;
+
+typedef enum
+{
+ gicigroupr_G0S = 0,
+ gicigroupr_G1NS = (1 << 0),
+ gicigroupr_G1S = (1 << 2)
+} GICIGROUPRBits_t;
+
+typedef enum
+{
+ gicrwaker_ProcessorSleep = (1 << 1),
+ gicrwaker_ChildrenAsleep = (1 << 2)
+} GICRWAKERBits_t;
+
+/**********************************************************************/
+
+/*
+ * Utility macros & functions
+ */
+#define RANGE_LIMIT(x) ((sizeof(x) / sizeof((x)[0])) - 1)
+
+static inline uint64_t gicv3PackAffinity(uint32_t aff3, uint32_t aff2,
+ uint32_t aff1, uint32_t aff0)
+{
+ /*
+ * only need to cast aff3 to get type promotion for all affinities
+ */
+ return ((((uint64_t)aff3 & 0xff) << 32) |
+ ((aff2 & 0xff) << 16) |
+ ((aff1 & 0xff) << 8) | aff0);
+}
+
+/**********************************************************************/
+
+/*
+ * GIC Distributor Function Prototypes
+ */
+
+/*
+ * ConfigGICD - configure GIC Distributor prior to enabling it
+ *
+ * Inputs:
+ *
+ * control - control flags
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void ConfigGICD(GICDCTLRFlags_t flags);
+
+/*
+ * EnableGICD - top-level enable for GIC Distributor
+ *
+ * Inputs:
+ *
+ * flags - new control flags to set
+ *
+ * Returns:
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void EnableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * DisableGICD - top-level disable for GIC Distributor
+ *
+ * Inputs
+ *
+ * flags - control flags to clear
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * ConfigGICD() will set an absolute flags value, whereas
+ * {En,Dis}ableGICD() will only {set,clear} the flag bits
+ * passed as a parameter
+ */
+void DisableGICD(GICDCTLRFlags_t flags);
+
+/*
+ * SyncAREinGICD - synchronise GICD Address Routing Enable bits
+ *
+ * Inputs
+ *
+ * flags - absolute flag bits to set in GIC Distributor
+ *
+ * dosync - flag whether to wait for ARE bits to match passed
+ * flag field (dosync = true), or whether to set absolute
+ * flag bits (dosync = false)
+ *
+ * Returns
+ *
+ *
+ *
+ * NOTE:
+ *
+ * This function is used to resolve a race in an MP system whereby secondary
+ * CPUs cannot reliably program all Redistributor registers until the
+ * primary CPU has enabled Address Routing. The primary CPU will call this
+ * function with dosync = false, while the secondaries will call it with
+ * dosync = true.
+ */
+void SyncAREinGICD(GICDCTLRFlags_t flags, uint32_t dosync);
+
+/*
+ * EnableSPI - enable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnableSPI(uint32_t id);
+
+/*
+ * DisableSPI - disable a specific shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisableSPI(uint32_t id);
+
+/*
+ * SetSPIPriority - configure the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetSPIPriority(uint32_t id, uint32_t priority);
+
+/*
+ * GetSPIPriority - determine the priority for a shared peripheral interrupt
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetSPIPriority(uint32_t id);
+
+/*
+ * SetSPIRoute - specify interrupt routing when gicdctlr_ARE is enabled
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * affinity - prepacked "dotted quad" affinity routing. NOTE: use the
+ * gicv3PackAffinity() helper routine to generate this input
+ *
+ * mode - select routing mode (specific affinity, or any recipient)
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPIRoute(uint32_t id, uint64_t affinity, GICDIROUTERBits_t mode);
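To show how these Distributor calls combine, a hedged sketch that prioritizes, configures, routes, and enables a single SPI once affinity routing has been enabled; the interrupt ID, priority, and target affinity are placeholders for whatever the platform actually uses:

#include <stdint.h>
#include "GICv3.h"

#define UART0_SPI_ID 40u                /* Placeholder SPI number */

static void route_uart0_to_core0(void)
{
    SetSPIPriority(UART0_SPI_ID, 0xA0);                     /* Placeholder priority */
    ConfigureSPI(UART0_SPI_ID, gicdicfgr_Level);            /* Level-triggered source */

    /* Route to affinity 0.0.0.0 (core 0) rather than to "any" participating core. */
    SetSPIRoute(UART0_SPI_ID, gicv3PackAffinity(0, 0, 0, 0), gicdirouter_ModeSpecific);

    EnableSPI(UART0_SPI_ID);                                /* Unmask at the Distributor */
}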
+
+/*
+ * GetSPIRoute - read ARE-enabled interrupt routing information
+ *
+ * Inputs:
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * routing configuration
+ */
+uint64_t GetSPIRoute(uint32_t id);
+
+/*
+ * SetSPITarget - configure the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * target - 8-bit target bitmap
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPITarget(uint32_t id, uint32_t target);
+
+/*
+ * GetSPITarget - read the set of processor targets for an interrupt
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * 8-bit target bitmap
+ */
+uint32_t GetSPITarget(uint32_t id);
+
+/*
+ * ConfigureSPI - setup an interrupt as edge- or level-triggered
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * config - desired configuration
+ *
+ * Returns
+ *
+ *
+ */
+void ConfigureSPI(uint32_t id, GICDICFGRBits_t config);
+
+/*
+ * SetSPIPending - mark an interrupt as pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPIPending(uint32_t id);
+
+/*
+ * ClearSPIPending - mark an interrupt as not pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void ClearSPIPending(uint32_t id);
+
+/*
+ * GetSPIPending - query whether an interrupt is pending
+ *
+ * Inputs
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ * pending status
+ */
+uint32_t GetSPIPending(uint32_t id);
+
+/*
+ * SetSPISecurity - assign a shared peripheral interrupt to a
+ * security group
+ *
+ * Inputs
+ *
+ * id - which interrupt to mark
+ *
+ * group - the group for the interrupt
+ *
+ * Returns
+ *
+ *
+ */
+void SetSPISecurity(uint32_t id, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityBlock - assign a block of 32 shared peripheral
+ * interrupts to a security group
+ *
+ * Inputs:
+ *
+ * block - which block to mark (e.g. 1 = Ints 32-63)
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityBlock(uint32_t block, GICIGROUPRBits_t group);
+
+/*
+ * SetSPISecurityAll - assign all shared peripheral interrupts
+ * to a security group
+ *
+ * Inputs:
+ *
+ * group - the group for the interrupts
+ *
+ * Returns:
+ *
+ *
+ */
+void SetSPISecurityAll(GICIGROUPRBits_t group);
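Taken together, a typical single-security-state bring-up first assigns every SPI to a group and then enables that group at the Distributor; a hedged sketch, where the exact flag combination and ordering are platform- and firmware-dependent:

#include <stdint.h>
#include "GICv3.h"

static void gic_distributor_init(void)
{
    /* Start with all shared interrupts in Non-secure group 1. */
    SetSPISecurityAll(gicigroupr_G1NS);

    /* Enable Non-secure affinity routing and Non-secure group 1 forwarding. */
    EnableGICD(gicdctlr_ARE_NS | gicdctlr_EnableGrp1NS);
}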
+
+/**********************************************************************/
+
+/*
+ * GIC Re-Distributor Function Prototypes
+ *
+ * The model for calling Redistributor functions is that, rather than
+ * identifying the target redistributor with every function call, the
+ * SelectRedistributor() function is used to identify which redistributor
+ * is to be used for all functions until a different redistributor is
+ * explicitly selected
+ */
+
+/*
+ * WakeupGICR - wake up a Redistributor
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to wakeup
+ *
+ * Returns:
+ *
+ *
+ */
+void WakeupGICR(uint32_t gicr);
+
+/*
+ * EnablePrivateInt - enable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to enable
+ *
+ * Returns:
+ *
+ *
+ */
+void EnablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * DisablePrivateInt - disable a private (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - which interrupt to disable
+ *
+ * Returns:
+ *
+ *
+ */
+void DisablePrivateInt(uint32_t gicr, uint32_t id);
+
+/*
+ * SetPrivateIntPriority - configure the priority for a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * priority - 8-bit priority to program (see note below)
+ *
+ * Returns:
+ *
+ *
+ *
+ * Note:
+ *
+ * The GICv3 architecture makes this function sensitive to the Security
+ * context in terms of what effect it has on the programmed priority: no
+ * attempt is made to adjust for the reduced priority range available
+ * when making Non-Secure accesses to the GIC
+ */
+void SetPrivateIntPriority(uint32_t gicr, uint32_t id, uint32_t priority);
+
+/*
+ * GetPrivateIntPriority - read the priority of a private
+ * (SGI/PPI) interrupt
+ *
+ * Inputs:
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns:
+ *
+ * interrupt priority in the range 0 - 0xff
+ */
+uint32_t GetPrivateIntPriority(uint32_t gicr, uint32_t id);
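On the per-core side, the Redistributor must be awake before its SGI/PPI state is programmed; a hedged sketch that enables the EL1 physical timer PPI for core 0 (ID 30 on many ARMv8-A platforms, but confirm against the target):

#include <stdint.h>
#include "GICv3.h"

#define CORE0_GICR 0u                   /* Placeholder Redistributor index for core 0 */
#define TICK_PPI_ID 30u                 /* Typical CNTP PPI; platform-dependent */

static void gic_core0_tick_init(void)
{
    WakeupGICR(CORE0_GICR);                               /* Bring the Redistributor out of sleep */
    SetPrivateIntPriority(CORE0_GICR, TICK_PPI_ID, 0x80); /* Placeholder priority */
    EnablePrivateInt(CORE0_GICR, TICK_PPI_ID);            /* Unmask the PPI */
}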
+
+/*
+ * SetPrivateIntPending - mark a private (SGI/PPI) interrupt as pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *
+ */
+void SetPrivateIntPending(uint32_t gicr, uint32_t id);
+
+/*
+ * ClearPrivateIntPending - mark a private (SGI/PPI) interrupt as not pending
+ *
+ * Inputs
+ *
+ * gicr - which Redistributor to program
+ *
+ * id - interrupt identifier
+ *
+ * Returns
+ *
+ *