Release 6.1.10

This commit is contained in:
Yuxin Zhou
2022-01-29 00:24:03 +00:00
parent b216ceb25e
commit f7f0957188
3111 changed files with 495735 additions and 40800 deletions

View File

@@ -25,8 +25,8 @@
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33/AC6 */
/* 6.1.9 */
/* tx_port.h Cortex-M33 */
/* 6.1.10 */
/* */
/* AUTHOR */
/* */
@@ -43,6 +43,9 @@
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M33 files. It unifies */
/* the Cortex-M33 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
@@ -51,14 +54,20 @@
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Yuxin Zhou Modified comment(s), removed */
/* unneeded header file, */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -68,17 +77,32 @@
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
@@ -101,21 +125,17 @@ UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* This port handles stack errors. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
@@ -132,10 +152,11 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif
#endif
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
@@ -160,19 +181,19 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M33 port. */
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
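As an illustration only (not part of this commit), the two trace items could be pointed at the Cortex-M DWT cycle counter for a 32-bit time source; the register address and the choice of counter are assumptions here:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004) /* DWT->CYCCNT, hypothetical source */
#define TX_TRACE_TIME_SOURCE_BITS 32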
@@ -211,7 +232,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -225,16 +246,35 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
@@ -249,7 +289,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
@@ -259,38 +299,39 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#ifndef TX_MISRA_ENABLE
//register unsigned int _ipsr __asm ("MRS %[result], ipsr" : [result] "=r" (_ipsr) : );
inline static unsigned int _get_ipsr(void);
inline static unsigned int _get_ipsr(void)
{
unsigned int _ipsr;
__asm("MRS %[result], ipsr" : [result] "=r" (_ipsr) : );
return _ipsr;
}
#endif
#ifdef __ARM_PCS_VFP
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
@@ -299,128 +340,128 @@ void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else
#else /* TX_MISRA_ENABLE not defined */
#ifdef TX_SOURCE_CODE
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
static unsigned int _get_control(void);
static unsigned int _get_control(void)
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
unsigned int _control;
__asm("MRS %[result], control" : [result] "=r" (_control) : );
return _control;
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
static void _set_control(unsigned int _control);
static void _set_control(unsigned int _control)
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm("MSR control, %[input]" : : [input] "r" (_control));
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif
#endif
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state);; \
}
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
void _tx_vfp_access(void);
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_vfp_access(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state); \
} \
} \
} \
}
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
@@ -445,16 +486,27 @@ void _tx_vfp_access(void);
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _get_ipsr())
#else
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif
#endif
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
@@ -472,31 +524,102 @@ extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
/* Determine if the ARM architecture has the CLZ instruction. This is available on
architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#ifndef TX_DISABLE_INLINE
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __clz(__rbit((m)));
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
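A small usage sketch with hypothetical values: the macro writes the zero-based position of the lowest set bit of m into b. Note that the GCC/AC6 variant also overwrites m with its bit-reversed value, so m must be a scratch variable:
ULONG map = 0x00000090; /* bits 4 and 7 set */
UINT bit;
TX_LOWEST_SET_BIT_CALCULATE(map, bit); /* bit == 4; map is clobbered by RBIT on GCC/AC6 */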
/* Define ThreadX interrupt lockout and restore macros for protection on
access of critical kernel information. The restore interrupt macro must
restore the interrupt posture of the running thread to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
#ifdef TX_DISABLE_INLINE
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
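A minimal sketch of the intended usage pattern for these macros around a critical section; the counter and function are hypothetical:
static ULONG demo_counter; /* hypothetical shared state */
static VOID demo_increment(VOID)
{
TX_INTERRUPT_SAVE_AREA
    TX_DISABLE              /* save the current posture, then mask via PRIMASK or BASEPRI */
    demo_counter++;         /* critical section */
    TX_RESTORE              /* restore the posture saved by TX_DISABLE */
}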
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
@@ -504,41 +627,14 @@ VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#else
#define TX_INTERRUPT_SAVE_AREA UINT was_masked;
#define TX_DISABLE was_masked = __disable_irq();
#define TX_RESTORE if (was_masked == 0) __enable_irq();
#define _tx_thread_system_return _tx_thread_system_return_inline
static void _tx_thread_system_return_inline(void)
{
unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
*((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
{
was_masked = __disable_irq();
__enable_irq();
if (was_masked != 0)
__disable_irq();
}
}
#endif
#endif /* TX_DISABLE_INLINE */
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M33/AC6 Version 6.1.9 *";
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M33 Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];

View File

@@ -42,7 +42,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/

View File

@@ -23,7 +23,7 @@
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
@@ -45,8 +45,14 @@
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* Maximum number of secure contexts. */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
@@ -54,8 +60,14 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Index of the next free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of the free secure context list */
static INT tx_head_free_index = 0U;
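To make the index-linked free list concrete, this is its state right after _tx_thread_secure_mode_stack_initialize runs, assuming the default TX_MAX_SECURE_CONTEXTS of 32:
/* tx_head_free_index == 0, and each context links to its successor:
   tx_thread_secure_context[0].tx_next_free_index == 1, [1] == 2, ...,
   [31] == TX_INVALID_SECURE_CONTEXT_IDX.
   Allocation pops the head under TX_DISABLE; freeing pushes the index back on. */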
/**************************************************************************/
@@ -63,7 +75,7 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M33/AC6 */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -102,12 +114,16 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* changed name, execute in */
/* handler mode, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
@@ -118,12 +134,26 @@ UINT status;
{
/* Set secure mode to use PSP. */
__set_CONTROL(__get_CONTROL() | 2);
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* On the last index, terminate the free list with the invalid index. */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
@@ -136,7 +166,7 @@ UINT status;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M33/AC6 */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -160,9 +190,7 @@ UINT status;
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* calloc Compiler's calloc function */
/* malloc Compiler's malloc function */
/* free Compiler's free() function */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* __TZ_get_PSPLIM_NS Intrinsic to get NS PSP */
@@ -179,17 +207,22 @@ UINT status;
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack_size)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
@@ -199,23 +232,39 @@ UCHAR *stack_mem;
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
/* Allocate space for secure stack info. */
info_ptr = calloc(1, sizeof(TX_THREAD_SECURE_STACK_INFO));
if(info_ptr != TX_NULL)
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
@@ -223,13 +272,13 @@ UCHAR *stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save info pointer in thread. */
thread_ptr -> tx_thread_secure_stack_context = info_ptr;
/* Save the secure context id (a one-based index, so zero means no secure stack) in the thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
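/* Illustration (not part of the commit): for secure context index 5 the stored
   handle is (VOID *)6. A handle of 0 therefore still means "no secure stack",
   and the free/save/restore paths recover the index with (INT)handle - 1. */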
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == __TZ_get_PSPLIM_NS())
{
@@ -238,21 +287,26 @@ UCHAR *stack_mem;
__set_PSP((ULONG)(info_ptr -> tx_thread_secure_stack_ptr));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
free(info_ptr);
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
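A usage sketch from the non-secure application side, going through the error-checked mapping defined in tx_port.h; the thread and the 512-byte size are hypothetical:
UINT status = tx_thread_secure_stack_allocate(&my_thread, 512);
if (status == TX_NO_MEMORY)
{
    /* Either all TX_MAX_SECURE_CONTEXTS entries are in use or malloc failed. */
}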
@@ -263,7 +317,7 @@ UCHAR *stack_mem;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M33/AC6 */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -298,44 +352,65 @@ UCHAR *stack_mem;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info from thread. */
info_ptr = thread_ptr -> tx_thread_secure_stack_context;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
/* Check that this secure context is for this thread. */
else if (info_ptr -> tx_thread_ptr != thread_ptr)
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
/* Free info struct. */
free(info_ptr);
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
@@ -346,7 +421,7 @@ TX_THREAD_SECURE_STACK_INFO *info_ptr;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M33/AC6 */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -383,6 +458,9 @@ TX_THREAD_SECURE_STACK_INFO *info_ptr;
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -390,38 +468,45 @@ void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = (TX_THREAD_SECURE_STACK_INFO *)(thread_ptr -> tx_thread_secure_stack_context);
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
sp = __get_PSP();
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
return;
}
@@ -432,7 +517,7 @@ ULONG sp;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M33/AC6 */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -466,32 +551,42 @@ ULONG sp;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = (TX_THREAD_SECURE_STACK_INFO *)(thread_ptr -> tx_thread_secure_stack_context);
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
__set_PSPLIM((ULONG)info_ptr -> tx_thread_secure_stack_limit);
__set_PSP ((ULONG)info_ptr -> tx_thread_secure_stack_ptr);
return;
}

View File

@@ -25,8 +25,8 @@
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33/GNU */
/* 6.1.9 */
/* tx_port.h Cortex-M33 */
/* 6.1.10 */
/* */
/* AUTHOR */
/* */
@@ -43,6 +43,9 @@
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M33 files. It unifies */
/* the Cortex-M33 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
@@ -51,15 +54,20 @@
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), changed */
/* set_control and get_control */
/* to be inline functions, */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -69,17 +77,33 @@
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
/* Define ThreadX basic types for this port. */
#define VOID void
@@ -101,21 +125,17 @@ UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* This port handles stack errors. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
@@ -132,10 +152,11 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif
#endif
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
@@ -160,19 +181,19 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M33 port. */
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -211,7 +232,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -225,16 +246,35 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context; \
VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_iar_tls_pointer;
#endif
#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
@@ -249,7 +289,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
@@ -259,38 +299,39 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#ifndef TX_MISRA_ENABLE
//register unsigned int _ipsr __asm ("MRS %[result], ipsr" : [result] "=r" (_ipsr) : );
inline static unsigned int _get_ipsr(void);
inline static unsigned int _get_ipsr(void)
{
unsigned int _ipsr;
__asm("MRS %[result], ipsr" : [result] "=r" (_ipsr) : );
return _ipsr;
}
#endif
#ifdef __ARM_PCS_VFP
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
@@ -299,128 +340,128 @@ void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else
#else /* TX_MISRA_ENABLE not defined */
#ifdef TX_SOURCE_CODE
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
static inline unsigned int _get_control(void);
static inline unsigned int _get_control(void)
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
unsigned int _control;
__asm("MRS %[result], control" : [result] "=r" (_control) : );
return _control;
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
static inline void _set_control(unsigned int _control);
static inline void _set_control(unsigned int _control)
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm("MSR control, %[input]" : : [input] "r" (_control));
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif
#endif
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state);; \
}
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
void _tx_vfp_access(void);
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_vfp_access(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_set_control(_tx_vfp_state); \
} \
} \
} \
}
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
@@ -445,16 +486,27 @@ void _tx_vfp_access(void);
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _get_ipsr())
#else
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif
#endif
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
@@ -472,106 +524,117 @@ extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
/* Determine if the ARM architecture has the CLZ instruction. This is available on
architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#ifndef TX_DISABLE_INLINE
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
/* Define ThreadX interrupt lockout and restore macros for protection on
access of critical kernel information. The restore interrupt macro must
restore the interrupt posture of the running thread to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
#ifndef TX_DISABLE_INLINE
/* Define the interrupt disable/restore macros. */
/* Define GNU specific macros, with in-line assembly for performance. */
__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupts(void)
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
unsigned int primask_value;
__asm__ volatile (" MRS %0,PRIMASK ": "=r" (primask_value) );
__asm__ volatile (" CPSID i" : : : "memory" );
return(primask_value);
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
__attribute__( ( always_inline ) ) static inline void __restore_interrupts(unsigned int primask_value)
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile (" MSR PRIMASK,%0": : "r" (primask_value): "memory" );
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
__attribute__( ( always_inline ) ) static inline unsigned int __get_primask_value(void)
{
unsigned int primask_value;
__asm__ volatile (" MRS %0,PRIMASK ": "=r" (primask_value) );
return(primask_value);
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__asm__ volatile (" CPSIE i": : : "memory" );
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
unsigned int interrupt_save;
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_primask_value();
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
__restore_interrupts(interrupt_save);
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupts(interrupt_save);
#define TX_RESTORE __restore_interrupt(interrupt_save);
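Because this unified file honors TX_PORT_USE_BASEPRI, a hedged tx_user.h sketch of switching from PRIMASK to BASEPRI masking; the priority value is an assumption and must match the application's NVIC priority layout:
#define TX_PORT_USE_BASEPRI
#define TX_PORT_BASEPRI 0x40 /* hypothetical: mask only interrupts with priority value >= 0x40 */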
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
#else
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_control(TX_INT_DISABLE);
#define TX_RESTORE _tx_thread_interrupt_control(interrupt_save);
#endif
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
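/* The macros above are the port's critical-section primitives. A minimal
   usage sketch follows; demo_counter and demo_increment are illustrative
   only, not part of the port. */

static ULONG demo_counter;

static VOID demo_increment(VOID)
{
TX_INTERRUPT_SAVE_AREA

    TX_DISABLE                /* interrupt_save = __disable_interrupts(); */
    demo_counter++;           /* Critical section: kernel interrupts masked. */
    TX_RESTORE                /* __restore_interrupt(interrupt_save); */
}

/* When TX_PORT_USE_BASEPRI is defined (typically in tx_user.h, together with
   a TX_PORT_BASEPRI mask value such as the illustrative 0x40), TX_DISABLE
   raises BASEPRI instead of setting PRIMASK, so interrupts at higher priority
   than TX_PORT_BASEPRI keep running but must not call ThreadX services. */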
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M33 Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];

View File

@@ -42,7 +42,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/

View File

@@ -34,7 +34,7 @@ HEAP_SIZE = 0x00000000
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level Cortex-M33/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -68,6 +68,8 @@ HEAP_SIZE = 0x00000000
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
// VOID _tx_initialize_low_level(VOID)
@@ -218,7 +220,7 @@ _unhandled_usage_loop:
// Handle stack overflow
STR r1, [r0] // Clear CFSR flag(s)
#ifdef __ARM_FP
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
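// (Clearing LSPACT discards the pending lazy floating-point state save, so
// the fault handler does not write FP context through the overflowed stack.)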

View File

@@ -26,7 +26,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M33/GNU */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -65,6 +65,8 @@
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -86,7 +88,7 @@ _tx_thread_schedule:
LDR r2, =_tx_thread_preempt_disable // Build address of preempt disable flag
STR r0, [r2, #0] // Clear preempt disable flag
#ifdef __ARM_FP
/* Clear CONTROL.FPCA bit so VFP registers aren't unnecessarily stacked. */
MRS r0, CONTROL // Pickup current CONTROL register
BIC r0, r0, #4 // Clear the FPCA bit
@@ -145,7 +147,7 @@ __tx_ts_handler:
STR r3, [r0] // Set _tx_thread_current_ptr to NULL
MRS r12, PSP // Pickup PSP pointer (thread's stack pointer)
STMDB r12!, {r4-r11} // Save its remaining registers
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_save
VSTMDB r12!,{s16-s31} // Yes, save additional VFP registers
@@ -230,7 +232,7 @@ _skip_secure_restore:
MSR PSPLIM, r12 // Set stack limit
LDR r12, [r1, #8] // Pickup thread's stack pointer
LDMIA r12!, {LR} // Pickup LR
#ifdef __ARM_FP
TST LR, #0x10 // Determine if the VFP extended frame is present
BNE _skip_vfp_restore // If not, skip VFP restore
VLDMIA r12!, {s16-s31} // Yes, restore additional VFP registers

View File

@@ -23,7 +23,7 @@
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
@@ -44,8 +44,14 @@
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
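/* The seal value 0xFEF5EDA5 follows ARM's secure stack sealing guidance for
   ARMv8-M: it is written just above the top of the secure stack so an unwind
   or stack-pointer error running past the top hits a known invalid value.
   Layout produced by the allocation below (stack grows toward lower addresses):

       stack_mem + stack_size  ->  TX_THREAD_STACK_SEAL_VALUE  (highest address)
       ...                         secure stack, grows downward
       stack_mem               ->  stack limit                 (lowest address)
*/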
/* Maximum number of secure contexts */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
@@ -53,8 +59,14 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of free secure context */
static INT tx_head_free_index = 0U;
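/* The per-thread calloc/free of info structs is replaced by this fixed array
   linked into a free list through tx_next_free_index. A minimal sketch of the
   two list operations the functions below perform; the helper names are
   illustrative only, as the real code open-codes these bodies between
   TX_DISABLE/TX_RESTORE because the list is shared state. */

/* Pop the free-list head; returns TX_INVALID_SECURE_CONTEXT_IDX when exhausted. */
static INT context_pop(void)
{
INT index = tx_head_free_index;

    if (index != TX_INVALID_SECURE_CONTEXT_IDX)
    {
        tx_head_free_index = tx_thread_secure_context[index].tx_next_free_index;
        tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
    }
    return(index);
}

/* Push a context back onto the free list. */
static void context_push(INT index)
{
    tx_thread_secure_context[index].tx_next_free_index = tx_head_free_index;
    tx_head_free_index = index;
}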
/**************************************************************************/
@@ -62,7 +74,7 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M33/GNU */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -98,6 +110,9 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* handler mode, */
/* disable optimizations, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry, optimize(0)))
@@ -106,6 +121,7 @@ UINT _tx_thread_secure_mode_stack_initialize(void)
UINT status;
ULONG control;
ULONG ipsr;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
@@ -119,12 +135,26 @@ ULONG ipsr;
asm volatile("MRS %0, CONTROL" : "=r" (control)); /* Get CONTROL register. */
control |= 2; /* Use PSP. */
asm volatile("MSR CONTROL, %0" :: "r" (control)); /* Set CONTROL register. */
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* If this is the last index, mark its next-free link as invalid */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
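/* Illustration: with TX_MAX_SECURE_CONTEXTS reduced to 4, the loop above
   leaves tx_head_free_index = 0 and the chain 0 -> 1 -> 2 -> 3 -> -1:

       index:               0    1    2    3
       tx_next_free_index:  1    2    3   -1  (TX_INVALID_SECURE_CONTEXT_IDX)
*/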
@@ -137,7 +167,7 @@ ULONG ipsr;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M33/GNU */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -160,9 +190,7 @@ ULONG ipsr;
/* */
/* CALLS */
/* */
/* malloc Compiler's malloc function */
/* free Compiler's free() function */
/* */
/* CALLED BY */
/* */
@@ -176,6 +204,9 @@ ULONG ipsr;
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -186,9 +217,10 @@ TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
ULONG ipsr;
ULONG psplim_ns;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
@@ -199,23 +231,38 @@ ULONG psplim_ns;
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
@@ -223,13 +270,13 @@ ULONG psplim_ns;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (i.e non-zero base index) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
asm volatile("MRS %0, PSPLIM_NS" : "=r" (psplim_ns)); /* Get PSPLIM_NS register. */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == psplim_ns)
@@ -239,21 +286,26 @@ ULONG psplim_ns;
asm volatile("MSR PSP, %0" :: "r" ((ULONG)(info_ptr -> tx_thread_secure_stack_ptr)));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
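/* Note the +1 bias when the context id is stored: index 0 is a valid array
   slot, but tx_thread_secure_stack_context must stay 0 to mean "no secure
   stack", which is what the allocation check and TX_THREAD_DELETE_EXTENSION
   test. Encode/decode pair used throughout these functions:

   thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
   secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
*/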
@@ -264,7 +316,7 @@ ULONG psplim_ns;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M33/GNU */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -298,46 +350,67 @@ ULONG psplim_ns;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
if (ipsr == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
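/* Illustrative only: how an application would use the secure stack API that
   these CMSE entry functions back (tx_thread_secure_stack_allocate maps to
   _txe_thread_secure_stack_allocate when error checking is enabled; the
   stack size here is arbitrary). */

static UINT demo_secure_stack(TX_THREAD *demo_thread)
{
UINT status;

    /* After tx_thread_create(), give the thread a secure stack. */
    status = tx_thread_secure_stack_allocate(demo_thread, 512);
    if (status == TX_SUCCESS)
    {
        /* Release it when no longer needed; TX_THREAD_DELETE_EXTENSION
           also frees it when the thread is deleted. */
        status = tx_thread_secure_stack_free(demo_thread);
    }
    return(status);
}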
@@ -348,7 +421,7 @@ ULONG ipsr;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M33/GNU */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -382,6 +455,9 @@ ULONG ipsr;
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -390,6 +466,7 @@ void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
@@ -397,32 +474,38 @@ ULONG ipsr;
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
asm volatile("MRS %0, PSP" : "=r" (sp)); /* Get PSP register. */
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
asm volatile("MSR PSPLIM, %0" :: "r" (0));
asm volatile("MSR PSP, %0" :: "r" (0));
return;
}
@@ -433,7 +516,7 @@ ULONG ipsr;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M33/GNU */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -465,6 +548,9 @@ ULONG ipsr;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -472,6 +558,7 @@ void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG ipsr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
asm volatile("MRS %0, IPSR" : "=r" (ipsr)); /* Get IPSR register. */
@@ -479,20 +566,26 @@ ULONG ipsr;
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
asm volatile("MSR PSPLIM, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_limit));
asm volatile("MSR PSP, %0" :: "r" ((ULONG)info_ptr -> tx_thread_secure_stack_ptr));
return;
}

View File

@@ -25,8 +25,8 @@
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
/* 6.1.10 */
/* */
/* AUTHOR */
/* */
@@ -43,6 +43,9 @@
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* This file replaces the previous Cortex-M33 files. It unifies */
/* the Cortex-M33 compilers into one common file. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
@@ -51,13 +54,20 @@
/* 03-02-2021 Scott Larson Modified comment(s), added */
/* ULONG64_DEFINED, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Modified comment(s), removed */
/* unneeded header file, funcs */
/* set_control and get_control */
/* changed to inline, */
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
@@ -67,19 +77,30 @@
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif /* TX_INCLUDE_USER_DEFINE_FILE */
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
#ifdef __ICCARM__
#include <intrinsics.h> /* IAR Intrinsics */
#define __asm__ __asm /* Define to make all inline asm from each compiler look similar */
#define _tx_control_get __get_CONTROL
#define _tx_control_set __set_CONTROL
#define _tx_ipsr_get __get_IPSR
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
#include <yvals.h>
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#endif /* __ICCARM__ */
#ifdef __ARMCOMPILER_VERSION
#include <arm_compat.h>
#endif
@@ -104,21 +125,17 @@ UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the system API mappings based on the error checking
selected by the user. Note: this section is only applicable to
application source code, hence the conditional that turns off this
stuff when the include file is processed by the ThreadX source. */
#ifndef TX_SOURCE_CODE
/* Determine if error checking is desired. If so, map API functions
to the appropriate error checking front-ends. Otherwise, map API
functions to the core functions that actually perform the work.
Note: error checking is enabled by default. */
#ifdef TX_DISABLE_ERROR_CHECKING
@@ -135,10 +152,11 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
#define tx_thread_secure_stack_free _txe_thread_secure_stack_free
#endif /* TX_DISABLE_ERROR_CHECKING */
#endif /* TX_SOURCE_CODE */
/* This port has a usage fault handler in _tx_initialize_low_level for stack exceptions. */
#define TX_PORT_THREAD_STACK_ERROR_HANDLING
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
@@ -163,19 +181,19 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX Cortex-M port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -214,7 +232,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -228,7 +246,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
@@ -255,10 +273,11 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_THREAD_EXTENSION_2
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
@@ -270,7 +289,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
@@ -284,34 +303,35 @@ ULONG _tx_misra_time_stamp_get(VOID);
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
#define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
#define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
#endif
#if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__)
#ifdef TX_MISRA_ENABLE
@@ -320,107 +340,128 @@ void _tx_misra_control_set(ULONG value);
ULONG _tx_misra_fpccr_get(void);
void _tx_misra_vfp_touch(void);
#else /* TX_MISRA_ENABLE not defined */
#ifdef __GNUC__ /* GCC and ARM Compiler 6 */
__attribute__( ( always_inline ) ) static inline ULONG _tx_control_get(void)
{
ULONG control_value;
__asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
return(control_value);
}
__attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG control_value)
{
__asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
#endif /* __GNUC__ */
/* Touch VFP register in order to flush. Works for AC6/GCC/IAR compilers. */
#define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
#endif /* TX_MISRA_ENABLE */
/* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
in order to ensure no lazy stacking will occur. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
}
#else
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
}
#endif
/* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
the lazy FPU save, then restore the CONTROL.FPCA state. */
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = *((ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_control_set(_tx_vfp_state); \
} \
} \
} \
}
#else
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
ULONG _tx_system_state; \
_tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
_tx_fpccr = _tx_misra_fpccr_get(); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_misra_vfp_touch(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
_tx_vfp_state = _tx_misra_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
_tx_misra_control_set(_tx_vfp_state); \
} \
} \
} \
}
#endif
#else /* No VFP in use */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
#endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__ARM_FP) || defined(__TARGET_FPU_VFP) || defined(__VFP__) */
/* Define the ThreadX object creation extensions for the remaining objects. */
@@ -445,16 +486,27 @@ void _tx_misra_vfp_touch(void);
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Define the get system state macro. */
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#if defined(__GNUC__) /* GCC and AC6 */
__attribute__( ( always_inline ) ) static inline UINT _tx_ipsr_get(void)
{
UINT ipsr_value;
__asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
return(ipsr_value);
}
#endif /* GCC and AC6 IPSR_get function. */
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_ipsr_get())
#else /* TX_MISRA_ENABLE is defined, use MISRA function. */
ULONG _tx_misra_ipsr_get(VOID);
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
#endif /* TX_MISRA_ENABLE */
#endif /* TX_THREAD_GET_SYSTEM_STATE */
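/* TX_THREAD_GET_SYSTEM_STATE() ORs the kernel's state word with IPSR, so it
   is non-zero during initialization and inside any exception handler, and
   zero only at thread level. The completed/terminated extensions earlier in
   this file use exactly this check:

   if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
   {
       ... thread-level code, safe to touch this thread's own context ...
   }
*/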
/* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
@@ -472,31 +524,102 @@ extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
/* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
#ifdef __ICCARM__ /* IAR Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
#elif defined(__GNUC__) /* GCC and AC6 Compiler */
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
__asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
#else
#error "Compiler not supported."
#endif
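/* Both variants compute the index of the lowest set bit by bit-reversing the
   word and counting leading zeros. Worked example (values illustrative):

   UINT map = 0x90;                        bits 4 and 7 set
   UINT bit;
   TX_LOWEST_SET_BIT_CALCULATE(map, bit)   RBIT(0x90) = 0x09000000; CLZ -> 4

   bit is now 4; note the GCC/AC6 variant also leaves map bit-reversed. */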
/* Define ThreadX interrupt lockout and restore macros for protection on
access of critical kernel information. The restore interrupt macro must
restore the interrupt posture of the running thread to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
#ifndef TX_DISABLE_INLINE
/* Define the interrupt disable/restore macros. */
__attribute__( ( always_inline ) ) static inline UINT __get_interrupt_posture(void)
{
UINT posture;
#ifdef TX_PORT_USE_BASEPRI
__asm__ volatile ("MRS %0, BASEPRI ": "=r" (posture));
#else
__asm__ volatile ("MRS %0, PRIMASK ": "=r" (posture));
#endif
return(posture);
}
#ifdef TX_PORT_USE_BASEPRI
__attribute__( ( always_inline ) ) static inline void __set_basepri_value(UINT basepri_value)
{
__asm__ volatile ("MSR BASEPRI,%0 ": : "r" (basepri_value));
}
#else
__attribute__( ( always_inline ) ) static inline void __enable_interrupts(void)
{
__asm__ volatile ("CPSIE i": : : "memory");
}
#endif
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(UINT int_posture)
{
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(int_posture);
#else
__asm__ volatile ("MSR PRIMASK,%0": : "r" (int_posture): "memory");
#endif
}
__attribute__( ( always_inline ) ) static inline UINT __disable_interrupts(void)
{
UINT int_posture;
int_posture = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(TX_PORT_BASEPRI);
#else
__asm__ volatile ("CPSID i" : : : "memory");
#endif
return(int_posture);
}
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
*((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
#ifdef TX_PORT_USE_BASEPRI
__set_basepri_value(0);
#else
__enable_interrupts();
#endif
__restore_interrupt(interrupt_save);
}
}
#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
#define TX_DISABLE interrupt_save = __disable_interrupts();
#define TX_RESTORE __restore_interrupt(interrupt_save);
/* Redefine _tx_thread_system_return for improved performance. */
#define _tx_thread_system_return _tx_thread_system_return_inline
#else /* TX_DISABLE_INLINE is defined */
UINT _tx_thread_interrupt_disable(VOID);
VOID _tx_thread_interrupt_restore(UINT previous_posture);
@@ -504,49 +627,14 @@ VOID _tx_thread_interrupt_restore(UIN
#define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
#endif /* TX_DISABLE_INLINE */
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M33 Version 6.1.10 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];

View File

@@ -42,7 +42,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* */
/**************************************************************************/

View File

@@ -23,7 +23,7 @@
#include "tx_api.h"
/* If TX_SINGLE_MODE_SECURE or TX_SINGLE_MODE_NON_SECURE is defined,
no secure stack functionality is needed. */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
@@ -45,8 +45,14 @@
#define TX_THREAD_STACK_SEAL_SIZE 8
#define TX_THREAD_STACK_SEAL_VALUE 0xFEF5EDA5
/* Maximum number of secure contexts */
#ifndef TX_MAX_SECURE_CONTEXTS
#define TX_MAX_SECURE_CONTEXTS 32
#endif
#define TX_INVALID_SECURE_CONTEXT_IDX (-1)
/* Secure stack info struct to hold stack start, stack limit,
current stack pointer, and pointer to owning thread.
This will be allocated for each thread with a secure stack. */
typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
{
@@ -54,8 +60,14 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
VOID *tx_thread_secure_stack_start; /* Thread's secure stack start address */
VOID *tx_thread_secure_stack_limit; /* Thread's secure stack limit */
TX_THREAD *tx_thread_ptr; /* Keep track of thread for error handling */
INT tx_next_free_index; /* Next free index of free secure context */
} TX_THREAD_SECURE_STACK_INFO;
/* Static secure contexts */
static TX_THREAD_SECURE_STACK_INFO tx_thread_secure_context[TX_MAX_SECURE_CONTEXTS];
/* Head of free secure context */
static INT tx_head_free_index = 0U;
/**************************************************************************/
@@ -63,7 +75,7 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_initialize Cortex-M33/IAR */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -101,12 +113,16 @@ typedef struct TX_THREAD_SECURE_STACK_INFO_STRUCT
/* 06-02-2021 Scott Larson Change name, execute in */
/* handler mode, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
UINT _tx_thread_secure_mode_stack_initialize(void)
{
UINT status;
INT index;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
@@ -117,12 +133,26 @@ UINT status;
{
/* Set secure mode to use PSP. */
__set_CONTROL(__get_CONTROL() | 2);
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
for (index = 0; index < TX_MAX_SECURE_CONTEXTS; index++)
{
/* If this is the last index, mark its next-free link as invalid */
if(index == (TX_MAX_SECURE_CONTEXTS - 1))
{
tx_thread_secure_context[index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
tx_thread_secure_context[index].tx_next_free_index = index + 1;
}
}
status = TX_SUCCESS;
}
return status;
@@ -135,7 +165,7 @@ UINT status;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_allocate Cortex-M33/IAR */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -159,9 +189,7 @@ UINT status;
/* CALLS */
/* */
/* __get_IPSR Intrinsic to get IPSR */
/* malloc Compiler's malloc function */
/* free Compiler's free() function */
/* __set_PSPLIM Intrinsic to set PSP limit */
/* __set_PSP Intrinsic to set PSP */
/* __TZ_get_PSPLIM_NS Intrinsic to get NS PSP */
@@ -178,6 +206,9 @@ UINT status;
/* 10-16-2020 Scott Larson Modified comment(s), */
/* added stack sealing, */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -186,9 +217,10 @@ UINT _tx_thread_secure_mode_stack_allocate(TX_THREAD *thread_ptr, ULONG stack
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
UCHAR *stack_mem;
INT secure_context_index;
status = TX_SUCCESS;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
@@ -198,23 +230,38 @@ UCHAR *stack_mem;
{
status = TX_SIZE_ERROR;
}
/* Check if thread already has secure stack allocated. */
else if (thread_ptr -> tx_thread_secure_stack_context != 0)
{
status = TX_THREAD_ERROR;
}
else
{
TX_DISABLE
/* Allocate free index for secure stack info. */
if(tx_head_free_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
secure_context_index = tx_head_free_index;
tx_head_free_index = tx_thread_secure_context[tx_head_free_index].tx_next_free_index;
tx_thread_secure_context[secure_context_index].tx_next_free_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
else
{
secure_context_index = TX_INVALID_SECURE_CONTEXT_IDX;
}
TX_RESTORE
if(secure_context_index != TX_INVALID_SECURE_CONTEXT_IDX)
{
info_ptr = &tx_thread_secure_context[secure_context_index];
/* If stack info allocated, allocate a stack & seal. */
stack_mem = malloc(stack_size + TX_THREAD_STACK_SEAL_SIZE);
if(stack_mem != TX_NULL)
{
/* Secure stack has been allocated, save in the stack info struct. */
@@ -222,13 +269,13 @@ UCHAR *stack_mem;
info_ptr -> tx_thread_secure_stack_start = stack_mem + stack_size;
info_ptr -> tx_thread_secure_stack_ptr = info_ptr -> tx_thread_secure_stack_start;
info_ptr -> tx_thread_ptr = thread_ptr;
/* Seal bottom of stack. */
*(ULONG*)info_ptr -> tx_thread_secure_stack_start = TX_THREAD_STACK_SEAL_VALUE;
/* Save secure context id (i.e non-zero base index) in thread. */
thread_ptr -> tx_thread_secure_stack_context = (VOID *)(secure_context_index + 1);
/* Check if this thread is running by looking at its stack start and PSPLIM_NS */
if(((ULONG) thread_ptr -> tx_thread_stack_start & 0xFFFFFFF8) == __TZ_get_PSPLIM_NS())
{
@@ -237,21 +284,26 @@ UCHAR *stack_mem;
__set_PSP((ULONG)(info_ptr -> tx_thread_secure_stack_ptr));
}
}
else
{
TX_DISABLE
/* Stack not allocated, free the info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
status = TX_NO_MEMORY;
}
}
else
{
status = TX_NO_MEMORY;
}
}
return(status);
}
@@ -262,7 +314,7 @@ UCHAR *stack_mem;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_mode_stack_free Cortex-M33/IAR */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -297,6 +349,9 @@ UCHAR *stack_mem;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -304,37 +359,54 @@ UINT _tx_thread_secure_mode_stack_free(TX_THREAD *thread_ptr)
{
UINT status;
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index;
status = TX_SUCCESS;
/* Pickup stack info id from thread. */
secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* Make sure function is called from interrupt (threads should not call). */
if (__get_IPSR() == 0)
{
status = TX_CALLER_ERROR;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
status = TX_THREAD_ERROR;
}
else
{
/* Pickup stack info from static array of secure contexts. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
status = TX_THREAD_ERROR;
}
else
{
/* Free secure stack. */
free(info_ptr -> tx_thread_secure_stack_limit);
TX_DISABLE
/* Free info struct. */
tx_thread_secure_context[secure_context_index].tx_next_free_index = tx_head_free_index;
tx_head_free_index = secure_context_index;
TX_RESTORE
/* Clear secure context from thread. */
thread_ptr -> tx_thread_secure_stack_context = 0;
}
}
return(status);
}
@@ -345,7 +417,7 @@ TX_THREAD_SECURE_STACK_INFO *info_ptr;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_save Cortex-M33/IAR */
/* 6.1.7 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -382,6 +454,9 @@ TX_THREAD_SECURE_STACK_INFO *info_ptr;
/* resulting in version 6.1.1 */
/* 06-02-2021 Scott Larson Fix stack pointer save, */
/* resulting in version 6.1.7 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
@@ -389,38 +464,45 @@ void _tx_thread_secure_stack_context_save(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
ULONG sp;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Check that stack pointer is in range */
sp = __get_PSP();
if ((sp < (ULONG)info_ptr -> tx_thread_secure_stack_limit) ||
(sp > (ULONG)info_ptr -> tx_thread_secure_stack_start))
{
return;
}
/* Save stack pointer. */
info_ptr -> tx_thread_secure_stack_ptr = (VOID *) sp;
/* Set process stack pointer and stack limit to 0 to throw exception when a thread
without a secure stack calls a secure function that tries to use secure stack. */
__set_PSPLIM(0);
__set_PSP(0);
return;
}
@@ -431,7 +513,7 @@ ULONG sp;
/* FUNCTION RELEASE */
/* */
/* _tx_thread_secure_stack_context_restore Cortex-M33/IAR */
/* 6.1.1 */
/* 6.1.10 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -465,32 +547,42 @@ ULONG sp;
/* 09-30-2020 Scott Larson Initial Version 6.1 */
/* 10-16-2020 Scott Larson Modified comment(s), */
/* resulting in version 6.1.1 */
/* 01-31-2022 Himanshu Gupta Modified comment(s), updated */
/* secure stack allocation, */
/* resulting in version 6.1.10 */
/* */
/**************************************************************************/
__attribute__((cmse_nonsecure_entry))
void _tx_thread_secure_stack_context_restore(TX_THREAD *thread_ptr)
{
TX_THREAD_SECURE_STACK_INFO *info_ptr;
INT secure_context_index = (INT)thread_ptr -> tx_thread_secure_stack_context - 1;
/* This function should be called from scheduler only. */
if (__get_IPSR() == 0)
{
return;
}
/* Check if secure context index is in valid range. */
else if (secure_context_index < 0 || secure_context_index >= TX_MAX_SECURE_CONTEXTS)
{
return;
}
/* Pickup the secure context pointer. */
info_ptr = &tx_thread_secure_context[secure_context_index];
/* Check that this secure context is for this thread. */
if (info_ptr -> tx_thread_ptr != thread_ptr)
{
return;
}
/* Set stack pointer and limit. */
__set_PSPLIM((ULONG)info_ptr -> tx_thread_secure_stack_limit);
__set_PSP ((ULONG)info_ptr -> tx_thread_secure_stack_ptr);
return;
}