Release 6.1.11

Yuxin Zhou
2022-04-20 05:07:02 +00:00
parent f851772ce0
commit cef9cb22a5
784 changed files with 57616 additions and 101103 deletions

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
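The 74 lines the hunk above removes are the wait-list bookkeeping that _tx_thread_context_restore needed while FIFO queueing existed. As orientation only, a minimal C sketch of that deleted logic follows; the struct layout (lock flag at offset 0, owning core at 4, nesting count at 8) and the _sketch names are inferred from the assembly comments and LDR/STR offsets, not taken from the real ThreadX headers.
#include <stdint.h>
/* Assumed layout, inferred from the [x2, #0] / [x2, #4] / [x2, #8] accesses above. */
typedef struct
{
    volatile uint32_t tx_thread_smp_protect_in_force;    /* offset 0: lock flag     */
    volatile uint32_t tx_thread_smp_protect_core;        /* offset 4: owning core   */
    volatile uint32_t tx_thread_smp_protect_count;       /* offset 8: nesting count */
} TX_SMP_PROTECT_SKETCH;
static TX_SMP_PROTECT_SKETCH _tx_thread_smp_protection_sketch;
static volatile uint32_t     _tx_thread_smp_protect_wait_counts_sketch[4];   /* one slot per core */
/* Sketch of the deleted cleanup, run while restoring a preempted thread's context. */
static void context_restore_wait_cleanup_sketch(uint32_t this_core)
{
    if (_tx_thread_smp_protect_wait_counts_sketch[this_core] == 0u)
        return;                                           /* thread was not waiting for the lock */
    if (_tx_thread_smp_protection_sketch.tx_thread_smp_protect_core != this_core)
    {
        /* The interrupted ISR never took the inter-core lock: the real code calls the
           _tx_thread_smp_protect_wait_list_remove macro to take this core off the list. */
        return;
    }
    /* The ISR kept the lock because this core was waiting: release it now. */
    _tx_thread_smp_protect_wait_counts_sketch[this_core]--;
    _tx_thread_smp_protection_sketch.tx_thread_smp_protect_core = 0xFFFFFFFFu;   /* invalid core */
    __atomic_store_n(&_tx_thread_smp_protection_sketch.tx_thread_smp_protect_in_force,
                     0u, __ATOMIC_RELEASE);               /* stands in for the DMB/STR/DSB sequence */
    /* Under TX_ENABLE_WFE the assembly then issues SEV to wake cores parked in WFE. */
}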

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
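Most of the 293 removed lines above implemented the FIFO wait list; what 6.1.11 keeps is a plain spin on the protection flag. The C sketch below mirrors that retained path (mask interrupts, check ownership, LDAXR/STXR attempt, WFE and retry, then record the owner and bump the nesting count). C11 atomics stand in for the exclusives, DAIF masking and WFE/SEV are reduced to comments, and every _sketch name is illustrative rather than a real ThreadX symbol.
#include <stdatomic.h>
#include <stdint.h>
typedef struct
{
    atomic_uint tx_thread_smp_protect_in_force;           /* offset 0: lock flag     */
    uint32_t    tx_thread_smp_protect_core;               /* offset 4: owning core   */
    uint32_t    tx_thread_smp_protect_count;              /* offset 8: nesting count */
} TX_SMP_PROTECT_LOCK_SKETCH;
static TX_SMP_PROTECT_LOCK_SKETCH _tx_thread_smp_protect_lock_sketch;
/* Returns the saved interrupt posture (x0 / DAIF in the assembly) for unprotect. */
static uint64_t smp_protect_sketch(uint32_t this_core)
{
    uint64_t saved_posture = 0u;                           /* MRS x0, DAIF            */
    for (;;)
    {
        /* MSR DAIFSet, 0x3: interrupts are masked before the lock is touched.   */
        if (_tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_core == this_core)
            break;                                         /* already owned: just nest */
        unsigned int expected = 0u;
        if (atomic_compare_exchange_strong(&_tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_in_force,
                                           &expected, 1u))
        {
            /* STXR succeeded; after DMB ISH the owning core is recorded.         */
            _tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_core = this_core;
            break;
        }
        /* Lock busy: MSR DAIF, x0 restores interrupts, WFE parks the core when
           TX_ENABLE_WFE is defined, and the loop restarts at _tx_thread_smp_protect. */
    }
    _tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_count++;
    return saved_posture;
}
The nesting count is what lets the same core take the protection recursively; only the matching outermost unprotect releases the flag.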

View File

@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
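The hunk above drops the per-core wait-count test from _tx_thread_smp_unprotect. Continuing the declarations from the protect sketch earlier, the retained release path would read roughly as follows; the preempt-disable check and the DAIF restore are reduced to comments, and this remains a hedged sketch rather than the actual implementation.
static void smp_unprotect_sketch(uint64_t saved_posture)
{
    /* The real code first re-checks ownership and the preempt-disable flag;
       both tests are elided here.                                               */
    if (--_tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_count == 0u)
    {
        _tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_core = 0xFFFFFFFFu;   /* invalid core  */
        atomic_store_explicit(&_tx_thread_smp_protect_lock_sketch.tx_thread_smp_protect_in_force,
                              0u, memory_order_release);                                /* drop the lock */
        /* SEV (under TX_ENABLE_WFE) then wakes any core parked in WFE.           */
    }
    (void)saved_posture;  /* MSR DAIF, x0 in the assembly restores the interrupt posture here. */
}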

View File

@@ -131,7 +131,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1275109624" superClass="com.arm.tool.c.compiler.v6.base.input"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.1275455284" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
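Like every other SMP port touched by this commit, the routine above derives the logical CPU ID from MPIDR_EL1: with TX_ARMV8_2 the core number sits in Aff1 (bits 15:8) and the cluster in Aff2 (bits 23:16), otherwise they come from Aff0/Aff1, and on multi-cluster builds the ID becomes core + cluster * 4 (the ADDS ... LSL #2). A small C model of that decode, purely illustrative and assuming four cores per cluster as the shift implies:

/* Illustrative decode of MPIDR_EL1 into the ThreadX CPU ID; four cores per
   cluster is assumed, matching the LSL #2 scaling in the assembly. */
static unsigned int cpu_id_from_mpidr(unsigned long long mpidr,
                                      int armv8_2, int multi_cluster)
{
    unsigned int core, cluster;

    if (armv8_2)
    {
        core    = (unsigned int)((mpidr >> 8)  & 0xFF);   /* UBFX x1, x1, #8, #8  */
        cluster = (unsigned int)((mpidr >> 16) & 0xFF);   /* UBFX x7, x1, #16, #8 */
    }
    else
    {
        core    = (unsigned int)( mpidr        & 0xFF);   /* UBFX x1, x1, #0, #8  */
        cluster = (unsigned int)((mpidr >> 8)  & 0xFF);   /* UBFX x7, x1, #8, #8  */
    }

    return multi_cluster ? core + (cluster << 2)          /* ADDS x1, x1, x7, LSL #2 */
                         : core;
}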

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a53" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a53" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a53" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1409269723" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
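The block removed from __tx_thread_preempt_restore above is the FIFO-queue cleanup that is no longer needed once the wait list is gone: if the preempted thread was still counted as waiting, the core either drops out of the list (the ISR never took the inter-core lock) or releases the lock the ISR held on its behalf. Roughly, in C, with helper arguments standing in for the removed assembly macros (a sketch drawn from the commented-out pseudocode, not the actual implementation):

/* Hypothetical C rendering of the removed cleanup; names follow the
   commented-out pseudocode above, the helpers stand in for the asm macros. */
typedef unsigned int ULONG;

typedef struct
{
    volatile ULONG in_force;   /* [x2, #0] protection flag  */
    volatile ULONG core;       /* [x2, #4] owning core      */
    volatile ULONG count;      /* [x2, #8] ownership count  */
} PROT_SKETCH;

static void preempt_restore_cleanup(PROT_SKETCH *prot,
                                    volatile ULONG *wait_counts,
                                    ULONG this_core,
                                    void (*wait_list_remove)(ULONG))
{
    if (wait_counts[this_core] == 0)
        return;                                    /* thread was not waiting       */

    if (prot->core != this_core)
    {
        wait_list_remove(this_core);               /* ISR never got the lock: just
                                                      drop out of the FIFO         */
    }
    else
    {
        wait_counts[this_core]--;                  /* should now be zero           */
        prot->core = 0xFFFFFFFF;                   /* mark protected core invalid  */
        __atomic_store_n(&prot->in_force, 0,
                         __ATOMIC_RELEASE);        /* DMB/DSB + STR in the asm     */
        /* with TX_ENABLE_WFE the asm then issues SEV to wake waiting cores */
    }
}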

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
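What replaces the queueing logic in _tx_thread_smp_protect is a plain test-and-set spin with interrupts masked only for the attempt itself: check for recursive ownership, try an LDAXR/STXR exchange on tx_thread_smp_protect_in_force, and on failure restore the saved DAIF posture and park in WFE until the owner's SEV. A hedged C rendering of that loop follows; the irq/wfe callbacks are stand-ins for the MSR DAIFSet / MSR DAIF / WFE instructions, since the port itself stays in assembly.

/* Hedged C rendering of the streamlined 6.1.11 acquire loop. */
typedef unsigned int ULONG;

typedef struct
{
    volatile ULONG in_force;   /* [x1, #0] protection flag  */
    volatile ULONG core;       /* [x1, #4] owning core      */
    volatile ULONG count;      /* [x1, #8] nesting count    */
} PROT_SKETCH;

static ULONG smp_protect_sketch(PROT_SKETCH *prot, ULONG this_core,
                                ULONG (*irq_lockout)(void),     /* returns old DAIF    */
                                void  (*irq_restore)(ULONG),
                                void  (*wait_for_event)(void))  /* WFE, may be a no-op */
{
    for (;;)
    {
        ULONG posture = irq_lockout();             /* MRS x0, DAIF / MSR DAIFSet, 0x3 */

        if (prot->core == this_core)               /* recursive call by the owner     */
        {
            prot->count++;
            return posture;
        }

        ULONG expected = 0;                        /* LDAXR ... CBZ ... STXR          */
        if (__atomic_compare_exchange_n(&prot->in_force, &expected, 1, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        {
            prot->core  = this_core;               /* STR w2, [x1, #4]                */
            prot->count++;                         /* first nesting level             */
            return posture;                        /* caller hands it to unprotect    */
        }

        irq_restore(posture);                      /* lock busy: let interrupts in    */
        wait_for_event();                          /* park until the owner's SEV      */
    }
}

Dropping the FIFO trades strict first-come ordering between cores for a much shorter critical path around the exclusive monitor, which appears to be what the "removed FIFO queueing" entries in the change histories refer to.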

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
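Only part of _tx_thread_smp_unprotect is visible in this hunk, but the ordering it preserves matters: the owning-core field is invalidated before the in_force flag is cleared, and the clear itself must behave as a release so a core waking from WFE never observes the lock free while the owner field is stale. A minimal sketch of that release step, assuming the same three-word layout used by the protect routine and that the remainder of the function matches its usual shape:

/* Sketch of the release ordering; only the invalidate/clear pair is shown in
   the hunk above, the rest of _tx_thread_smp_unprotect is assumed. */
typedef unsigned int ULONG;

typedef struct
{
    volatile ULONG in_force;   /* [x2, #0] */
    volatile ULONG core;       /* [x2, #4] */
    volatile ULONG count;      /* [x2, #8] */
} PROT_SKETCH;

static void smp_unprotect_release(PROT_SKETCH *prot)
{
    prot->core = 0xFFFFFFFF;                                 /* mark owner invalid   */
    __atomic_store_n(&prot->in_force, 0, __ATOMIC_RELEASE);  /* publish the release  */
#ifdef TX_ENABLE_WFE
    __asm__ volatile ("sev" ::: "memory");                   /* wake any WFE waiters */
#endif
}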

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a55" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a55" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a55" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1630359210" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
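For reference, the block deleted above from __tx_thread_preempt_restore reduces to roughly the following C, reconstructed from its own pseudocode comments. It reuses the protection-structure declaration from the sketch earlier in this change set; wait_list_remove() and send_event() are hypothetical stand-ins for the _tx_thread_smp_protect_wait_list_remove macro and the SEV instruction, and the DMB/DSB barriers are omitted here.

extern unsigned int _tx_thread_smp_protect_wait_counts[];   /* per-core wait counts, removed in 6.1.11 */
extern void wait_list_remove(unsigned int core);            /* stand-in for the wait-list removal macro */
extern void send_event(void);                                /* stand-in for SEV                          */

static void preempt_restore_lock_cleanup_sketch(unsigned int this_core)   /* this_core is the CPU ID in x8 */
{
    if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
    {
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core != this_core)
        {
            /* The preempted thread was queued but the ISR never took the
               inter-core lock: just take this core back out of the wait list. */
            wait_list_remove(this_core);
        }
        else
        {
            /* The ISR took the lock and left it held because it saw a waiter;
               release it here on the thread's behalf. */
            _tx_thread_smp_protect_wait_counts[this_core]--;
            _tx_thread_smp_protection.tx_thread_smp_protect_core     = 0xFFFFFFFF;
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
            send_event();                        /* wake cores parked in WFE */
        }
    }
}

With the FIFO wait list gone in 6.1.11, none of this bookkeeping is needed on the preemption-restore path, which is why the whole block is removed.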

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
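To make the size of this deletion easier to follow, here is a condensed C view of the FIFO-queued acquire path that this commit removes, reconstructed from the pseudocode comments in the deleted lines. It reuses the stand-in helpers and the protection-structure declaration from the earlier sketch; wait_list_add() and remove_from_front_of_list() stand in for the corresponding assembly macros, and the re-entrant fast path and the re-queue-after-preemption handling are omitted for brevity.

extern unsigned int _tx_thread_smp_protect_wait_list[];      /* removed FIFO of waiting cores   */
extern unsigned int _tx_thread_smp_protect_wait_list_head;   /* removed head index              */
extern void wait_list_add(unsigned int core);                /* stand-in for the add macro      */
extern void remove_from_front_of_list(void);                 /* stand-in for the remove macro   */

static void old_fifo_acquire_sketch(unsigned int this_core)
{
    unsigned int interrupt_save = disable_interrupts();

    _tx_thread_smp_protect_wait_counts[this_core]++;
    if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
        wait_list_add(this_core);                             /* join the FIFO exactly once */

    for (;;)
    {
        /* An ISR that preempted this thread may already have taken the lock for this core. */
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core == this_core)
            break;

        /* Only the core at the head of the FIFO may attempt the exclusive store. */
        if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head] &&
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0 &&
            try_store_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1))
        {
            remove_from_front_of_list();
            break;
        }

        /* Not our turn, or the store failed: wait with interrupts enabled and retry. */
        restore_interrupts(interrupt_save);
        wait_for_event();
        interrupt_save = disable_interrupts();
    }

    _tx_thread_smp_protect_wait_counts[this_core]--;          /* _got_lock_after_waiting */
}

Comparing this with the simplified sketch above makes the trade-off explicit: the queue, the per-core wait counts and the head-of-list checks all disappear, at the cost of no longer granting the lock in arrival order.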

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
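The effect of the hunk above on _tx_thread_smp_unprotect is that releasing the protection is no longer gated on the (now removed) per-core wait count; only the preempt-disable check remains before the owning core is invalidated. A minimal sketch of just the lines shown, assuming w3 holds _tx_thread_preempt_disable and noting that the actual clearing of tx_thread_smp_protect_in_force and the interrupt restore happen later in the function, outside this hunk:

extern unsigned int _tx_thread_preempt_disable;   /* assumption: the flag tested before _still_protected */

static void unprotect_release_check_sketch(void)
{
    if (_tx_thread_preempt_disable == 0)           /* otherwise branch to _still_protected */
    {
        /* The per-core wait-count test that used to sit here is gone in 6.1.11. */
        _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
        /* tx_thread_smp_protect_in_force is cleared and interrupts are restored
           further down in the function (not shown in this hunk). */
    }
}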

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a57" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a57" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a57" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.153404379" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
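One detail worth calling out in the release sequence that keeps appearing in these deleted blocks is the barrier ordering: the owner field is invalidated, a DMB ISH makes sure all accesses to the protected data have completed, the in-force flag is cleared, a DSB ISH makes that store visible before other cores are woken, and only then is SEV issued. A small sketch of that ordering using the standard ACLE intrinsics (reusing the protection-structure declaration from the earlier sketch; this is an illustration of the pattern, not the kernel's own C code):

#include <arm_acle.h>   /* __dmb/__dsb/__sev */

static void release_and_wake_sketch(void)
{
    _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFFu;   /* mark core invalid        */
    __dmb(0xB);                                     /* DMB ISH: protected accesses complete first      */
    _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0u;        /* release the protection    */
    __dsb(0xB);                                     /* DSB ISH: release visible before waking others   */
#ifdef TX_ENABLE_WFE
    __sev();                                        /* wake cores parked in WFE                         */
#endif
}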

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/AC6 */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -79,7 +79,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/AC6 */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
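
A minimal C sketch of the simplified protect path that remains after the FIFO queueing removal follows. The structure layout is inferred from the #0/#4/#8 byte offsets used above, and disable_interrupts(), restore_interrupts(), try_lock_exclusive() and current_cpu_id() are illustrative placeholders for the MRS/MSR, LDAXR/STXR and MPIDR_EL1 sequences, not ThreadX APIs; DMB/DSB barriers are omitted for brevity:

    /* Assumed layout, inferred from the byte offsets used in the assembly above. */
    typedef struct TX_THREAD_SMP_PROTECT_SKETCH_STRUCT
    {
        volatile UINT   tx_thread_smp_protect_in_force;   /* offset 0: lock flag                         */
        volatile UINT   tx_thread_smp_protect_core;       /* offset 4: owning core, 0xFFFFFFFF when free */
        volatile UINT   tx_thread_smp_protect_count;      /* offset 8: nesting count                     */
    } TX_THREAD_SMP_PROTECT_SKETCH;

    extern TX_THREAD_SMP_PROTECT_SKETCH _tx_thread_smp_protection;

    UINT _tx_thread_smp_protect_sketch(VOID)
    {
    UINT interrupt_save;
    UINT this_core;

        interrupt_save = disable_interrupts();            /* MRS x0, DAIF / MSR DAIFSet, 0x3 */
        this_core = current_cpu_id();                     /* derived from MPIDR_EL1          */

        /* Spin until this core owns the protection. */
        while (_tx_thread_smp_protection.tx_thread_smp_protect_core != this_core)
        {
            if (try_lock_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force))
            {
                /* The LDAXR/STXR pair succeeded: record this core as the owner. */
                _tx_thread_smp_protection.tx_thread_smp_protect_core = this_core;
                break;
            }

            /* The lock is held elsewhere: let interrupts in (optionally WFE) and retry. */
            restore_interrupts(interrupt_save);
            interrupt_save = disable_interrupts();
        }

        /* Already owned or just acquired: bump the nesting count and return the saved posture. */
        _tx_thread_smp_protection.tx_thread_smp_protect_count++;
        return(interrupt_save);
    }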

View File

@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/AC6 */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
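
Only a fragment of the matching release path is visible in this hunk. For orientation, a heavily hedged sketch of what _tx_thread_smp_unprotect does once the wait-count test above is gone; the count handling and the final clearing of tx_thread_smp_protect_in_force are assumed from the acquisition side and are not shown in this hunk:

    /* Sketch only: drop one nesting level; fully release on the last one. */
    if (_tx_thread_smp_protection.tx_thread_smp_protect_count != 0)
    {
        _tx_thread_smp_protection.tx_thread_smp_protect_count--;
        if ((_tx_thread_smp_protection.tx_thread_smp_protect_count == 0) &&
            (_tx_thread_preempt_disable == 0))
        {
            _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;  /* no owner         */
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;       /* release the lock */
        }
    }
    restore_interrupts(interrupt_save);   /* placeholder for MSR DAIF, x0 */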

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/GCC */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -79,7 +79,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
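
The block deleted above is the 6.1.10 hand-off between an ISR and the thread it preempted while that thread was queued for the inter-core lock; with the FIFO wait list gone there is nothing left to clean up here. Rendered as C directly from the commented pseudocode in the removed lines (barriers and the TX_ENABLE_WFE SEV omitted), the removed logic was roughly:

    /* 6.1.10 behavior removed in 6.1.11 together with the FIFO wait list. */
    if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
    {
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core != this_core)
        {
            /* The ISR never took the inter-core lock: just remove this core from the queue. */
            _tx_thread_smp_protect_wait_list_remove(this_core);
        }
        else
        {
            /* The ISR took the lock and left it held for the waiting thread;
               that thread is now being preempted, so release the lock on its behalf. */
            _tx_thread_smp_protect_wait_counts[this_core]--;
            _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
        }
    }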

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/GCC */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
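
Each of these routines derives the scheduler's CPU index from MPIDR_EL1 with the same UBFX/ADDS pattern. A rough C equivalent of that derivation is shown below; the MPIDR value is assumed to be passed in and the helper name is illustrative, not a ThreadX API:

    static inline UINT map_mpidr_to_cpu_id(unsigned long long mpidr)
    {
    UINT cpu_id;

    #ifdef TX_ARMV8_2
        cpu_id = (UINT)((mpidr >> 8) & 0xFF);                           /* Aff1 holds the core ID */
    #if TX_THREAD_SMP_CLUSTERS > 1
        cpu_id = cpu_id + ((UINT)((mpidr >> 16) & 0xFF) << 2);          /* Aff2 cluster ID        */
    #endif
    #else
        cpu_id = (UINT)(mpidr & 0xFF);                                  /* Aff0 holds the core ID */
    #if TX_THREAD_SMP_CLUSTERS > 1
        cpu_id = cpu_id + ((UINT)((mpidr >> 8) & 0xFF) << 2);           /* Aff1 cluster ID        */
    #endif
    #endif
        return(cpu_id);
    }

The shift by two mirrors the ADDS x1, x1, x7, LSL #2 above and implies at most four cores per cluster in this mapping.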

View File

@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/GCC */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,9 +60,12 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* resulting in version 6.1.9 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 10-15-2021 Andres Mlinar Updated comments, */
/* resulting in version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -104,11 +107,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -21,17 +21,6 @@
/**************************************************************************/
/* #define TX_SOURCE_CODE */
/* Include necessary system files. */
/*
#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"
*/
EXTERN _tx_thread_system_state
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_execute_ptr
@@ -54,15 +43,12 @@
SECTION `.text`:CODE:NOROOT(3)
CODE
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/IAR */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -95,10 +81,13 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
/* VOID _tx_thread_context_restore(VOID)
{ */
// VOID _tx_thread_context_restore(VOID)
// {
PUBLIC _tx_thread_context_restore
_tx_thread_context_restore:
@@ -106,7 +95,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -132,8 +121,8 @@ _tx_thread_context_restore:
#endif
/* Determine if interrupts are nested. */
/* if (--_tx_thread_system_state)
{ */
// if (--_tx_thread_system_state)
// {
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x8, LSL #2] // Pickup system state
@@ -173,13 +162,13 @@ _tx_thread_context_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
/* } */
// }
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
/* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
|| (_tx_thread_preempt_disable))
{ */
// else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
// || (_tx_thread_preempt_disable))
// {
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, x8, LSL #3] // Pickup actual current thread pointer
@@ -205,7 +194,7 @@ __tx_thread_no_preempt_restore:
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
/* sp = _tx_thread_current_ptr -> tx_thread_stack_ptr; */
// sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -238,80 +227,11 @@ __tx_thread_no_preempt_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
/* }
else
{ */
// }
// else
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
/* if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
{ */
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
B.EQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
/* if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
{ */
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
B.EQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
/* _tx_thread_smp_protect_wait_list_remove(this_core); */
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
/* }
else
{ */
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
/* _tx_thread_smp_protect_wait_counts[core]--; */
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
/* _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF; */
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
/* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0; */
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
/* }
} */
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -358,27 +278,27 @@ _skip_fp_save:
/* Save the remaining time-slice and disable it. */
/* if (_tx_timer_time_slice)
{ */
// if (_tx_timer_time_slice)
// {
LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
LDR w2, [x3, x8, LSL #2] // Pickup time-slice
CMP w2, #0 // Is it active?
B.EQ __tx_thread_dont_save_ts // No, don't save it
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
_tx_timer_time_slice = 0; */
// _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
// _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
STR w2, [x3, x8, LSL #2] // Disable global time-slice flag
/* } */
// }
__tx_thread_dont_save_ts:
/* Clear the current task pointer. */
/* _tx_thread_current_ptr = TX_NULL; */
// _tx_thread_current_ptr = TX_NULL;
MOV x2, #0 // NULL value
STR x2, [x1, x8, LSL #3] // Clear current thread pointer
@@ -386,13 +306,13 @@ __tx_thread_dont_save_ts:
/* Set bit indicating this thread is ready for execution. */
MOV x2, #1 // Build ready flag
DMB ISH // Ensure that accesses to shared resource have completed
STR w2, [x0, #260] // Set thread's ready flag
DMB ISH // Ensure that accesses to shared resource have completed
/* Return to the scheduler. */
/* _tx_thread_schedule(); */
// _tx_thread_schedule();
/* } */
// }
__tx_thread_idle_system_restore:
@@ -415,5 +335,5 @@ __tx_thread_idle_system_restore:
#endif
#endif
ERET // Return to scheduler
/* } */
// }
END

View File

@@ -45,15 +45,13 @@
SECTION `.text`:CODE:NOROOT(3)
CODE
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/IAR */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -85,6 +83,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
PUBLIC _tx_thread_smp_protect
@@ -97,295 +98,51 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
/* if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
{ */
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
B.NE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
/* _tx_thread_smp_protection.tx_thread_smp_protect_count++; */
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
/* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
{ */
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
B.NE _start_waiting // No, protection not available
/* Is the list empty? */
/* if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
{ */
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
B.NE _list_not_empty
/* Try to get the lock. */
/* if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
{ */
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
B.NE _start_waiting // Did it fail?
/* We got the lock! */
/* _tx_thread_smp_protect_lock_got(); */
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
/* if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
{ */
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
B.NE _start_waiting
/* Is the lock still available? */
/* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
{ */
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
B.NE _start_waiting // No, protection not available
/* Get the lock. */
/* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1; */
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
B.NE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
/* _tx_thread_smp_protect_lock_got(); */
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
/* _tx_thread_smp_protect_remove_from_front_of_list(); */
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
/* _tx_thread_smp_protect_wait_counts[this_core]++; */
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
/* if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
{ */
CMP w4, #1
B.NE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
/* _tx_thread_smp_protect_wait_list_add(this_core); */
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* } */
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
B.EQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
/* while (1)
{ */
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
/* if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
{ */
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
B.EQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
/* } */
/* Are we at the front of the list? */
/* if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
{ */
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
B.NE _did_not_get_lock
/* Is the lock still available? */
/* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
{ */
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
B.NE _did_not_get_lock // No, protection not available
/* Get the lock. */
/* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1; */
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
B.NE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
/* _tx_thread_smp_protect_lock_got(); */
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
/* _tx_thread_smp_protect_remove_from_front_of_list(); */
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
/* if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
{ */
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
B.NE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
/* _tx_thread_smp_protect_wait_list_add(this_core); */
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
/* _tx_thread_smp_protect_wait_counts[this_core]++; */
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* } */
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
/* _tx_thread_smp_protect_wait_counts[this_core]--; */
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
END
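
For reference, the FIFO queueing state that this release removes consists of the small set of globals referenced throughout the deleted lines above. Their rough shape was as follows; the TX_THREAD_SMP_MAX_CORES bound is an assumption for illustration:

    /* 6.1.10 FIFO queueing state, removed in 6.1.11 (sketch). */
    ULONG _tx_thread_smp_protect_wait_list[TX_THREAD_SMP_MAX_CORES + 1];  /* circular queue of waiting core IDs          */
    ULONG _tx_thread_smp_protect_wait_list_head;                          /* index of the core next in line for the lock */
    ULONG _tx_thread_smp_protect_wait_list_tail;                          /* index of the first free slot                */
    ULONG _tx_thread_smp_protect_wait_counts[TX_THREAD_SMP_MAX_CORES];    /* nonzero while a core is queued              */

Only the core found at the head index was allowed to take the lock (the assembly above compares this_core against _tx_thread_smp_protect_wait_list[head]), which is what gave the old implementation its FIFO ordering; 6.1.11 drops this in favor of a plain exclusive-access spin.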

View File

@@ -21,19 +21,6 @@
/**************************************************************************/
/*
#define TX_SOURCE_CODE
#define TX_THREAD_SMP_SOURCE_CODE
*/
/* Include necessary system files. */
/*
#include "tx_api.h"
#include "tx_thread.h"
#include "tx_timer.h"
*/
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_smp_protection
EXTERN _tx_thread_preempt_disable
@@ -46,7 +33,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/IAR */
/* 6.1.9 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -79,6 +66,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
PUBLIC _tx_thread_smp_unprotect
@@ -119,11 +109,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
B.NE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
B.NE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1006193665" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
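
The block above is the FIFO wait-list cleanup being deleted from _tx_thread_context_restore. Roughly, following the commented pseudocode, the removed logic amounted to the C below; the wait-count array and the _tx_thread_smp_protect_wait_list_remove prototype are stand-ins for the assembly macro and data, shown only for illustration (TX_SMP_PROTECT as in the unprotect sketch earlier).

#include <stdint.h>

extern TX_SMP_PROTECT    _tx_thread_smp_protection;             /* layout as in the earlier sketch */
extern volatile uint32_t _tx_thread_smp_protect_wait_counts[];  /* assumed per-core wait counts    */

/* Hypothetical stand-in for the _tx_thread_smp_protect_wait_list_remove macro. */
void _tx_thread_smp_protect_wait_list_remove(uint32_t core);

static void context_restore_waitlist_cleanup_sketch(uint32_t this_core)
{
    /* Only relevant if the preempted thread was waiting for the inter-core lock. */
    if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
    {
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core != this_core)
        {
            /* The ISR never obtained the lock: take this core out of the FIFO. */
            _tx_thread_smp_protect_wait_list_remove(this_core);
        }
        else
        {
            /* The ISR obtained the lock and left it held for this core: drop it now. */
            _tx_thread_smp_protect_wait_counts[this_core]--;
            _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
            _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;   /* DMB/DSB + SEV in the assembly */
        }
    }
}
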

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
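
Taken together, the new _tx_thread_smp_protect loop reduces to a plain test-and-set spin: mask interrupts, check for recursive ownership, attempt the exclusive store, and otherwise restore interrupts (optionally WFE) and retry. Below is a minimal C11-atomics sketch of that flow, reusing the TX_SMP_PROTECT declaration from the unprotect sketch above; disable_interrupts(), restore_interrupts(), and current_core_id() are assumed helpers standing in for the DAIF and MPIDR_EL1 handling, not real ThreadX APIs.

#include <stdatomic.h>
#include <stdint.h>

extern TX_SMP_PROTECT _tx_thread_smp_protection;    /* layout as in the earlier sketch */

uint64_t disable_interrupts(void);                  /* MRS x0, DAIF / MSR DAIFSet, 0x3  (assumed helper) */
void     restore_interrupts(uint64_t posture);      /* MSR DAIF, x0                     (assumed helper) */
uint32_t current_core_id(void);                     /* MPIDR_EL1 -> linear core index   (assumed helper) */

static uint64_t smp_protect_sketch(void)
{
    for (;;)
    {
        uint64_t posture = disable_interrupts();
        uint32_t core    = current_core_id();

        if (_tx_thread_smp_protection.tx_thread_smp_protect_core == core)
        {
            /* Recursive request from the owning core: just bump the count. */
            _tx_thread_smp_protection.tx_thread_smp_protect_count++;
            return posture;
        }

        unsigned int expected = 0;
        if (atomic_compare_exchange_strong_explicit(
                &_tx_thread_smp_protection.tx_thread_smp_protect_in_force,
                &expected, 1U,
                memory_order_acquire, memory_order_relaxed))    /* LDAXR/STXR pair + DMB */
        {
            _tx_thread_smp_protection.tx_thread_smp_protect_core = core;
            _tx_thread_smp_protection.tx_thread_smp_protect_count++;
            return posture;           /* interrupts stay masked until the matching unprotect */
        }

        restore_interrupts(posture);  /* give ISRs a chance, then spin again (WFE if TX_ENABLE_WFE) */
    }
}
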

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1782798207" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a72" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a72" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a72" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1836219464" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
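
The LDAXR/STXR pairs above are the heart of the inter-core lock: LDAXR observes tx_thread_smp_protect_in_force, STXR attempts to claim it, and the DMB ISH that follows keeps later accesses behind the acquisition. The C sketch below shows the equivalent control flow for the acquire path only. The TX_PROTECT_SKETCH layout is inferred from the offsets used in the comments (#0 lock flag, #4 owning core, #8 ownership count), the GCC __atomic builtins stand in for the exclusive pair, and the interrupt masking and wait-list handling are deliberately omitted, so this is an illustration of the technique rather than the port's actual code.

    #include <stdint.h>

    typedef struct
    {
        volatile uint32_t in_force;   /* offset #0: lock flag                        */
        volatile uint32_t core;       /* offset #4: owning core (0xFFFFFFFF = none)  */
        volatile uint32_t count;      /* offset #8: ownership (nesting) count        */
    } TX_PROTECT_SKETCH;

    extern TX_PROTECT_SKETCH _tx_thread_smp_protection;   /* field names shortened for the sketch */

    static void smp_protect_sketch(uint32_t this_core)
    {
        TX_PROTECT_SKETCH *p = &_tx_thread_smp_protection;

        /* Re-entrant acquisition: the owning core only bumps the nesting count. */
        if (p->core == this_core)
        {
            p->count++;
            return;
        }

        /* Spin on the lock flag; the CAS models the LDAXR/STXR retry loop
           (a real core would drop into WFE between failed attempts). */
        uint32_t expected = 0;
        while (!__atomic_compare_exchange_n(&p->in_force, &expected, 1u, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        {
            expected = 0;
        }

        p->core = this_core;   /* record the new owner        */
        p->count++;            /* first level of nesting       */
    }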

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
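
With the FIFO queueing removed in 6.1.11, the release path in _tx_thread_smp_unprotect no longer consults the per-core wait counts before dropping the lock; it only has to honor the preempt-disable check, invalidate the owning core, and clear the flag with release ordering before waking any cores parked in WFE. A minimal C sketch of that release sequence, reusing the shortened TX_PROTECT_SKETCH names from the acquire sketch above and using an __atomic store in place of the explicit DMB/DSB/SEV sequence; the preempt-disable test is omitted for brevity, so this is illustrative only.

    static void smp_unprotect_sketch(uint32_t this_core)
    {
        TX_PROTECT_SKETCH *p = &_tx_thread_smp_protection;

        if (p->core == this_core)
        {
            if (--p->count == 0)
            {
                p->core = 0xFFFFFFFFu;                       /* mark no owner             */
                __atomic_store_n(&p->in_force, 0u,
                                 __ATOMIC_RELEASE);          /* models DMB/DSB + STR      */
                /* SEV would follow here when TX_ENABLE_WFE is defined. */
            }
        }
    }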

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
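
The block removed by the hunk above handled the case where an ISR ran while this core's thread was still queued for the lock: if the ISR never obtained the inter-core lock, the core had to be pulled out of the wait list, and if the ISR did obtain it on the waiter's behalf, the wait count had to be dropped and the lock released by hand. Since 6.1.11 removes the FIFO queueing, none of that bookkeeping survives. A hedged C sketch of the removed logic follows, using the shortened names from the sketches above and a hypothetical wait_list_remove() helper standing in for the wait-list macro.

    extern volatile uint32_t _tx_thread_smp_protect_wait_counts[];
    extern void wait_list_remove(uint32_t core);          /* hypothetical helper */

    static void preempt_restore_cleanup_sketch(uint32_t this_core)
    {
        TX_PROTECT_SKETCH *p = &_tx_thread_smp_protection;

        if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
        {
            if (p->core != this_core)
            {
                /* The ISR never took the inter-core lock: remove this core
                   from the FIFO wait list. */
                wait_list_remove(this_core);
            }
            else
            {
                /* The ISR took the lock but left it held for this waiter:
                   clear the wait count and release the lock ourselves. */
                _tx_thread_smp_protect_wait_counts[this_core]--;
                p->core = 0xFFFFFFFFu;
                __atomic_store_n(&p->in_force, 0u, __ATOMIC_RELEASE);
                /* SEV would follow here when TX_ENABLE_WFE is defined. */
            }
        }
    }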

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
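
The head/tail comparisons in the removed code above imply a simple ring-buffer FIFO: the list is empty when head equals tail, the core at the front is wait_list[head], new waiters are appended at the tail, and the actual operations come from tx_thread_smp_protection_wait_list_macros.h, which is not shown in this commit. The following C sketch of those two operations is therefore an inference from the usage, not the macros' real bodies, and the array sizing is likewise an assumption.

    #include <stdint.h>

    #ifndef TX_THREAD_SMP_MAX_CORES
    #define TX_THREAD_SMP_MAX_CORES 4                /* assumed build-time setting */
    #endif
    #define WAIT_LIST_SIZE (TX_THREAD_SMP_MAX_CORES + 1)

    extern uint32_t _tx_thread_smp_protect_wait_list[];
    extern uint32_t _tx_thread_smp_protect_wait_list_head;
    extern uint32_t _tx_thread_smp_protect_wait_list_tail;

    static void wait_list_add_sketch(uint32_t this_core)
    {
        uint32_t tail = _tx_thread_smp_protect_wait_list_tail;

        _tx_thread_smp_protect_wait_list[tail] = this_core;             /* enqueue at tail */
        _tx_thread_smp_protect_wait_list_tail = (tail + 1u) % WAIT_LIST_SIZE;
    }

    static void wait_list_remove_from_front_sketch(void)
    {
        uint32_t head = _tx_thread_smp_protect_wait_list_head;          /* dequeue at head */

        _tx_thread_smp_protect_wait_list_head = (head + 1u) % WAIT_LIST_SIZE;
    }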

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a73" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a73" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a73" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.275693039" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
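
Each retry of the lock re-derives the CPU ID from MPIDR_EL1: with TX_ARMV8_2 the core number lives in Aff1 and the cluster in Aff2, otherwise Aff0/Aff1 are used, and multi-cluster builds fold the cluster in with a shift by two, i.e. four cores per cluster. The C sketch below mirrors those UBFX/ADDS steps; the inline-asm MPIDR_EL1 read and the four-cores-per-cluster factor follow the assembly above, but the sketch is for illustration only.

    #include <stdint.h>

    static inline uint64_t read_mpidr_el1(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, mpidr_el1" : "=r"(v));
        return v;
    }

    static uint32_t cpu_id_sketch(void)
    {
        uint64_t mpidr = read_mpidr_el1();
    #ifdef TX_ARMV8_2
        uint32_t core    = (uint32_t)(mpidr >> 8)  & 0xFFu;   /* Aff1 holds the core ID */
        uint32_t cluster = (uint32_t)(mpidr >> 16) & 0xFFu;   /* Aff2 holds the cluster */
    #else
        uint32_t core    = (uint32_t)(mpidr >> 0)  & 0xFFu;   /* Aff0 holds the core ID */
        uint32_t cluster = (uint32_t)(mpidr >> 8)  & 0xFFu;   /* Aff1 holds the cluster */
    #endif
    #if TX_THREAD_SMP_CLUSTERS > 1
        return core + (cluster << 2);               /* four cores per cluster, per LSL #2 */
    #else
        (void)cluster;
        return core;
    #endif
    }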

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
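
Every attempt at the lock is bracketed by the same interrupt discipline: save the current DAIF posture, set the I and F mask bits with DAIFSet, and on a failed attempt restore the saved posture (followed by an ISB) before parking in WFE. A small C sketch of that save/mask/restore pattern follows, with GCC-style inline assembly standing in for the MRS/MSR instructions purely as an illustration of the pattern, not as the port's code.

    #include <stdint.h>

    static inline uint64_t daif_save_and_mask(void)
    {
        uint64_t daif;
        __asm__ volatile("mrs %0, daif" : "=r"(daif));            /* save posture      */
        __asm__ volatile("msr daifset, #3" ::: "memory");         /* mask IRQ and FIQ  */
        return daif;
    }

    static inline void daif_restore(uint64_t daif)
    {
        __asm__ volatile("msr daif, %0" :: "r"(daif) : "memory"); /* restore posture   */
        __asm__ volatile("isb" ::: "memory");                     /* take effect now   */
    }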

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a75" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a75" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a75" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1189619687" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
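The block deleted above is the FIFO-era cleanup that ran while restoring a preempted thread's context. For reference, a hedged C sketch of the removed control flow; this_core, wait_list_remove(), and the field names are stand-ins for the assembly macros and structure offsets, not literal ThreadX identifiers.

/* Stand-ins for the globals the deleted assembly touched.                       */
extern volatile unsigned int wait_counts[];   /* _tx_thread_smp_protect_wait_counts */
extern volatile unsigned int owning_core;     /* _tx_thread_smp_protection + 4      */
extern volatile unsigned int in_force;        /* _tx_thread_smp_protection + 0      */

void wait_list_remove(unsigned int core);     /* _tx_thread_smp_protect_wait_list_remove */

void preempt_restore_cleanup_sketch(unsigned int this_core)   /* logic removed in 6.1.11 */
{
    if (wait_counts[this_core] != 0U)         /* was the preempted thread waiting?  */
    {
        if (owning_core != this_core)
        {
            /* The interrupting ISR never obtained the inter-core lock:
               just take this core out of the FIFO wait list.               */
            wait_list_remove(this_core);
        }
        else
        {
            /* The ISR obtained the lock but left it held because it saw a
               waiter; drop the stale wait count and release the lock.       */
            wait_counts[this_core]--;
            owning_core = 0xFFFFFFFFU;        /* mark the protected core as invalid */
            in_force    = 0U;                 /* release the protection             */
            /* DSB ISH plus an optional SEV in the assembly wake waiting cores.     */
        }
    }
}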

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
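The prologue of _tx_thread_smp_protect derives a linear CPU index from MPIDR_EL1: ARMv8.2-A ports take the core number from Aff1 (bits 15:8) and the cluster from Aff2 (bits 23:16), other ports take the core from Aff0 (bits 7:0) and the cluster from Aff1, and multi-cluster builds fold the cluster in as cluster * 4 + core (the ADDS ... LSL #2). A small C sketch of that decode; the fixed four-cores-per-cluster factor comes from the shift in the assembly and is otherwise an assumption of this sketch.

#include <stdint.h>

/* Decode a linear CPU index from an MPIDR_EL1 value, mirroring the
   UBFX/ADDS sequence in the function prologue above.                      */
static unsigned int cpu_id_from_mpidr(uint64_t mpidr, int armv8_2, int clusters)
{
    unsigned int core;
    unsigned int cluster;

    if (armv8_2)                                 /* TX_ARMV8_2 builds          */
    {
        core    = (unsigned int)((mpidr >> 8)  & 0xFFU);   /* Aff1            */
        cluster = (unsigned int)((mpidr >> 16) & 0xFFU);   /* Aff2            */
    }
    else
    {
        core    = (unsigned int)( mpidr        & 0xFFU);   /* Aff0            */
        cluster = (unsigned int)((mpidr >> 8)  & 0xFFU);   /* Aff1            */
    }

    if (clusters > 1)                            /* TX_THREAD_SMP_CLUSTERS > 1 */
    {
        return core + (cluster << 2);            /* cluster * 4 + core         */
    }
    return core;
}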

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
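The _tx_thread_smp_protect_wait_list_add and _remove macros referenced in the deleted code maintained a small FIFO of core IDs between a head and a tail index; that queue is exactly what 6.1.11 removes. A hedged C sketch of the structure follows; the ring size, the wrap handling, and the omission of arbitrary removal are assumptions, not the ThreadX macro bodies.

#define WAIT_LIST_SIZE 8U                        /* assumed sizing; the real list is per-build */

static unsigned int wait_list[WAIT_LIST_SIZE];   /* queued core IDs             */
static unsigned int wait_list_head;              /* index of the next waiter    */
static unsigned int wait_list_tail;              /* index of the next free slot */

/* Roughly what _tx_thread_smp_protect_wait_list_add did: enqueue at the tail.  */
static void wait_list_add_sketch(unsigned int core)
{
    wait_list[wait_list_tail] = core;
    wait_list_tail = (wait_list_tail + 1U) % WAIT_LIST_SIZE;
}

/* Roughly what _tx_thread_smp_protect_remove_from_front_of_list did.           */
static void wait_list_pop_front_sketch(void)
{
    wait_list_head = (wait_list_head + 1U) % WAIT_LIST_SIZE;
}

/* _tx_thread_smp_protect_wait_list_remove(core) additionally had to find and
   close up a waiter anywhere in the queue (used when a waiting thread was
   preempted); that compaction is elided here.                                   */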

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
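As a rough functional analogue of the simplified acquisition path, the LDAXR/STXR pair with the DMB/DSB barriers behaves like an acquire/release spinlock. The C11 sketch below is illustrative only; the real port stays in assembly so it can control interrupts, the exclusive monitor, and the WFE/SEV standby hints directly.

#include <stdatomic.h>

/* Illustrative acquire/release pattern roughly matching the LDAXR/STXR loop
   and the barriers in the port above.                                          */
static atomic_uint protect_in_force;             /* 0 = free, 1 = held          */

static void protect_acquire_sketch(void)
{
    unsigned int expected = 0U;

    while (!atomic_compare_exchange_weak_explicit(&protect_in_force,
                                                  &expected, 1U,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
    {
        expected = 0U;            /* the CAS rewrites 'expected' on failure      */
        /* The assembly re-enables interrupts and, under TX_ENABLE_WFE,
           executes WFE here before retrying.                                    */
    }
}

static void protect_release_sketch(void)
{
    atomic_store_explicit(&protect_in_force, 0U, memory_order_release);
    /* The assembly follows the store with DSB ISH and, under TX_ENABLE_WFE,
       SEV to wake cores waiting in WFE.                                         */
}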

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -23,7 +23,7 @@
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<configuration artifactExtension="axf" artifactName="${ProjName}" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="clean" description="" errorParsers="org.eclipse.cdt.core.GmakeErrorParser;com.arm.eclipse.builder.armcc.error;org.eclipse.cdt.core.CWDLocator" id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" name="Debug" parent="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142" postannouncebuildStep="" postbuildStep="" preannouncebuildStep="" prebuildStep="">
<folderInfo id="com.arm.eclipse.build.config.v6.exe.debug.base.var.arm_compiler_6-6.4.1908576142." name="/" resourcePath="">
@@ -47,7 +47,7 @@
<option id="com.arm.tool.c.compiler.v6.base.options.debug.level.1747478037" name="Debug Level" superClass="com.arm.tool.c.compiler.v6.base.options.debug.level" useByScannerDiscovery="true" value="com.arm.tool.c.compiler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a76" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1133188329" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.c.compiler.v6.base.input.2103612806" superClass="com.arm.tool.c.compiler.v6.base.input"/>
@@ -63,7 +63,7 @@
<option id="com.arm.tool.assembler.v6.base.options.debug.level.2040227767" name="Debug Level" superClass="com.arm.tool.assembler.v6.base.options.debug.level" useByScannerDiscovery="false" value="com.arm.tool.assembler.v6.base.options.debug.level.std" valueType="enumerated"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a76" valueType="string"/>
<option id="com.arm.tool.assembler.v6.base.option.cpu.143750331" name="CPU (-mcpu)" superClass="com.arm.tool.assembler.v6.base.option.cpu" useByScannerDiscovery="false" value="cortex-a35" valueType="string"/>
<inputType id="com.arm.tool.assembler.v6.base.input.1316842528" superClass="com.arm.tool.assembler.v6.base.input"/>

View File

@@ -49,7 +49,7 @@
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6">
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a76" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.option.cpu.1024278185" name="CPU (-mcpu)" superClass="com.arm.tool.c.compiler.v6.base.option.cpu" useByScannerDiscovery="true" value="cortex-a35" valueType="string"/>
<option id="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings.830475099" name="Enable tool specific settings" superClass="com.arm.tool.c.compiler.v6.base.options.preproc.enableToolSpecificSettings" useByScannerDiscovery="false" value="true" valueType="boolean"/>
@@ -129,11 +129,7 @@
<fileInfo id="com.arm.eclipse.build.config.v6.lib.debug.base.1470528944.1145515026" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128">
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428">
<inputType id="com.arm.tool.c.compiler.v6.base.input.1616307318" superClass="com.arm.tool.c.compiler.v6.base.input"/>
</tool>
<tool id="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428.1242981128" name="Arm C Compiler 6" superClass="com.arm.tool.c.compiler.v6.base.var.arm_compiler_6-6.2072709428"/>
</fileInfo>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
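Independent of the FIFO removal, the protection remains recursive per core: when the owning core re-enters _tx_thread_smp_protect it only increments the ownership count at offset #8, and the matching _tx_thread_smp_unprotect is expected to unwind it. A minimal sketch of that nesting behavior with stand-in names; the decrement side is not visible in the hunks above and is an assumption here.

static unsigned int owning_core = 0xFFFFFFFFU;   /* protection structure + 4     */
static unsigned int own_count;                   /* protection structure + 8     */

static void protect_nesting_sketch(unsigned int this_core)
{
    if (owning_core == this_core)
    {
        own_count++;                     /* already owned: just nest deeper       */
        return;
    }
    /* ... the LDAXR/STXR loop acquires the in_force flag here ...                */
    owning_core = this_core;             /* record the new owner                  */
    own_count++;                         /* first level of ownership              */
}

static void unprotect_nesting_sketch(void)
{
    if ((own_count != 0U) && (--own_count == 0U))
    {
        owning_core = 0xFFFFFFFFU;       /* release ownership for other cores     */
        /* ... clear in_force, DSB ISH, optional SEV ...                          */
    }
}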

View File

@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protect_wait_counts // Build address of wait counts
LDR w3, [x2, x1, LSL #2] // Pickup wait list value
CMP w3, #0 // Are any entities on this core waiting?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid

View File

@@ -129,6 +129,12 @@
</folderInfo>
<fileInfo id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.config.exe.debug.var.gcc-8.3.0-aarch64-elf.794347674.1482422597" name="tx_misra.c" rcbsApplicability="disable" resourcePath="src_generic/tx_misra.c" toolsToInvoke="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610">
<tool id="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701.1451532610" name="GCC C Compiler 8.3.0 [aarch64-elf]" superClass="com.arm.eclipse.cdt.managedbuild.ds5.gcc.baremetal.tool.c.compiler.var.gcc-8.3.0-aarch64-elf.1247168701"/>
</fileInfo>
<sourceEntries>
<entry flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>

View File

@@ -21,9 +21,6 @@
/**************************************************************************/
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
/* Was the thread being preempted waiting for the lock? */
// if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
// {
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
CMP w3, #0
BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
/* Do we not have the lock? This means the ISR never got the inter-core lock. */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
// {
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w8, w3 // Compare our core to the owning core
BEQ _this_core_has_lock // Do we have the lock?
/* We don't have the lock. This core should be in the list. Remove it. */
// _tx_thread_smp_protect_wait_list_remove(this_core);
_tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
B _nobody_waiting_for_lock // Leave
// }
// else
// {
/* We have the lock. This means the ISR got the inter-core lock, but
never released it because it saw that there was someone waiting.
Note this core is not in the list. */
_this_core_has_lock:
/* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
SUB w3, w3, #1 // Decrement waiting value. Should be zero now
STR w3, [x2, x8, LSL #2] // Store new waiting value
/* Now release the inter-core lock. */
/* Set protected core as invalid. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
LDR x2, =_tx_thread_smp_protection // Load address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
/* Release protection. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
/* Wake up waiting processors. Note interrupts are already enabled. */
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs
#endif
// }
// }
_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //

View File

@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.1.10 */
/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
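
The MPIDR_EL1 handling above reduces the affinity fields to a flat CPU index. A hedged C equivalent, with the field positions taken from the UBFX operands (the helper name is illustrative):

static inline unsigned int smp_cpu_id(void)
{
    unsigned long long mpidr;
    unsigned int core;
    unsigned int cluster = 0;

    __asm volatile("mrs %0, mpidr_el1" : "=r"(mpidr));
#ifdef TX_ARMV8_2
    core = (unsigned int)(mpidr >> 8) & 0xFF;               /* Aff1 holds the core ID */
#if TX_THREAD_SMP_CLUSTERS > 1
    cluster = (unsigned int)(mpidr >> 16) & 0xFF;           /* Aff2 holds the cluster ID */
#endif
#else
    core = (unsigned int)mpidr & 0xFF;                      /* Aff0 holds the core ID */
#if TX_THREAD_SMP_CLUSTERS > 1
    cluster = (unsigned int)(mpidr >> 8) & 0xFF;            /* Aff1 holds the cluster ID */
#endif
#endif
    return core + (cluster << 2);                           /* cluster * 4, matching the LSL #2 above */
}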
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR x2, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x2, #4] // Pickup the owning core
CMP w1, w3 // Is it not this core?
BNE _protection_not_owned // No, the protection is not already owned
/* We already have protection. */
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH
B _return
_protection_not_owned:
/* Is the lock available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Is the list empty? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head
LDR w3, [x3]
LDR x4, =_tx_thread_smp_protect_wait_list_tail
LDR w4, [x4]
CMP w3, w4
BNE _list_not_empty
/* Try to get the lock. */
// if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
// {
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
/* We got the lock! */
// _tx_thread_smp_protect_lock_got();
DMB ISH // Ensure write to protection finishes
_tx_thread_smp_protect_lock_got // Call the lock got function
B _return
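
The LDAXR/STXR pair above is the exclusive-monitor attempt to claim the lock. Roughly the same effect can be sketched with a compiler compare-and-swap; this is an analogy, not the port's implementation:

static int try_get_protection(volatile unsigned int *in_force)
{
    unsigned int expected = 0;

    /* Acquire ordering stands in for LDAXR; a failed exchange corresponds to
       the STXR failing and the assembly branching to _start_waiting. */
    return __atomic_compare_exchange_n(in_force, &expected, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}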
_list_not_empty:
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _start_waiting
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _start_waiting // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _start_waiting // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _return
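
When the list is not empty, only the core at the head of the FIFO may take the lock, which is what the sequence above checks before attempting the exclusive store and dequeuing itself. A hedged sketch, reusing try_get_protection() from above and treating the list variables and dequeue macro as plain C objects:

extern volatile unsigned int _tx_thread_smp_protect_wait_list[];
extern volatile unsigned int _tx_thread_smp_protect_wait_list_head;

void _tx_thread_smp_protect_remove_from_front_of_list(void);   /* macro in the source */

static int try_get_lock_from_front(unsigned int this_core,
                                   volatile unsigned int *in_force)
{
    if (_tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head] != this_core)
    {
        return 0;                                   /* not our turn yet */
    }
    if (!try_get_protection(in_force))              /* exclusive attempt, as sketched above */
    {
        return 0;
    }
    _tx_thread_smp_protect_remove_from_front_of_list();   /* dequeue ourselves */
    return 1;
}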
_start_waiting:
/* For one reason or another, we didn't get the lock. */
/* Increment wait count. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Have we not added ourselves to the list yet? */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
// {
CMP w4, #1
BNE _already_in_list0 // Is this core already waiting?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
// }
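
The _start_waiting bookkeeping amounts to: bump this core's wait count, and enqueue the core only on its first attempt. A small C sketch of that step (the wait-list macro is again shown as a function):

extern volatile unsigned int _tx_thread_smp_protect_wait_counts[];

void _tx_thread_smp_protect_wait_list_add(unsigned int core);  /* macro in the source */

static void start_waiting(unsigned int this_core)
{
    _tx_thread_smp_protect_wait_counts[this_core]++;

    /* Only the first attempt enqueues the core; retries are already listed. */
    if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
    {
        _tx_thread_smp_protect_wait_list_add(this_core);
    }
}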
_already_in_list0:
/* Restore interrupts. */
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned
LDAXR w4, [x1, #0] // Pickup the protection flag
CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
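
When the attempt fails, the new code simply restores the caller's interrupt posture, optionally parks in WFE until another core issues SEV on release, and restarts the whole protect sequence. A hedged C outline of that wait step:

static void wait_then_retry(unsigned long long saved_daif)
{
    __asm volatile("msr daif, %0" :: "r"(saved_daif));      /* restore interrupts */
    __asm volatile("isb");
#ifdef TX_ENABLE_WFE
    __asm volatile("wfe");                                  /* sleep until an event/SEV */
#endif
    /* The assembly then branches back to _tx_thread_smp_protect. */
}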
/* We do this until we have the lock. */
// while (1)
// {
_try_to_get_lock:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
#endif
/* Do we already have protection? */
// if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
// {
LDR w3, [x2, #4] // Pickup the owning core
CMP w3, w1 // Is it this core?
BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
// an ISR preempted us and got protection
// }
/* Are we at the front of the list? */
// if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
// {
LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w3, [x3] // Get the value of the head
LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w4, [x4, x3, LSL #2] // Get the value at the head index
CMP w1, w4
BNE _did_not_get_lock
/* Is the lock still available? */
// if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
// {
LDAXR w3, [x2, #0] // Pickup the protection flag
CMP w3, #0
BNE _did_not_get_lock // No, protection not available
/* Get the lock. */
// _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
MOV w3, #1 // Build lock value
STXR w4, w3, [x2, #0] // Attempt to get the protection
CMP w4, #0
BNE _did_not_get_lock // Did it fail?
DMB ISH //
/* Got the lock. */
// _tx_thread_smp_protect_lock_got();
_tx_thread_smp_protect_lock_got
/* Remove this core from the wait list. */
// _tx_thread_smp_protect_remove_from_front_of_list();
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
/* For one reason or another, we didn't get the lock. */
/* Were we removed from the list? This can happen if we're a thread
and we got preempted. */
// if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
// {
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
CMP w4, #0
BNE _already_in_list1 // Is this core already in the list?
/* Add ourselves to the list. */
// _tx_thread_smp_protect_wait_list_add(this_core);
_tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
/* Our waiting count was also reset when we were preempted. Increment it again. */
// _tx_thread_smp_protect_wait_counts[this_core]++;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
ADD w4, w4, #1 // Increment wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
// }
_already_in_list1:
/* Restore interrupts and try again. */
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
B _try_to_get_lock // On waking, restart the protection attempt
_got_lock_after_waiting:
/* We're no longer waiting. */
// _tx_thread_smp_protect_wait_counts[this_core]--;
LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
LDR w4, [x3, x1, LSL #2] // Load current wait value
SUB w4, w4, #1 // Decrement wait value
STR w4, [x3, x1, LSL #2] // Store new wait value
/* Restore registers and return. */
_return:
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH //
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH //
RET
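
Putting the new pieces together, the 6.1.11 protect path that replaces the FIFO queue can be summarized with the following C sketch. The helper names disable_interrupts, restore_interrupts, and wait_for_event are placeholders for the MRS/MSR DAIF and WFE sequences above, smp_cpu_id() and try_get_protection() are the sketches given earlier, and the structure layout is inferred from the offsets used in the assembly.

extern struct
{
    volatile unsigned int tx_thread_smp_protect_in_force;   /* offset 0 */
    volatile unsigned int tx_thread_smp_protect_core;       /* offset 4 */
    volatile unsigned int tx_thread_smp_protect_count;      /* offset 8 */
} _tx_thread_smp_protection;

unsigned int smp_cpu_id(void);                               /* sketched earlier */
int try_get_protection(volatile unsigned int *in_force);     /* sketched earlier */
unsigned long long disable_interrupts(void);                 /* placeholder: MRS DAIF / MSR DAIFSet */
void restore_interrupts(unsigned long long posture);         /* placeholder: MSR DAIF */
void wait_for_event(void);                                   /* placeholder: WFE when TX_ENABLE_WFE is set */

unsigned long long smp_protect_sketch(void)
{
    unsigned long long saved_posture = disable_interrupts();
    unsigned int this_core = smp_cpu_id();

    for (;;)
    {
        if (_tx_thread_smp_protection.tx_thread_smp_protect_core == this_core)
        {
            break;                                           /* already owned: just nest */
        }
        if (try_get_protection(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force))
        {
            _tx_thread_smp_protection.tx_thread_smp_protect_core = this_core;
            break;
        }
        restore_interrupts(saved_posture);                   /* let ISRs run while waiting */
        wait_for_event();
        saved_posture = disable_interrupts();
    }

    _tx_thread_smp_protection.tx_thread_smp_protect_count++; /* nesting count, offset 8 */
    return saved_posture;                                    /* caller hands this back to unprotect */
}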

Some files were not shown because too many files have changed in this diff.