6.1 minor release

This commit is contained in:
Scott Larson
2020-09-30 15:42:41 -07:00
parent 7287542cc8
commit 1b5816a206
3038 changed files with 377204 additions and 8606 deletions

View File

@@ -47,7 +47,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_initialize_low_level SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -80,7 +80,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_initialize_low_level(VOID)

View File

@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h SMP/Cortex-A5/AC5 */
/* 6.0.1 */
/* 6.1 */
/* */
/* AUTHOR */
/* */
@@ -47,7 +47,7 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* */
/**************************************************************************/
@@ -394,7 +394,7 @@ void tx_thread_vfp_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX SMP/Cortex-A5/AC5 Version 6.0.1 *";
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX SMP/Cortex-A5/AC5 Version 6.1 *";
#else
extern CHAR _tx_version_id[];
#endif

View File

@@ -351,7 +351,7 @@ For generic code revision information, please refer to the readme_threadx_generi
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
06/30/2020 Initial ThreadX 6.0.1 version for Cortex-A5 using AC5 tools.
09-30-2020 Initial ThreadX 6.1 version for Cortex-A5 using AC5 tools.
Copyright(c) 1996-2020 Microsoft Corporation

View File

@@ -69,7 +69,7 @@ SVC_MODE EQU 0x93 ; SVC mode
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_context_restore SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -101,7 +101,7 @@ SVC_MODE EQU 0x93 ; SVC mode
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_context_restore(VOID)

View File

@@ -45,7 +45,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_context_save SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -76,7 +76,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_context_save(VOID)

View File

@@ -42,7 +42,7 @@ INT_MASK EQU 0x80 ; Interrupt bit mask
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_interrupt_control SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -72,7 +72,7 @@ INT_MASK EQU 0x80 ; Interrupt bit mask
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;UINT _tx_thread_interrupt_control(UINT new_posture)

View File

@@ -35,7 +35,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_interrupt_disable SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -64,7 +64,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;UINT _tx_thread_interrupt_disable(void)

View File

@@ -35,7 +35,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_interrupt_restore SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -65,7 +65,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;UINT _tx_thread_interrupt_restore(UINT old_posture)

View File

@@ -45,7 +45,7 @@ IRQ_MODE_BITS EQU 0x12 ; IRQ mode bits
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_irq_nesting_end SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -84,7 +84,7 @@ IRQ_MODE_BITS EQU 0x12 ; IRQ mode bits
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_irq_nesting_end(VOID)

View File

@@ -41,7 +41,7 @@ SYS_MODE_BITS EQU 0x1F ; System mode bits
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_irq_nesting_start SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -77,7 +77,7 @@ SYS_MODE_BITS EQU 0x1F ; System mode bits
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_irq_nesting_start(VOID)

View File

@@ -46,7 +46,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_schedule SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -79,7 +79,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_schedule(VOID)

View File

@@ -38,7 +38,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_core_get SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -67,7 +67,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_core_get

View File

@@ -40,7 +40,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_core_preempt SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -73,7 +73,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_core_preempt

View File

@@ -40,7 +40,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_current_state_get SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -69,7 +69,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_current_state_get

View File

@@ -40,7 +40,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_current_thread_get SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -69,7 +69,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_current_thread_get

View File

@@ -43,7 +43,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_initialize_wait SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -74,7 +74,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_initialize_wait

View File

@@ -38,7 +38,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_low_level_initialize SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -68,7 +68,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_low_level_initialize

View File

@@ -51,7 +51,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_protect SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -82,7 +82,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_protect

View File

@@ -39,7 +39,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_time_get SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -69,7 +69,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_time_get

View File

@@ -43,7 +43,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_smp_unprotect SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -75,7 +75,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
EXPORT _tx_thread_smp_unprotect

View File

@@ -47,7 +47,7 @@ THUMB_BIT EQU 0x20 ; Thumb-bit
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_stack_build SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -79,7 +79,7 @@ THUMB_BIT EQU 0x20 ; Thumb-bit
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))

View File

@@ -47,7 +47,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_system_return SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -79,7 +79,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_system_return(VOID)

View File

@@ -44,7 +44,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_thread_vectored_context_save SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -75,7 +75,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_thread_vectored_context_save(VOID)

View File

@@ -55,7 +55,7 @@
;/* FUNCTION RELEASE */
;/* */
;/* _tx_timer_interrupt SMP/Cortex-A5/AC5 */
;/* 6.0.1 */
;/* 6.1 */
;/* AUTHOR */
;/* */
;/* William E. Lamie, Microsoft Corporation */
@@ -91,7 +91,7 @@
;/* */
;/* DATE NAME DESCRIPTION */
;/* */
;/* 06-30-2020 William E. Lamie Initial Version 6.0.1 */
;/* 09-30-2020 William E. Lamie Initial Version 6.1 */
;/* */
;/**************************************************************************/
;VOID _tx_timer_interrupt(VOID)

View File

@@ -0,0 +1,516 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Interrupt Controller functions
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.text
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
// ------------------------------------------------------------
// GIC
// ------------------------------------------------------------
// CPU Interface offset from base of private peripheral space --> 0x0100
// Interrupt Distributor offset from base of private peripheral space --> 0x1000
// Typical calls to enable interrupt ID X:
// enableIntID(X) <-- Enable that ID
// setIntPriority(X, 0) <-- Set the priority of X to 0 (the max priority)
// setPriorityMask(0x1F) <-- Set CPU's priority mask to 0x1F (the lowest priority)
// enableGIC() <-- Enable the GIC (global)
// enableGICProcessorInterface() <-- Enable the CPU interface (local to the CPU)
.global enableGIC
// void enableGIC(void)
// Global enable of the Interrupt Distributor
.type enableGIC, "function"
.cfi_startproc
enableGIC:
// Get base address of private peripheral space
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1000 // Add the GIC offset
LDR r1, [r0] // Read the GIC Enable Register (ICDDCR)
ORR r1, r1, #0x01 // Set bit 0, the enable bit
STR r1, [r0] // Write the GIC Enable Register (ICDDCR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global disableGIC
// void disableGIC(void)
// Global disable of the Interrupt Distributor
.type disableGIC, "function"
.cfi_startproc
disableGIC:
// Get base address of private peripheral space
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1000 // Add the GIC offset
LDR r1, [r0] // Read the GIC Enable Register (ICDDCR)
BIC r1, r1, #0x01 // Clear bit 0, the enable bit
STR r1, [r0] // Write the GIC Enable Register (ICDDCR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global enableIntID
// void enableIntID(uint32_t ID)
// Enables the interrupt source number ID
.type enableIntID, "function"
.cfi_startproc
enableIntID:
// Get base address of private peripheral space
MOV r1, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// Each interrupt source has an enable bit in the GIC. These
// are grouped into registers, with 32 sources per register
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r1 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r1, r1, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r1 // Shift it left to position of ID
ADD r2, r2, #0x1100 // Add the base offset of the Enable Set registers to the offset for the ID
STR r3, [r0, r2] // Store out (ICDISER)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global disableIntID
// void disableIntID(uint32_t ID)
// Disables the interrupt source number ID
.type disableIntID, "function"
.cfi_startproc
disableIntID:
// Get base address of private peripheral space
MOV r1, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r1 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r1, r1, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r1 // Shift it left to position of ID in 32-bit block
ADD r2, r2, #0x1180 // Add the base offset of the Enable Clear registers to the offset for the ID
STR r3, [r0, r2] // Store out (ICDICER)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global setIntPriority
// void setIntPriority(uint32_t ID, uint32_t priority)
// Sets the priority of the specified ID
// r0 = ID
// r1 = priority
.type setIntPriority, "function"
.cfi_startproc
setIntPriority:
// Get base address of private peripheral space
MOV r2, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// r0 = base addr
// r1 = priority
// r2 = ID
// Make sure that priority value is only 5 bits, and convert to expected format
AND r1, r1, #0x1F
MOV r1, r1, LSL #3
// Find which register this ID lives in
BIC r3, r2, #0x03 // Make a copy of the ID, clearing off the bottom two bits
// There are four IDs per reg, by clearing the bottom two bits we get an address offset
ADD r3, r3, #0x1400 // Now add the offset of the Priority Level registers from the base of the private peripheral space
ADD r0, r0, r3 // Now add in the base address of the private peripheral space, giving us the absolute address
// Now work out which ID in the register it is
AND r2, r2, #0x03 // Clear all but the bottom two bits, leaves which ID in the reg it is (which byte)
MOV r2, r2, LSL #3 // Multiply by 8, this gives a bit offset
// Read -> Modify -> Write
MOV r12, #0xFF // 8 bit field mask
MOV r12, r12, LSL r2 // Move mask into correct bit position
MOV r1, r1, LSL r2 // Also, move passed in priority value into correct bit position
LDR r3, [r0] // Read current value of the Priority Level register
BIC r3, r3, r12 // Clear appropriate field
ORR r3, r3, r1 // Now OR in the priority value
STR r3, [r0] // And store it back again (ICDIPR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global getIntPriority
// uint32_t getIntPriority(uint32_t ID)
// Returns the priority of the specified ID
.type getIntPriority, "function"
.cfi_startproc
getIntPriority:
// TBD
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global setIntTarget
// void setIntTarget(uint32_t ID, uint32_t target)
// Sets the target CPUs of the specified ID
.type setIntTarget, "function"
.cfi_startproc
setIntTarget:
// Get base address of private peripheral space
MRC p15, 4, r2, c15, c0, 0 // Read periph base address
// r0 = ID
// r1 = target
// r2 = base addr
// Clear unused bits
AND r1, r1, #0xF
// Find which register this ID lives in
BIC r3, r0, #0x03 // Make a copy of the ID, clearing the bottom 2 bits
// There are four IDs per reg, by clearing the bottom two bits we get an address offset
ADD r3, r3, #0x1800 // Now add the offset of the Target registers from the base of the private peripheral space
ADD r2, r2, r3 // Now add in the base address of the private peripheral space, giving us the absolute address
// Now work out which ID in the register it is
AND r0, r0, #0x03 // Clear all but the bottom two bits, leaves which ID in the reg it is (which byte)
MOV r0, r0, LSL #3 // Multiply by 8, this gives a bit offset
// Read -> Modify -> Write
MOV r12, #0xFF // 8 bit field mask
MOV r12, r12, LSL r0 // Move mask into correct bit position
MOV r1, r1, LSL r0 // Also, move passed in target value into correct bit position
LDR r3, [r2] // Read current value of the Target register
BIC r3, r3, r12 // Clear appropriate field
ORR r3, r3, r1 // Now OR in the target value
STR r3, [r2] // And store it back again
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global getIntTarget
// uint32_t getIntTarget(uint32_t ID)
// Returns the target CPUs of the specified ID
.type getIntTarget, "function"
.cfi_startproc
getIntTarget:
// TBD
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global enableGICProcessorInterface
// void enableGICProcessorInterface(void)
// Enables the processor interface
// Must be done on each core separately
.type enableGICProcessorInterface, "function"
.cfi_startproc
enableGICProcessorInterface:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x100] // Read the Processor Interface Control register (ICCICR/ICPICR)
ORR r1, r1, #0x03 // Bit 0: Enables secure interrupts, Bit 1: Enables Non-Secure interrupts
BIC r1, r1, #0x08 // Bit 3: Ensure Group 0 interrupts are signalled using IRQ, not FIQ
STR r1, [r0, #0x100] // Write the Processor Interface Control register (ICCICR/ICPICR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global disableGICProcessorInterface
// void disableGICProcessorInterface(void)
// Disables the processor interface
// Must be done on each core separately
.type disableGICProcessorInterface, "function"
.cfi_startproc
disableGICProcessorInterface:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x100] // Read the Processor Interface Control register (ICCICR/ICPICR)
BIC r1, r1, #0x03 // Bit 0: Enables secure interrupts, Bit 1: Enables Non-Secure interrupts
STR r1, [r0, #0x100] // Write the Processor Interface Control register (ICCICR/ICPICR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global setPriorityMask
// void setPriorityMask(uint32_t priority)
// Sets the Priority Mask register for the CPU it is run on
// The reset value masks ALL interrupts!
.type setPriorityMask, "function"
.cfi_startproc
setPriorityMask:
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
STR r0, [r1, #0x0104] // Write the Priority Mask register
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global setBinaryPoint
// void setBinaryPoint(uint32_t priority)
// Sets the Binary Point Register for the CPU it is run on
.type setBinaryPoint, "function"
.cfi_startproc
setBinaryPoint:
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
STR r0, [r1, #0x0108] // Write the Binary Point Register (ICCBPR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global readIntAck
// uint32_t readIntAck(void)
// Returns the value of the Interrupt Acknowledge Register
.type readIntAck, "function"
.cfi_startproc
readIntAck:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r0, [r0, #0x010C] // Read the Interrupt Acknowledge Register
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global writeEOI
// void writeEOI(uint32_t ID)
// Writes ID to the End Of Interrupt register
.type writeEOI, "function"
.cfi_startproc
writeEOI:
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
STR r0, [r1, #0x0110] // Write ID to the End of Interrupt register
BX lr
.cfi_endproc
// ------------------------------------------------------------
// SGI
// ------------------------------------------------------------
.global sendSGI
// void sendSGI(uint32_t ID, uint32_t target_list, uint32_t filter_list)
// Send a software-generated interrupt
.type sendSGI, "function"
.cfi_startproc
sendSGI:
AND r3, r0, #0x0F // Mask off unused bits of ID, and move to r3
AND r1, r1, #0x0F // Mask off unused bits of target_list
AND r2, r2, #0x0F // Mask off unused bits of filter_list
ORR r3, r3, r1, LSL #16 // Combine ID and target_list
ORR r3, r3, r2, LSL #24 // and now the filter list
// Get the address of the GIC
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1F00 // Add offset of the sgi_trigger reg
STR r3, [r0] // Write to the Software Generated Interrupt Register (ICDSGIR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
// TrustZone
// ------------------------------------------------------------
.global enableSecureFIQs
// void enableSecureFIQs(void)
// Enables the sending of secure interrupts as FIQs
.type enableSecureFIQs, "function"
.cfi_startproc
enableSecureFIQs:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x100] // Read the Processor Interface Control register
ORR r1, r1, #0x08 // Bit 3: Controls whether secure interrupts are signalled as IRQs or FIQs
STR r1, [r0, #0x100] // Write the Processor Interface Control register
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global disableSecureFIQs
// void disableSecureFIQs(void)
// Disables the sending of secure interrupts as FIQs
.type disableSecureFIQs, "function"
.cfi_startproc
disableSecureFIQs:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x100] // Read the Processor Interface Control register
BIC r1, r1, #0x08 // Bit 3: Controls whether secure interrupts are signalled as IRQs or FIQs
STR r1, [r0, #0x100] // Write the Processor Interface Control register
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global makeIntSecure
// void makeIntSecure(uint32_t ID)
// Sets the specified ID as being Secure
// r0 - ID
.type makeIntSecure, "function"
.cfi_startproc
makeIntSecure:
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
// Each interrupt source has a security bit in the GIC. These
// are grouped into registers, with 32 sources per register
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r0 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r0, r0, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r0 // Shift it left to position of ID
ADD r2, r2, #0x1080 // Add the base offset of the Interrupt Security registers to the offset for the ID
LDR r0, [r1, r2] // Read the appropriate Interrupt Security register
BIC r0, r0, r3 // Clear bit (0 = secure)
STR r0, [r1, r2] // Store out
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global makeIntNonSecure
// void makeIntNonSecure(uint32_t ID)
// Sets the specified ID as being non-secure
// r0 - ID
.type makeIntNonSecure, "function"
.cfi_startproc
makeIntNonSecure:
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
// Each interrupt source has a security bit in the GIC. These
// are grouped into registers, with 32 sources per register
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r0 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r0, r0, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r0 // Shift it left to position of ID
ADD r2, r2, #0x1080 // Add the base offset of the Interrupt Security registers to the offset for the ID
LDR r0, [r1, r2] // Read the appropriate Interrupt Security register
ORR r0, r0, r3 // Set bit (1 = non-secure)
STR r0, [r1, r2] // Store out
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global getIntSecurity
// uint32_t getIntSecurity(uint32_t ID)
// Returns the security of the specified ID
.type getIntSecurity, "function"
.cfi_startproc
getIntSecurity:
// TBD
BX lr
.cfi_endproc
// ------------------------------------------------------------
// End of MP_GIC.s
// ------------------------------------------------------------
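For reference, the ID-to-register arithmetic implemented by enableIntID/disableIntID above follows the usual GIC scheme: one enable bit per interrupt ID, 32 IDs per ICDISER/ICDICER register. A minimal C sketch of the same computation (gic_enable_int and periph_base are illustrative names, not part of this port):

#include <stdint.h>

#define GIC_DIST_OFFSET  0x1000u  /* Distributor offset from the private peripheral base */
#define ICDISER_OFFSET   0x0100u  /* Enable Set registers, distributor-relative (0x1100 from periph base) */

/* Illustrative C analogue of enableIntID: set the enable bit for interrupt 'id'. */
static inline void gic_enable_int(uintptr_t periph_base, uint32_t id)
{
    volatile uint32_t *icdiser =
        (volatile uint32_t *)(periph_base + GIC_DIST_OFFSET + ICDISER_OFFSET
                              + (id >> 5) * 4u);  /* id / 32 selects the register */
    *icdiser = 1u << (id & 0x1Fu);                /* write-1-to-set; no read-modify-write needed */
}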

View File

@@ -0,0 +1,120 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Interrupt Controller functions
// Header File
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _CORTEXA_GIC_H
#define _CORTEXA_GIC_H
#define SPURIOUS (255)
// PPI IDs:
#define MPCORE_PPI_PRIVATE_TIMER (29)
#define MPCORE_PPI_PRIVATE_WD (30)
#define MPCORE_PPI_GLOBAL_TIMER (27)
#define MPCORE_PPI_LEGACY_IRQ (31)
#define MPCORE_PPI_LEGACY_FIQ (28)
// ------------------------------------------------------------
// GIC
// ------------------------------------------------------------
// Typical calls to enable interrupt ID X:
// enableIntID(X) <-- Enable that ID
// setIntPriority(X, 0) <-- Set the priority of X to 0 (the max priority)
// setPriorityMask(0x1F) <-- Set Core's priority mask to 0x1F (the lowest priority)
// enableGIC() <-- Enable the GIC (global)
// enableGICProcessorInterface() <-- Enable the CPU interface (local to the core)
//
// Global enable of the Interrupt Distributor
void enableGIC(void);
// Global disable of the Interrupt Distributor
void disableGIC(void);
// Enables the interrupt source number ID
void enableIntID(unsigned int ID);
// Disables the interrupt source number ID
void disableIntID(unsigned int ID);
// Enables the processor interface
// Must be done on each core separately
void enableGICProcessorInterface(void);
// Disables the processor interface
// Must be done on each core separately
void disableGICProcessorInterface(void);
// Sets the Priority Mask register for the core it is run on
// The reset value masks ALL interrupts!
//
// NOTE: Bits 2:0 of this register are SBZ; the function does not perform any shifting!
void setPriorityMask(unsigned int priority);
// Sets the Binary Point Register for the core it is run on
void setBinaryPoint(unsigned int priority);
// Sets the priority of the specified ID
void setIntPriority(unsigned int ID, unsigned int priority);
// Returns the priority of the specified ID
unsigned int getIntPriority(unsigned int ID);
#define MPCORE_IC_TARGET_NONE (0x0)
#define MPCORE_IC_TARGET_CPU0 (0x1)
#define MPCORE_IC_TARGET_CPU1 (0x2)
#define MPCORE_IC_TARGET_CPU2 (0x4)
#define MPCORE_IC_TARGET_CPU3 (0x8)
// Sets the target CPUs of the specified ID
// For 'target' use one of the above defines
void setIntTarget(unsigned int ID, unsigned int target);
// Returns the target CPUs of the specified ID
unsigned int getIntTarget(unsigned int ID);
// Returns the value of the Interrupt Acknowledge Register
unsigned int readIntAck(void);
// Writes ID to the End Of Interrupt register
void writeEOI(unsigned int ID);
// ------------------------------------------------------------
// SGI
// ------------------------------------------------------------
// Send a software-generated interrupt
void sendSGI(unsigned int ID, unsigned int core_list, unsigned int filter_list);
// ------------------------------------------------------------
// TrustZone
// ------------------------------------------------------------
// Enables the sending of secure interrupts as FIQs
void enableSecureFIQs(void);
// Disables the sending of secure interrupts as FIQs
void disableSecureFIQs(void);
// Sets the specified ID as secure
void makeIntSecure(unsigned int ID);
// Set the specified ID as non-secure
void makeIntNonSecure(unsigned int ID);
// Returns the security of the specified ID
unsigned int getIntSecurity(unsigned int ID);
#endif
// ------------------------------------------------------------
// End of MP_GIC.h
// ------------------------------------------------------------
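Taken together, the typical enable sequence described in the comments above might look like this in C (gic_setup_example is a hypothetical caller; the argument values mirror the comments):

#include "MP_GIC.h"

void gic_setup_example(void)
{
    enableIntID(MPCORE_PPI_PRIVATE_TIMER);        /* enable source ID 29 */
    setIntPriority(MPCORE_PPI_PRIVATE_TIMER, 0);  /* 0 = the max priority */
    setPriorityMask(0x1F);                        /* set this core's mask to the lowest priority */
    enableGIC();                                  /* global Distributor enable */
    enableGICProcessorInterface();                /* CPU interface enable, per core */
}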

View File

@@ -0,0 +1,134 @@
// ------------------------------------------------------------
// Armv7-A MPCore - Mutex Code
//
// Copyright (c) 2011-2017 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.text
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
//NOTES
// struct mutex_t defined in MP_Mutexes.h
// typedef struct mutex_t
// {
// unsigned int lock; // <-- offset 0
// }
//
// lock: 0xFF = unlocked, 0x0 = Locked by CPU 0, 0x1 = Locked by CPU 1, 0x2 = Locked by CPU 2, 0x3 = Locked by CPU 3
//
.equ UNLOCKED, 0xFF
// ------------------------------------------------------------
.global initMutex
// void initMutex(mutex_t* pMutex)
// Places mutex into a known state
// r0 = address of mutex_t
.type initMutex, "function"
.cfi_startproc
initMutex:
MOV r1, #UNLOCKED // Mark as unlocked
STR r1, [r0]
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global lockMutex
// void lockMutex(mutex_t* pMutex)
// Blocking call, returns once successfully locked a mutex
// r0 = address of mutex_t
.type lockMutex, "function"
.cfi_startproc
lockMutex:
// Is mutex locked?
// -----------------
LDREX r1, [r0] // Read lock field
CMP r1, #UNLOCKED // Compare with "unlocked"
WFENE // If mutex is locked, go into standby
BNE lockMutex // On waking re-check the mutex
// Attempt to lock mutex
// -----------------------
MRC p15, 0, r1, c0, c0, 5 // Read CPU ID register
AND r1, r1, #0x03 // Mask off, leaving the CPU ID field.
STREX r2, r1, [r0] // Attempt to lock mutex by writing this CPU's ID to the lock field
CMP r2, #0x0 // Check whether store completed successfully (0=succeeded)
BNE lockMutex // If store failed, go back to beginning and try again
DMB
BX lr // Return as mutex is now locked by this cpu
.cfi_endproc
// ------------------------------------------------------------
.global unlockMutex
// unsigned int unlockMutex(mutex_t* pMutex)
// Releases mutex, returns 0x0 for success and 0x1 for failure
// r0 = address of mutex_t
.type unlockMutex, "function"
.cfi_startproc
unlockMutex:
// Does this CPU own the mutex?
// -----------------------------
MRC p15, 0, r1, c0, c0, 5 // Read CPU ID register
AND r1, r1, #0x03 // Mask off, leaving the CPU ID in r1
LDR r2, [r0] // Read the lock field of the mutex
CMP r1, r2 // Compare ID of this CPU with the lock owner
MOVNE r0, #0x1 // If ID doesn't match, return "fail"
BXNE lr
// Unlock mutex
// -------------
DMB // Ensure that accesses to shared resource have completed
MOV r1, #UNLOCKED // Write "unlocked" into lock field
STR r1, [r0]
DSB // Ensure that no instructions following the barrier execute until
// all memory accesses prior to the barrier have completed.
SEV // Send event to other CPUs, wakes anyone waiting on a mutex (using WFE)
MOV r0, #0x0 // Return "success"
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global isMutexLocked
// unsigned int isMutexLocked(mutex_t* pMutex)
// Returns 0x0 if mutex unlocked, 0x1 if locked
// r0 = address of mutex_t
.type isMutexLocked, "function"
.cfi_startproc
isMutexLocked:
LDR r0, [r0]
CMP r0, #UNLOCKED
MOVEQ r0, #0x0
MOVNE r0, #0x1
BX lr
.cfi_endproc
// ------------------------------------------------------------
// End of MP_Mutexes.s
// ------------------------------------------------------------
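For comparison, the LDREX/STREX loop in lockMutex corresponds roughly to the following C, assuming GCC's __atomic builtins (lock_mutex_sketch is illustrative only; the assembly additionally uses WFE/SEV so that waiters sleep rather than spin):

#include "MP_Mutexes.h"

#define UNLOCKED_VAL 0xFFu  /* mirrors the UNLOCKED equate above */

/* Spin until the lock field moves from UNLOCKED to this CPU's ID. */
static void lock_mutex_sketch(mutex_t *m, unsigned int cpu_id)
{
    unsigned int expected;
    do {
        expected = UNLOCKED_VAL;  /* only an unlocked mutex can be claimed */
    } while (!__atomic_compare_exchange_n(&m->lock, &expected, cpu_id,
                                          0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}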

View File

@@ -0,0 +1,40 @@
// ------------------------------------------------------------
// MP Mutex Header File
//
// Copyright (c) 2011-2014 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef MP_MUTEX_H
#define MP_MUTEX_H
// 0xFF = unlocked
// 0x0 = Locked by CPU 0
// 0x1 = Locked by CPU 1
// 0x2 = Locked by CPU 2
// 0x3 = Locked by CPU 3
typedef struct
{
unsigned int lock;
} mutex_t;
// Places mutex into a known state
// r0 = address of mutex_t
void initMutex(mutex_t* pMutex);
// Blocking call, returns once successfully locked a mutex
// r0 = address of mutex_t
void lockMutex(mutex_t* pMutex);
// Releases (unlocks) the mutex. Fails if this CPU is not the owner of the mutex.
// Returns 0x0 for success and 0x1 for failure
// r0 = address of mutex_t
unsigned int unlockMutex(mutex_t* pMutex);
// Returns 0x0 if mutex unlocked, 0x1 if locked
// r0 = address of mutex_t
unsigned int isMutexLocked(mutex_t* pMutex);
#endif
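A short usage sketch under this header's contract (frame_lock and worker are illustrative names):

#include "MP_Mutexes.h"

static mutex_t frame_lock;  /* call initMutex(&frame_lock) once before first use */

void worker(void)
{
    lockMutex(&frame_lock);               /* blocks until this CPU owns the mutex */
    /* ... access the shared resource ... */
    if (unlockMutex(&frame_lock) != 0u) {
        /* 0x1: this CPU was not the owner */
    }
}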

View File

@@ -0,0 +1,118 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Private timer functions
//
// Copyright ARM Ltd 2009. All rights reserved.
// ------------------------------------------------------------
.text
.align 3
// PPI ID 29
// Typical set of calls to enable Timer:
// init_private_timer(0xXXXX, 0) <-- Count-down value of 0xXXXX, with auto-reload
// start_private_timer()
// Timer offset from base of private peripheral space --> 0x600
// ------------------------------------------------------------
.global init_private_timer
.type init_private_timer,function
// void init_private_timer(unsigned int load_value, unsigned int auto_reload)
// Sets up the private timer
// r0: initial load value
// r1: IF 0 (AutoReload) ELSE (SingleShot)
init_private_timer:
// Get base address of private peripheral space
MOV r2, r0 // Make a copy of r0 before corrupting
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// Set the load value
STR r2, [r0, #0x600]
// Control register bit layout
// Bit 0 - Enable
// Bit 1 - Auto-Reload // see DE681117
// Bit 2 - IRQ Generation
// Form control reg value
CMP r1, #0 // Check whether to enable auto-reload
MOVNE r2, #0x04 // No auto-reload
MOVEQ r2, #0x06 // With auto-reload
// Store to control register
STR r2, [r0, #0x608]
BX lr
// ------------------------------------------------------------
// void start_private_timer(void)
// Starts the private timer
.global start_private_timer
.type start_private_timer,function
start_private_timer:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x608] // Read control reg
ORR r1, r1, #0x01 // Set enable bit
STR r1, [r0, #0x608] // Write modified value back
BX lr
// ------------------------------------------------------------
// void stop_private_timer(void)
// Stops the private timer
.global stop_private_timer
.type stop_private_timer,function
stop_private_timer:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x608] // Read control reg
BIC r1, r1, #0x01 // Clear enable bit
STR r1, [r0, #0x608] // Write modified value back
BX lr
// ------------------------------------------------------------
// unsigned int get_private_timer_count(void)
// Reads the current value of the timer count register
.global get_private_timer_count
.type get_private_timer_count,function
get_private_timer_count:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r0, [r0, #0x604] // Read count register
BX lr
// ------------------------------------------------------------
// void clear_private_timer_irq(void)
// Clears the private timer interrupt
.global clear_private_timer_irq
.type clear_private_timer_irq,function
clear_private_timer_irq:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// Clear the interrupt by writing 0x1 to the Timer's Interrupt Status register
MOV r1, #1
STR r1, [r0, #0x60C]
BX lr
// ------------------------------------------------------------
// End of MP_PrivateTimer.s
// ------------------------------------------------------------

View File

@@ -0,0 +1,36 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Private timer functions
// Header File
//
// Copyright ARM Ltd 2009. All rights reserved.
// ------------------------------------------------------------
#ifndef _CORTEXA_PRIVATE_TIMER_
#define _CORTEXA_PRIVATE_TIMER_
// Typical set of calls to enable Timer:
// init_private_timer(0xXXXX, 0) <-- Count-down value of 0xXXXX, with auto-reload
// start_private_timer()
// Sets up the private timer
// r0: initial load value
// r1: IF 0 (AutoReload) ELSE (SingleShot)
void init_private_timer(unsigned int load_value, unsigned int auto_reload);
// Starts the private timer
void start_private_timer(void);
// Stops the private timer
void stop_private_timer(void);
// Reads the current value of the timer count register
unsigned int get_private_timer_count(void);
// Clears the private timer interrupt
void clear_private_timer_irq(void);
#endif
// ------------------------------------------------------------
// End of MP_PrivateTimer.h
// ------------------------------------------------------------
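A usage sketch of the call sequence described above (the load value and function names are illustrative):

#include "MP_PrivateTimer.h"

void timer_setup_example(void)
{
    init_private_timer(0x8000, 0);  /* arbitrary count-down value; 0 selects auto-reload */
    start_private_timer();
}

void timer_irq_example(void)  /* would be invoked for PPI ID 29 */
{
    clear_private_timer_irq();  /* writes 0x1 to the timer's Interrupt Status register */
}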

View File

@@ -0,0 +1,188 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Snoop Control Unit (SCU)
// Suitable for Cortex-A5 MPCore and Cortex-A9 MPCore
//
// Copyright (c) 2011-2015 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.text
// ------------------------------------------------------------
// Misc
// ------------------------------------------------------------
.global getNumCPUs
// uint32_t getNumCPUs(void)
// Returns the number of CPUs in the Cluster
.type getNumCPUs, "function"
getNumCPUs:
// Get base address of private peripheral space
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r0, [r0, #0x004] // Read SCU Configuration register
AND r0, r0, #0x3 // Bits 1:0 gives the number of cores-1
ADD r0, r0, #1
BX lr
// ------------------------------------------------------------
// SCU
// ------------------------------------------------------------
// SCU offset from base of private peripheral space --> 0x000
.global enableSCU
// void enableSCU(void)
// Enables the SCU
.type enableSCU, "function"
enableSCU:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r1, [r0, #0x0] // Read the SCU Control Register
ORR r1, r1, #0x1 // Set bit 0 (The Enable bit)
STR r1, [r0, #0x0] // Write back modified value
BX lr
// ------------------------------------------------------------
.global getCPUsInSMP
// uint32_t getCPUsInSMP(void)
// The return value is 1 bit per core:
// bit 0 - CPU 0
// bit 1 - CPU 1
// etc...
.type getCPUsInSMP, "function"
getCPUsInSMP:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
LDR r0, [r0, #0x004] // Read SCU Configuration register
MOV r0, r0, LSR #4 // Bits 7:4 gives the cores in SMP mode, shift then mask
AND r0, r0, #0x0F
BX lr
// ------------------------------------------------------------
.global enableMaintenanceBroadcast
// void enableMaintenanceBroadcast(void)
// Enable the broadcasting of cache & TLB maintenance operations
// When enabled AND in SMP, broadcast all "inner shareable"
// cache and TLB maintenance operations to other SMP cores
.type enableMaintenanceBroadcast, "function"
enableMaintenanceBroadcast:
MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl register
MOV r1, r0
ORR r0, r0, #0x01 // Set the FW bit (bit 0)
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write Aux Ctrl register
BX lr
// ------------------------------------------------------------
.global disableMaintenanceBroadcast
// void disableMaintenanceBroadcast(void)
// Disable the broadcasting of cache & TLB maintenance operations
.type disableMaintenanceBroadcast, "function"
disableMaintenanceBroadcast:
MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl register
BIC r0, r0, #0x01 // Clear the FW bit (bit 0)
MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl register
BX lr
// ------------------------------------------------------------
.global secureSCUInvalidate
// void secureSCUInvalidate(uint32_t cpu, uint32_t ways)
// cpu: 0x0=CPU 0 0x1=CPU 1 etc...
// This function invalidates the SCU copy of the tag rams
// for the specified core. Typically only done at start-up.
// Possible flow:
// - Invalidate L1 caches
// - Invalidate SCU copy of TAG RAMs
// - Join SMP
.type secureSCUInvalidate, "function"
secureSCUInvalidate:
AND r0, r0, #0x03 // Mask off unused bits of CPU ID
MOV r0, r0, LSL #2 // Convert into bit offset (four bits per core)
AND r1, r1, #0x0F // Mask off unused bits of ways
MOV r1, r1, LSL r0 // Shift ways into the correct CPU field
MRC p15, 4, r2, c15, c0, 0 // Read periph base address
STR r1, [r2, #0x0C] // Write to SCU Invalidate All in Secure State
BX lr
// ------------------------------------------------------------
// TrustZone
// ------------------------------------------------------------
.global setPrivateTimersNonSecureAccess
// void setPrivateTimersNonSecureAccess(uint32_t secure, uint32_t cpu)
// Sets whether the Private Timer & Watchdog can be accessed in NS world
// r0 - IF 0 (secure access only) ELSE (ns access allowed)
.type setPrivateTimersNonSecureAccess, "function"
setPrivateTimersNonSecureAccess:
AND r0, r0, #0x01 // Mask
ADD r1, r1, #0x04 // Adjust r1, as field starts at bit 4
MOV r0, r0, LSL r1 // Shift bit into correct position for CPU
MOV r12, #1
MOV r12, r12, LSL r1 // Form a mask to clear existing bit value
MRC p15, 4, r2, c15, c0, 0 // Read periph base address
LDR r3, [r2, #0x54] // Read SCU Secure Access Control (SSAC) register
BIC r3, r3, r12 // Clear current value
ORR r3, r3, r0 // Set to specified value
STR r3, [r2, #0x54] // Write SCU Secure Access Control (SSAC) register
BX lr
// ------------------------------------------------------------
.global setGlobalTimerNonSecureAccess
// void setGlobalTimerNonSecureAccess(uint32_t secure, uint32_t cpu)
// Sets whether the Global Timer can be accessed in NS world
// r0 - IF 0 (secure access only) ELSE (ns access allowed)
.type setGlobalTimerNonSecureAccess, "function"
setGlobalTimerNonSecureAccess:
AND r0, r0, #0x01 // Mask
ADD r1, r1, #0x08 // Adjust r1, as field starts at bit 8
MOV r0, r0, LSL r1 // Shift bit into correct position for CPU
MOV r12, #1
MOV r12, r12, LSL r1 // Form a mask to clear existing bit value
MRC p15, 4, r2, c15, c0, 0 // Read periph base address
LDR r3, [r2, #0x54] // Read SCU Secure Access Control (SSAC) register
BIC r3, r3, r12 // Clear current value
ORR r3, r3, r0 // Set to specified value
STR r3, [r2, #0x54] // Write SCU Secure Access Control (SSAC) register
BX lr
// ------------------------------------------------------------
// End of MP_SCU.s
// ------------------------------------------------------------

View File

@@ -0,0 +1,65 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Snoop Control Unit (SCU)
// Suitable for Cortex-A5 MPCore and Cortex-A9 MPCore
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _CORTEXA_SCU_H
#define _CORTEXA_SCU_H
// ------------------------------------------------------------
// Misc
// ------------------------------------------------------------
// Returns the number of cores in the cluster
unsigned int getNumCPUs(void);
// ------------------------------------------------------------
// SCU
// ------------------------------------------------------------
// Enables the SCU
void enableSCU(void);
// The return value is 1 bit per core:
// bit 0 (0x1) - CPU 0
// bit 1 (0x2) - CPU 1
// bit 2 (0x4) - CPU 2
// bit 3 (0x8) - CPU 3
unsigned int getCPUsInSMP(void);
// Enable the broadcasting of cache & TLB maintenance operations
// When enabled AND in SMP, broadcast all "inner shareable"
// cache and TLB maintenance operations to other SMP cores
void enableMaintenanceBroadcast(void);
// Disable the broadcasting of cache & TLB maintenance operations
void disableMaintenanceBroadcast(void);
// cpu: 0x0=CPU 0 0x1=CPU 1 etc...
// This function invalidates the SCU copy of the tag rams
// for the specified core.
void secureSCUInvalidate(unsigned int cpu, unsigned int ways);
// ------------------------------------------------------------
// TrustZone
// ------------------------------------------------------------
// Sets whether the Private Timer & Watchdog can be accessed in NS world
// secure - IF 0 (secure access only) ELSE (ns access allowed)
void setPrivateTimersNonSecureAccess(unsigned int secure, unsigned int cpu);
// Sets whether the Global Timer can be accessed in NS world
// secure - IF 0 (secure access only) ELSE (ns access allowed)
void setGlobalTimerNonSecureAccess(unsigned int secure, unsigned int cpu);
#endif
// ------------------------------------------------------------
// End of MP_SCU.h
// ------------------------------------------------------------
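Following the possible flow noted in MP_SCU.s, a hedged per-core bring-up sketch (scu_bringup_example is illustrative; 0xF selects all four ways):

#include "MP_SCU.h"

void scu_bringup_example(unsigned int cpu)
{
    secureSCUInvalidate(cpu, 0xF);  /* invalidate this core's SCU tag copy before joining SMP */
    enableSCU();                    /* global SCU enable */
    enableMaintenanceBroadcast();   /* broadcast cache/TLB maintenance when in SMP */
}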

View File

@@ -0,0 +1,257 @@
del tx.a
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_context_restore.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_context_save.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_interrupt_control.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_interrupt_disable.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_interrupt_restore.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_irq_nesting_end.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_irq_nesting_start.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_schedule.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_core_get.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_core_preempt.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_current_state_get.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_current_thread_get.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_initialize_wait.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_low_level_initialize.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_protect.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_time_get.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_smp_unprotect.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_stack_build.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_system_return.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_thread_vectored_context_save.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 ../src/tx_timer_interrupt.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_allocate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_pool_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_block_release.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_allocate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_pool_search.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_byte_release.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_set.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_event_flags_set_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_initialize_high_level.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_initialize_kernel_enter.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_initialize_kernel_setup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_priority_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_mutex_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_flush.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_front_send.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_receive.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_send.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_queue_send_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_ceiling_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_cleanup.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_semaphore_put_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_entry_exit_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_identify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_preemption_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_priority_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_relinquish.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_reset.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_resume.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_shell_entry.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_sleep.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_stack_analyze.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_stack_error_handler.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_stack_error_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_suspend.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_system_preempt_check.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_system_resume.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_system_suspend.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_terminate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_time_slice.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_time_slice_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_timeout.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_wait_abort.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_time_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_time_set.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_activate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_deactivate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_expiration_process.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_performance_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_performance_system_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_system_activate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_system_deactivate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_thread_entry.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_enable.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_disable.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_interrupt_control.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_isr_enter_insert.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_isr_exit_insert.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_object_register.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_object_unregister.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_user_event_insert.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_buffer_full_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_event_filter.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_trace_event_unfilter.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_allocate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_pool_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_pool_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_pool_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_pool_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_block_release.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_allocate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_pool_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_pool_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_pool_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_pool_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_byte_release.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_set.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_event_flags_set_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_mutex_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_flush.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_front_send.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_receive.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_send.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_queue_send_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_ceiling_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_prioritize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_put.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_semaphore_put_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_entry_exit_notify.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_preemption_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_priority_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_relinquish.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_reset.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_resume.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_suspend.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_terminate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_time_slice_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_thread_wait_abort.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_activate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_change.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_create.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_deactivate.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_delete.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/txe_timer_info_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_current_state_set.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_debug_entry_insert.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_high_level_initialize.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_rebalance_execute_list.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_core_exclude.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_core_exclude_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_smp_core_exclude.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_timer_smp_core_exclude_get.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common_smp/inc -I../inc ../../../../common_smp/src/tx_thread_smp_utilities.c
arm-none-eabi-ar -r tx.a tx_thread_stack_build.o tx_thread_schedule.o tx_thread_system_return.o tx_thread_context_save.o tx_thread_context_restore.o tx_timer_interrupt.o tx_thread_interrupt_control.o
arm-none-eabi-ar -r tx.a tx_initialize_low_level.o tx_thread_interrupt_disable.o
arm-none-eabi-ar -r tx.a tx_thread_interrupt_restore.o tx_thread_irq_nesting_end.o tx_thread_irq_nesting_start.o
arm-none-eabi-ar -r tx.a tx_block_allocate.o tx_block_pool_cleanup.o tx_block_pool_create.o tx_block_pool_delete.o tx_block_pool_info_get.o
arm-none-eabi-ar -r tx.a tx_block_pool_initialize.o tx_block_pool_performance_info_get.o tx_block_pool_performance_system_info_get.o tx_block_pool_prioritize.o
arm-none-eabi-ar -r tx.a tx_block_release.o tx_byte_allocate.o tx_byte_pool_cleanup.o tx_byte_pool_create.o tx_byte_pool_delete.o tx_byte_pool_info_get.o
arm-none-eabi-ar -r tx.a tx_byte_pool_initialize.o tx_byte_pool_performance_info_get.o tx_byte_pool_performance_system_info_get.o tx_byte_pool_prioritize.o
arm-none-eabi-ar -r tx.a tx_byte_pool_search.o tx_byte_release.o tx_event_flags_cleanup.o tx_event_flags_create.o tx_event_flags_delete.o tx_event_flags_get.o
arm-none-eabi-ar -r tx.a tx_event_flags_info_get.o tx_event_flags_initialize.o tx_event_flags_performance_info_get.o tx_event_flags_performance_system_info_get.o
arm-none-eabi-ar -r tx.a tx_event_flags_set.o tx_event_flags_set_notify.o tx_initialize_high_level.o tx_initialize_kernel_enter.o tx_initialize_kernel_setup.o
arm-none-eabi-ar -r tx.a tx_mutex_cleanup.o tx_mutex_create.o tx_mutex_delete.o tx_mutex_get.o tx_mutex_info_get.o tx_mutex_initialize.o tx_mutex_performance_info_get.o
arm-none-eabi-ar -r tx.a tx_mutex_performance_system_info_get.o tx_mutex_prioritize.o tx_mutex_priority_change.o tx_mutex_put.o tx_queue_cleanup.o tx_queue_create.o
arm-none-eabi-ar -r tx.a tx_queue_delete.o tx_queue_flush.o tx_queue_front_send.o tx_queue_info_get.o tx_queue_initialize.o tx_queue_performance_info_get.o
arm-none-eabi-ar -r tx.a tx_queue_performance_system_info_get.o tx_queue_prioritize.o tx_queue_receive.o tx_queue_send.o tx_queue_send_notify.o tx_semaphore_ceiling_put.o
arm-none-eabi-ar -r tx.a tx_semaphore_cleanup.o tx_semaphore_create.o tx_semaphore_delete.o tx_semaphore_get.o tx_semaphore_info_get.o tx_semaphore_initialize.o
arm-none-eabi-ar -r tx.a tx_semaphore_performance_info_get.o tx_semaphore_performance_system_info_get.o tx_semaphore_prioritize.o tx_semaphore_put.o tx_semaphore_put_notify.o
arm-none-eabi-ar -r tx.a tx_thread_create.o tx_thread_delete.o tx_thread_entry_exit_notify.o tx_thread_identify.o tx_thread_info_get.o tx_thread_initialize.o
arm-none-eabi-ar -r tx.a tx_thread_performance_info_get.o tx_thread_performance_system_info_get.o tx_thread_preemption_change.o tx_thread_priority_change.o tx_thread_relinquish.o
arm-none-eabi-ar -r tx.a tx_thread_reset.o tx_thread_resume.o tx_thread_shell_entry.o tx_thread_sleep.o tx_thread_stack_analyze.o tx_thread_stack_error_handler.o
arm-none-eabi-ar -r tx.a tx_thread_stack_error_notify.o tx_thread_suspend.o tx_thread_system_preempt_check.o tx_thread_system_resume.o tx_thread_system_suspend.o
arm-none-eabi-ar -r tx.a tx_thread_terminate.o tx_thread_time_slice.o tx_thread_time_slice_change.o tx_thread_timeout.o tx_thread_wait_abort.o tx_time_get.o
arm-none-eabi-ar -r tx.a tx_time_set.o tx_timer_activate.o tx_timer_change.o tx_timer_create.o tx_timer_deactivate.o tx_timer_delete.o tx_timer_expiration_process.o
arm-none-eabi-ar -r tx.a tx_timer_info_get.o tx_timer_initialize.o tx_timer_performance_info_get.o tx_timer_performance_system_info_get.o tx_timer_system_activate.o
arm-none-eabi-ar -r tx.a tx_timer_system_deactivate.o tx_timer_thread_entry.o tx_trace_enable.o tx_trace_disable.o tx_trace_initialize.o tx_trace_interrupt_control.o
arm-none-eabi-ar -r tx.a tx_trace_isr_enter_insert.o tx_trace_isr_exit_insert.o tx_trace_object_register.o tx_trace_object_unregister.o tx_trace_user_event_insert.o
arm-none-eabi-ar -r tx.a tx_trace_buffer_full_notify.o tx_trace_event_filter.o tx_trace_event_unfilter.o
arm-none-eabi-ar -r tx.a txe_block_allocate.o txe_block_pool_create.o txe_block_pool_delete.o txe_block_pool_info_get.o txe_block_pool_prioritize.o txe_block_release.o
arm-none-eabi-ar -r tx.a txe_byte_allocate.o txe_byte_pool_create.o txe_byte_pool_delete.o txe_byte_pool_info_get.o txe_byte_pool_prioritize.o txe_byte_release.o
arm-none-eabi-ar -r tx.a txe_event_flags_create.o txe_event_flags_delete.o txe_event_flags_get.o txe_event_flags_info_get.o txe_event_flags_set.o
arm-none-eabi-ar -r tx.a txe_event_flags_set_notify.o txe_mutex_create.o txe_mutex_delete.o txe_mutex_get.o txe_mutex_info_get.o txe_mutex_prioritize.o
arm-none-eabi-ar -r tx.a txe_mutex_put.o txe_queue_create.o txe_queue_delete.o txe_queue_flush.o txe_queue_front_send.o txe_queue_info_get.o txe_queue_prioritize.o
arm-none-eabi-ar -r tx.a txe_queue_receive.o txe_queue_send.o txe_queue_send_notify.o txe_semaphore_ceiling_put.o txe_semaphore_create.o txe_semaphore_delete.o
arm-none-eabi-ar -r tx.a txe_semaphore_get.o txe_semaphore_info_get.o txe_semaphore_prioritize.o txe_semaphore_put.o txe_semaphore_put_notify.o txe_thread_create.o
arm-none-eabi-ar -r tx.a txe_thread_delete.o txe_thread_entry_exit_notify.o txe_thread_info_get.o txe_thread_preemption_change.o txe_thread_priority_change.o
arm-none-eabi-ar -r tx.a txe_thread_relinquish.o txe_thread_reset.o txe_thread_resume.o txe_thread_suspend.o txe_thread_terminate.o txe_thread_time_slice_change.o
arm-none-eabi-ar -r tx.a txe_thread_wait_abort.o txe_timer_activate.o txe_timer_change.o txe_timer_create.o txe_timer_deactivate.o txe_timer_delete.o txe_timer_info_get.o
arm-none-eabi-ar -r tx.a tx_thread_smp_current_state_set.o tx_thread_smp_debug_entry_insert.o tx_thread_smp_high_level_initialize.o
arm-none-eabi-ar -r tx.a tx_thread_smp_rebalance_execute_list.o tx_thread_smp_core_exclude.o tx_thread_smp_core_exclude_get.o
arm-none-eabi-ar -r tx.a tx_timer_smp_core_exclude.o tx_timer_smp_core_exclude_get.o tx_thread_smp_utilities.o
arm-none-eabi-ar -r tx.a tx_thread_smp_core_get.o tx_thread_smp_core_preempt.o tx_thread_smp_current_state_get.o tx_thread_smp_current_thread_get.o tx_thread_smp_initialize_wait.o
arm-none-eabi-ar -r tx.a tx_thread_smp_low_level_initialize.o tx_thread_smp_protect.o tx_thread_smp_time_get.o tx_thread_smp_unprotect.o

View File

@@ -0,0 +1,8 @@
arm-none-eabi-gcc -c -g -I../../../../common_smp/inc -I../inc -mcpu=cortex-a5 sample_threadx.c
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 startup.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 MP_GIC.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 MP_SCU.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 MP_Mutexes.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 MP_PrivateTimer.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 v7.S
arm-none-eabi-gcc -T sample_threadx.ld -e Vectors -o sample_threadx.axf MP_PrivateTimer.o MP_GIC.o MP_Mutexes.o MP_SCU.o sample_threadx.o startup.o v7.o tx.a -Wl,-M > sample_threadx.map

View File

@@ -0,0 +1,381 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
#define DEMO_STACK_SIZE 1024
#define DEMO_BYTE_POOL_SIZE 9120
#define DEMO_BLOCK_POOL_SIZE 100
#define DEMO_QUEUE_SIZE 100
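/* Sizing note: DEMO_BYTE_POOL_SIZE must cover the eight 1024-byte thread
   stacks plus the queue storage (DEMO_QUEUE_SIZE * sizeof(ULONG)) and the
   block pool area carved out of it in tx_application_define below; the
   remaining slack absorbs the byte pool's per-allocation overhead. */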
/* Define the ThreadX object control blocks... */
TX_THREAD thread_0;
TX_THREAD thread_1;
TX_THREAD thread_2;
TX_THREAD thread_3;
TX_THREAD thread_4;
TX_THREAD thread_5;
TX_THREAD thread_6;
TX_THREAD thread_7;
TX_TIMER timer_0;
TX_QUEUE queue_0;
TX_SEMAPHORE semaphore_0;
TX_MUTEX mutex_0;
TX_EVENT_FLAGS_GROUP event_flags_0;
TX_BYTE_POOL byte_pool_0;
TX_BLOCK_POOL block_pool_0;
/* Define the counters used in the demo application... */
ULONG thread_0_counter;
ULONG thread_1_counter;
ULONG thread_1_messages_sent;
ULONG thread_2_counter;
ULONG thread_2_messages_received;
ULONG thread_3_counter;
ULONG thread_4_counter;
ULONG thread_5_counter;
ULONG thread_6_counter;
ULONG thread_7_counter;
/* Define thread prototypes. */
void thread_0_entry(ULONG thread_input);
void thread_1_entry(ULONG thread_input);
void thread_2_entry(ULONG thread_input);
void thread_3_and_4_entry(ULONG thread_input);
void thread_5_entry(ULONG thread_input);
void thread_6_and_7_entry(ULONG thread_input);
#ifdef TX_ENABLE_EVENT_TRACE
UCHAR event_buffer[65536];
#endif
int main(void)
{
/* Enter ThreadX. */
tx_kernel_enter();
return 0;
}
/* Define what the initial system looks like. */
void tx_application_define(void *first_unused_memory)
{
CHAR *pointer = TX_NULL;
#ifdef TX_ENABLE_EVENT_TRACE
tx_trace_enable(event_buffer, sizeof(event_buffer), 32);
#endif
/* Create a byte memory pool from which to allocate the thread stacks. */
tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
/* Allocate the stack for thread 0. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
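/* Note on the tx_thread_create arguments used throughout this demo: after
   the entry function and its ULONG input come the stack start and size,
   then priority (0 is highest), preemption-threshold, time slice in ticks
   (TX_NO_TIME_SLICE disables time-slicing), and the auto-start option.
   Setting preemption-threshold equal to priority, as here, leaves
   preemption-threshold effectively disabled. */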
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
/* Create the message queue shared by threads 1 and 2. */
tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
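/* With TX_1_ULONG each message is one ULONG, so the buffer allocated above
   holds exactly DEMO_QUEUE_SIZE messages. */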
/* Create the semaphore used by threads 3 and 4. */
tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
/* Create the event flags group used by threads 1 and 5. */
tx_event_flags_create(&event_flags_0, "event flags 0");
/* Create the mutex used by threads 6 and 7 without priority inheritance. */
tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
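/* TX_NO_INHERIT disables priority inheritance; that is acceptable here
   because threads 6 and 7 run at the same priority, so unbounded priority
   inversion cannot occur between them. */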
/* Allocate the memory for a small block pool. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
/* Create a block memory pool to allocate a message buffer from. */
tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
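/* ThreadX prefixes each block with one pointer of overhead, so this
   100-byte area yields 100 / (sizeof(ULONG) + sizeof(VOID *)) = 12 blocks
   on a 32-bit target. */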
/* Allocate a block and release the block memory. */
tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
/* Release the block back to the pool. */
tx_block_release(pointer);
}
/* Define the test threads. */
void thread_0_entry(ULONG thread_input)
{
UINT status;
/* This thread simply sits in a while-forever-sleep loop. */
while(1)
{
/* Increment the thread counter. */
thread_0_counter++;
/* Sleep for 10 ticks. */
tx_thread_sleep(10);
/* Set event flag 0 to wake up thread 5. */
status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
/* Check status. */
if (status != TX_SUCCESS)
break;
}
}
void thread_1_entry(ULONG thread_input)
{
UINT status;
/* This thread simply sends messages to a queue shared by thread 2. */
while(1)
{
/* Increment the thread counter. */
thread_1_counter++;
/* Send message to queue 0. */
status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
/* Check completion status. */
if (status != TX_SUCCESS)
break;
/* Increment the messages-sent counter. */
thread_1_messages_sent++;
}
}
void thread_2_entry(ULONG thread_input)
{
ULONG received_message;
UINT status;
/* This thread retrieves messages placed on the queue by thread 1. */
while(1)
{
/* Increment the thread counter. */
thread_2_counter++;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
/* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
}
void thread_3_and_4_entry(ULONG thread_input)
{
UINT status;
/* This function is executed from thread 3 and thread 4. As the loop
below shows, these functions compete for ownership of semaphore_0. */
while(1)
{
/* Increment the thread counter. */
if (thread_input == 3)
thread_3_counter++;
else
thread_4_counter++;
/* Get the semaphore with suspension. */
status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
/* Check status. */
if (status != TX_SUCCESS)
break;
/* Sleep for 2 ticks to hold the semaphore. */
tx_thread_sleep(2);
/* Release the semaphore. */
status = tx_semaphore_put(&semaphore_0);
/* Check status. */
if (status != TX_SUCCESS)
break;
}
}
void thread_5_entry(ULONG thread_input)
{
UINT status;
ULONG actual_flags;
/* This thread simply waits for an event in a forever loop. */
while(1)
{
/* Increment the thread counter. */
thread_5_counter++;
/* Wait for event flag 0. */
status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
if ((status != TX_SUCCESS) || (actual_flags != 0x1))
break;
}
}
void thread_6_and_7_entry(ULONG thread_input)
{
UINT status;
/* This function is executed from thread 6 and thread 7. As the loop
below shows, these functions compete for ownership of mutex_0. */
while(1)
{
/* Increment the thread counter. */
if (thread_input == 6)
thread_6_counter++;
else
thread_7_counter++;
/* Get the mutex with suspension. */
status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
/* Check status. */
if (status != TX_SUCCESS)
break;
/* Get the mutex again with suspension. This shows
that an owning thread may retrieve the mutex it
owns multiple times. */
status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
/* Check status. */
if (status != TX_SUCCESS)
break;
/* Sleep for 2 ticks to hold the mutex. */
tx_thread_sleep(2);
/* Release the mutex. */
status = tx_mutex_put(&mutex_0);
/* Check status. */
if (status != TX_SUCCESS)
break;
/* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
/* Check status. */
if (status != TX_SUCCESS)
break;
}
}

View File

@@ -0,0 +1,182 @@
/* Linker script to place sections and symbol values.
* It references following symbols, which must be defined in code:
* Vectors : Entry point
*
* It defines following symbols, which code can use without definition:
* __code_start
* __exidx_start
* __exidx_end
* __data_start
* __preinit_array_start
* __preinit_array_end
* __init_array_start
* __init_array_end
* __fini_array_start
* __fini_array_end
* __bss_start__
* __bss_end__
* __end__
* __stack
* __irq_stack
* __pagetable_start
*/
ENTRY(Vectors)
SECTIONS
{
.vectors 0x80008000:
{
_exec = .;
__code_start = .;
KEEP(*(VECTORS))
}
.init :
{
KEEP (*(SORT_NONE(.init)))
}
.text :
{
KEEP(*(ENABLE_CACHES))
*(.text*)
}
.fini :
{
KEEP (*(SORT_NONE(.fini)))
}
.rodata :
{
*(.rodata .rodata.* .gnu.linkonce.r.*)
}
.eh_frame :
{
KEEP (*(.eh_frame))
}
.ARM.extab :
{
*(.ARM.extab* .gnu.linkonce.armextab.*)
}
.ARM.exidx :
{
__exidx_start = .;
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
__exidx_end = .;
}
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
.init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array ))
PROVIDE_HIDDEN (__init_array_end = .);
}
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT(.fini_array.*)))
KEEP (*(.fini_array ))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr :
{
KEEP (*(.jcr))
}
.data :
{
__data_start = . ;
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.bss :
{
. = ALIGN(4);
__bss_start__ = .;
*(.bss*)
*(COMMON)
. = ALIGN(4);
__bss_end__ = .;
}
.heap (NOLOAD):
{
. = ALIGN(64);
__end__ = .;
PROVIDE(end = .);
. = . + 0xA0000;
}
.stack (NOLOAD):
{
. = ALIGN(64);
. = . + 4 * 0x4000;
__stack = .;
_stack_init_usr = .;
}
.irq_stacks (NOLOAD):
{
. = ALIGN(64);
. = . + 4 * 1024;
__irq_stack = .;
_stack_init_irq = .;
}
_end = .;
.pagetable 0x80100000 (NOLOAD):
{
_page_table_top = .;
__pagetable_start = .;
. = . + 0x4000;
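/* 0x4000 reserves 16 KB: one short-descriptor L1 translation table of
 * 4096 32-bit entries, each mapping a 1 MB section of the 4 GB space. */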
}
}

View File

@@ -0,0 +1,690 @@
@; ------------------------------------------------------------
@; Cortex-A15 MPCore SMP Prime Number Generator Example
@;
@; Copyright (c) 2011-2012 ARM Ltd. All rights reserved.
@; ------------------------------------------------------------
@
@ PRESERVE8
@
@ AREA StartUp,CODE,READONLY
@
@; Standard definitions of mode bits and interrupt (I&F) flags in PSRs
@
Mode_USR = 0x10
Mode_FIQ = 0x11
Mode_IRQ = 0x12
Mode_SVC = 0x13
Mode_ABT = 0x17
Mode_UNDEF = 0x1B
Mode_SYS = 0x1F
I_Bit = 0x80 @ when I bit is set, IRQ is disabled
F_Bit = 0x40 @ when F bit is set, FIQ is disabled
SYS_MODE = 0xDF
SVC_MODE = 0xD3
IRQ_MODE = 0xD2
@; ------------------------------------------------------------
@; Porting defines
@; ------------------------------------------------------------
@
L1_COHERENT = 0x00014c06 @ Template descriptor for coherent memory
L1_NONCOHERENT = 0x00000c1e @ Template descriptor for non-coherent memory
L1_DEVICE = 0x00000c06 @ Template descriptor for device memory
.section VECTORS, "ax"
.align 3
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
@; ------------------------------------------------------------
@
@ ENTRY
@
.global Vectors
Vectors:
B Reset_Handler
B Undefined_Handler
B SVC_Handler
B Prefetch_Handler
B Abort_Handler
B Hypervisor_Handler
B IRQ_Handler
B FIQ_Handler
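@ The eight branches above form the ARM exception vector table. Reset_Handler
@ installs its base address in VBAR (MCR p15, 0, rX, c12, c0, 0) below, so the
@ table does not need to be copied to address 0.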
@; ------------------------------------------------------------
@; Handlers for unused exceptions
@; ------------------------------------------------------------
@
Undefined_Handler:
B Undefined_Handler
SVC_Handler:
B SVC_Handler
Prefetch_Handler:
B Prefetch_Handler
Abort_Handler:
B Abort_Handler
Hypervisor_Handler:
B Hypervisor_Handler
FIQ_Handler:
B FIQ_Handler
@; ------------------------------------------------------------
@; Imports
@; ------------------------------------------------------------
.global readIntAck
.global writeEOI
.global enableGIC
.global enableGICProcessorInterface
.global setPriorityMask
.global enableIntID
.global setIntPriority
.global joinSMP
.global invalidateCaches
.global disableHighVecs
.global _start
@; [Grape Change Start]
@; IMPORT main_app
.global _tx_thread_smp_initialize_wait
.global _tx_thread_smp_release_cores_flag
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
.global _tx_thread_smp_inter_core_interrupts
.global enableBranchPrediction
.global enableCaches
VFPEnable = 0x40000000 @ VFP enable value
@;/*------------------------------------------------------------------------*/
@;/*--- Versatile Express(Timer0) ---*/
GIC_DIST_CPUTARGET = 0x2C001820
GIC_DIST_CPUTARGET_VALUE = 0x000f0000
GIC_DIST_CONFIG = 0x2C001C08
GIC_DIST_CONFIG_VALUE = 0x00000000
GIC_DIST_PRIO = 0x2C001420
GIC_DIST_PRIO_VALUE = 0x00a00000
GIC_DIST_CONTROL = 0x2C001000
GIC_DIST_CONTROL_VALUE = 0x00000001
GIC_CPU_CONTROL = 0x2C002000
GIC_CPU_CONTROL_VALUE = 0x00000001
GIC_CPU_PRIO_MASK = 0x2C002004
GIC_CPU_PRIO_MASK_VALUE = 0x000000ff
GIC_DIST_ENABLE_SET = 0x2C001104
GIC_DIST_ENABLE_SET_VALUE = 0x00000004
GIC_CPU_INTACK = 0x2C00200C
GIC_CPU_EOI = 0x2C002010
;
;
;
TIMCLK_CTRL = 0x1C020000
TIMCLK_CTRL_VALUE = 0x00028000 @ Use EXTCLK (1MHz) for TIMCLK not REFCLK32KHZ
TIMER_LOAD = 0x1C110000
TIMER_LOAD_VALUE = 0x00000140 @ 10ms
TIMER_CTRL = 0x1C110008
TIMER_CTRL_STOP = 0x00000020
TIMER_CTRL_VALUE = 0x000000E0
TIMER_ACK = 34 @ Timer0
TIMER_INT_CLR = 0x1C11000C
;
HANDLER_SET = 0x80000018
HANDLER_SET_VALUE = 0xE59FF018
HANDLER_ADDRESS = 0x80000038 @ irq
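@ HANDLER_SET_VALUE (0xE59FF018) encodes "LDR pc, [pc, #0x18]". Written at
@ 0x80000018 (the IRQ vector slot), it loads its jump target from
@ HANDLER_ADDRESS: the pc reads as the instruction address + 8, plus 0x18.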
@;/*--- Versatile Express(Timer0) ---*/
@;/*------------------------------------------------------------------------*/
@; [Grape Change End]
.global _page_table_top
.global _exec
.global _stack_init_irq
.global _stack_init_usr
@; ------------------------------------------------------------
@; Interrupt Handler
@; ------------------------------------------------------------
@
@ EXPORT IRQ_Handler
.align 2
.global IRQ_Handler
.type IRQ_Handler,function
IRQ_Handler:
@; [Grape Change Start]
.global __tx_irq_processing_return
@; SUB lr, lr, #4 ; Pre-adjust lr
@; SRSFD sp!, #Mode_IRQ ; Save lr and SPRS to IRQ mode stack
@; PUSH {r0-r4, r12} ; Save APCS corruptible registers to IRQ mode stack (and maintain 8 byte alignment)
@; /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
PUSH {r4, r5} @ Save some preserved registers (r5 is saved just for 8-byte alignment)
@; [Grape Change End]
@ Acknowledge the interrupt
BL readIntAck
MOV r4, r0
//
// This example only uses (and enables) one. At this point
// you would normally check the ID, and clear the source.
//
//
// Additional code to handle the private timer interrupt on CPU0
//
CMP r0, #29 // If not the Private Timer interrupt (ID 29), bypass it
BNE by_pass
// [EL Change Start]
// MOV r0, #0x04 // Code for SYS_WRITE0
// LDR r1, =irq_handler_message0
// SVC 0x123456
// [EL Change End]
// Clear timer interrupt
BL clear_private_timer_irq
DSB
// [EL Change Start]
BL _tx_timer_interrupt // Timer interrupt handler
// [EL Change End]
B by_pass2
by_pass:
// [EL Change Start]
//
// Additional code to handle SGI on CPU0
//
//
// MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
// ANDS r0, r0, #0x03 // Mask off, leaving the CPU ID field
// BNE by_pass2
//
// MOV r0, #0x04 // Code for SYS_WRITE0
// LDR r1, =irq_handler_message1
// SVC 0x123456
//
// /* Just increment the per-thread interrupt count for analysis purposes. */
//
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
LSL r0, r0, #2 // Build offset to array indexes
LDR r1,=_tx_thread_smp_inter_core_interrupts // Pickup base address of core interrupt counter array
ADD r1, r1, r0 // Build array index
LDR r0, [r1] // Pickup counter
ADD r0, r0, #1 // Increment counter
STR r0, [r1] // Store back counter
//
// [EL Change End]
by_pass2:
// Write end of interrupt reg
MOV r0, r4
BL writeEOI
// [EL Change Start]
//
// /* Jump to context restore to restore system context. */
POP {r4, r5} // Recover preserved registers
B _tx_thread_context_restore
// POP {r0-r4, r12} // Restore stacked APCS registers
// MOV r2, #0x01 // Set r2 so CPU leaves holding pen
// RFEFD sp! // Return from exception
// [EL Change End]
@; ------------------------------------------------------------
@; Reset Handler - Generic initialization, run by all CPUs
@; ------------------------------------------------------------
@
@ EXPORT Reset_Handler
.align 2
.global Reset_Handler
.type Reset_Handler,function
Reset_Handler:
@ ;
@ ; Set ACTLR.SMP bit
@ ; ------------------
BL joinSMP
@;
@; Disable caches, MMU and branch prediction in case they were left enabled from an earlier run
@; This does not need to be done from a cold reset
@; ------------------------------------------------------------
MRC p15, 0, r0, c1, c0, 0 @ Read CP15 System Control register
BIC r0, r0, #(0x1 << 12) @ Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) @ Clear C bit 2 to disable D Cache
BIC r0, r0, #0x1 @ Clear M bit 0 to disable MMU
BIC r0, r0, #(0x1 << 11) @ Clear Z bit 11 to disable branch prediction
MCR p15, 0, r0, c1, c0, 0 @ Write CP15 System Control register
@; The MMU is enabled later, before calling main(). Caches and branch prediction are enabled inside main(),
@; after the MMU has been enabled and scatterloading has been performed.
@
@ ;
@ ; Setup stacks
@ ;---------------
MRC p15, 0, r0, c0, c0, 5 @ Read CPU ID register
ANDS r0, r0, #0x03 @ Mask off, leaving the CPU ID field
@; [Grape Change Start]
@; MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
@; LDR r1, =_stack_init_irq ; IRQ stacks for CPU 0,1,2,3
@; SUB r1, r1, r0, LSL #8 ; 256 bytes of IRQ stack per CPU (0,1,2,3) - see scatter.scat
@; MOV sp, r1
@;
@; MSR CPSR_c, #Mode_SYS:OR:I_Bit:OR:F_Bit ; Interrupts initially disabled
@; LDR r1, =_stack_init_usr ; App stacks for all CPUs
@; SUB r1, r1, r0, LSL #12 ; 0x1000 bytes of App stack per CPU - see scatter.scat
@; MOV sp, r1
MOV r1, #IRQ_MODE @ Build IRQ mode CPSR
MSR CPSR_c, r1 @ Enter IRQ mode
@ MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
LDR r1, =_stack_init_irq @ IRQ stacks for CPU 0,1,2,3
SUB r1, r1, r0, LSL #10 @ 1024 bytes of IRQ stack per CPU (0,1,2,3) - see scatter.scat
MOV sp, r1
MOV r1, #SYS_MODE @ Build SYS mode CPSR
MSR CPSR_c, r1 @ Enter SYS mode
@ MSR CPSR_c, #Mode_SYS:OR:I_Bit:OR:F_Bit @ Interrupts initially disabled
LDR r1, =_stack_init_usr @ App stacks for all CPUs
SUB r1, r1, r0, LSL #12 @ 0x1000 bytes of App stack per CPU - see scatter.scat
MOV sp, r1
MOV r2, #SVC_MODE @ Build SVC mode CPSR
MSR CPSR_c, r2 @ Enter SVC mode
@ MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit @ Interrupts initially disabled
MOV sp, r1
@; [Grape Change End]
@
@ ;
@ ; Set vector base address
@ ; ------------------------
LDR r0, =Vectors
MCR p15, 0, r0, c12, c0, 0 @ Write Secure or Non-secure Vector Base Address
BL disableHighVecs @ Ensure that V-bit is cleared
@ ;
@ ; Invalidate caches
@ ; ------------------
BL invalidateCaches
@ ;
@ ; Clear Branch Prediction Array
@ ; ------------------------------
MOV r0, #0x0
MCR p15, 0, r0, c7, c5, 6 @ BPIALL - Invalidate entire branch predictor array
@; [Grape Change Start]
@; ; Disable loop-buffer to fix errata on A15 r0p0
@; MRC p15, 0, r0, c0, c0, 0 ; Read main ID register MIDR
@; MOV r1, r0, lsr #4 ; Extract Primary Part Number
@; LDR r2, =0xFFF
@; AND r1, r1, r2
@; LDR r2, =0xC0F
@; CMP r1, r2 ; Is this an A15?
@; BNE notA15r0p0 ; Jump if not A15
@; AND r5, r0, #0x00f00000 ; Variant
@; AND r6, r0, #0x0000000f ; Revision
@; ORRS r6, r6, r5 ; Combine variant and revision
@; BNE notA15r0p0 ; Jump if not r0p0
@; MRC p15, 0, r0, c1, c0, 1 ; Read Aux Ctrl Reg
@; ORR r0, r0, #(1 << 1) ; Set bit 1 to Disable Loop Buffer
@; MCR p15, 0, r0, c1, c0, 1 ; Write Aux Ctrl Reg
@; ISB
@;notA15r0p0
@; [Grape Change End]
@
@ ;
@ ; Invalidate TLBs
@ ;------------------
MOV r0, #0x0
MCR p15, 0, r0, c8, c7, 0 @ TLBIALL - Invalidate entire Unified TLB
@ ;
@ ; Set up Domain Access Control Reg
@ ; ----------------------------------
@ ; b00 - No Access (abort)
@ ; b01 - Client (respect table entry)
@ ; b10 - RESERVED
@ ; b11 - Manager (ignore access permissions)
MRC p15, 0, r0, c3, c0, 0 @ Read Domain Access Control Register
LDR r0, =0x55555555 @ Initialize every domain entry to b01 (client)
MCR p15, 0, r0, c3, c0, 0 @ Write Domain Access Control Register
@ ;;
@ ;; Enable L1 Preloader - Auxiliary Control
@ ;; -----------------------------------------
@ ;; Seems to undef on panda?
@ ;MRC p15, 0, r0, c1, c0, 1 ; Read ACTLR
@ ;ORR r0, r0, #0x4
@ ;MCR p15, 0, r0, c1, c0, 1 ; Write ACTLR
@
@ ; Page tables
@ ; -------------------------
@ ; Each CPU will have its own L1 page table. The
@ ; code reads the base address from the scatter file
@ ; the uses the CPUID to calculate an offset for each
@ ; CPU.
@ ;
@ ; The page tables are generated at boot time. First
@ ; the table is zeroed. Then the individual valid
@ ; entries are written in
@ ;
@
@ ; Calculate offset for this CPU
LDR r0, =_page_table_top
MRC p15, 0, r1, c0, c0, 5 @ Read Multiprocessor Affinity Register
ANDS r1, r1, #0x03 @ Mask off, leaving the CPU ID field
MOV r1, r1, LSL #14 @ Convert core ID into a 16K offset (this is the size of the table)
ADD r0, r1, r0 @ Add offset to current table location to get dst
@ Fill table with zeros
MOV r2, #1024 @ Set r2 to loop count (4 entries per iteration, 1024 iterations)
MOV r1, r0 @ Make a copy of the base dst
MOV r3, #0
MOV r4, #0
MOV r5, #0
MOV r6, #0
ttb_zero_loop:
STMIA r1!, {r3-r6} @ Store out four entries
SUBS r2, r2, #1 @ Decrement counter
BNE ttb_zero_loop
@ ;
@ ; STANDARD ENTRIES
@ ;
@
@ ; Entry for VA 0x0
@ ; This region must be coherent
@ ;LDR r1, =PABASE_VA0 ; Physical address
@ ;LDR r2, =L1_COHERENT ; Descriptor template
@ ;ORR r1, r1, r2 ; Combine address and template
@ ;STR r1, [r0]
@
@
@ ; If not flat mapping, you need a page table entry covering
@ ; the physical address of the boot code.
@ ; This region must be coherent
LDR r1,=_exec @ Base physical address of code segment
LSR r1,#20 @ Shift right to align to 1MB boundaries
LDR r3, =L1_COHERENT @ Descriptor template
ORR r3, r1, LSL#20 @ Setup the initial level1 descriptor again
STR r3, [r0, r1, LSL#2] @ str table entry
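@ Each entry stored this way is a short-descriptor "section" entry: bits
@ [31:20] carry the 1 MB-aligned physical base, while the L1_* template
@ supplies the attribute bits and descriptor type (0b10).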
@; [Grape Change Start]
@;/*------------------------------------------------------------------------*/
@;/*--- Versatile Express(Timer0) ---*/
LDR r1, =0x80000000 @ Physical address of HANDLER
LSR r1, r1, #20 @ Clear bottom 20 bits, to find which 1MB block it is in
LSL r2, r1, #2 @ Make a copy, and multiply by four. This gives offset into the page tables
LSL r1, r1, #20 @ Put back in address format
LDR r3, =L1_COHERENT @ Descriptor template
ORR r1, r1, r3 @ Combine address and template
STR r1, [r0, r2]
LDR r1, =0x2C000000 @ Physical address of GIC_DIST
LSR r1, r1, #20 @ Clear bottom 20 bits, to find which 1MB block it is in
LSL r2, r1, #2 @ Make a copy, and multiply by four. This gives offset into the page tables
LSL r1, r1, #20 @ Put back in address format
LDR r3, =L1_DEVICE @ Descriptor template
ORR r1, r1, r3 @ Combine address and template
STR r1, [r0, r2]
LDR r1, =0x1C000000 @ Physical address of TIMER
LSR r1, r1, #20 @ Clear bottom 20 bits, to find which 1MB block it is in
LSL r2, r1, #2 @ Make a copy, and multiply by four. This gives offset into the page tables
LSL r1, r1, #20 @ Put back in address format
LDR r3, =L1_DEVICE @ Descriptor template
ORR r1, r1, r3 @ Combine address and template
STR r1, [r0, r2]
LDR r1, =0x1C100000 @ Physical address of TIMER
LSR r1, r1, #20 @ Clear bottom 20 bits, to find which 1MB block it is in
LSL r2, r1, #2 @ Make a copy, and multiply by four. This gives offset into the page tables
LSL r1, r1, #20 @ Put back in address format
LDR r3, =L1_DEVICE @ Descriptor template
ORR r1, r1, r3 @ Combine address and template
STR r1, [r0, r2]
@;/*--- Versatile Express(Timer0) ---*/
@;/*------------------------------------------------------------------------*/
@; [Grape Change End]
@
@ ; Entry for private address space
@ ; Needs to be marked as Device memory
MRC p15, 4, r1, c15, c0, 0 @ Get base address of private address space
LSR r1, r1, #20 @ Clear bottom 20 bits, to find which 1MB block it is in
LSL r2, r1, #2 @ Make a copy, and multiply by four. This gives offset into the page tables
LSL r1, r1, #20 @ Put back in address format
LDR r3, =L1_DEVICE @ Descriptor template
ORR r1, r1, r3 @ Combine address and template
STR r1, [r0, r2]
@ ;
@ ; OPTIONAL ENTRIES
@ ; You will need additional translations if:
@ ; - No RAM at zero, so cannot use flat mapping
@ ; - You wish to retarget
@ ;
@ ; If you wish to output to stdio to a UART you will need
@ ; an additional entry
@ ;LDR r1, =PABASE_UART ; Physical address of UART
@ ;LSR r1, r1, #20 ; Mask off bottom 20 bits to find which 1MB it is within
@ ;LSL r2, r1, #2 ; Make a copy and multiply by 4 to get table offset
@ ;LSL r1, r1, #20 ; Put back into address format
@ ;LDR r3, =L1_DEVICE ; Descriptor template
@ ;ORR r1, r1, r3 ; Combine address and template
@ ;STR r1, [r0, r2]
@
@ ;
@ ; Barrier
@ ; --------
DSB
@ ;
@ ; Set location of level 1 page table
@ ;------------------------------------
@ ; 31:14 - Base addr: 0x8050,0000 (CPU0), 0x8050,4000 (CPU1)
@ ; 13:5 - 0x0
@ ; 4:3 - RGN 0x0 (Outer Noncachable)
@ ; 2 - P 0x0
@ ; 1 - S 0x0 (Non-shared)
@ ; 0 - C 0x0 (Inner Noncachable)
MCR p15, 0, r0, c2, c0 ,0
@ ; Enable MMU
@ ;-------------
@ ; Leaving the caches disabled until after scatter loading.
MRC p15, 0, r0, c1, c0, 0 @ Read CP15 System Control register
BIC r0, r0, #(0x1 << 12) @ Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) @ Clear C bit 2 to disable D Cache
BIC r0, r0, #0x2 @ Clear A bit 1 to disable strict alignment fault checking
ORR r0, r0, #0x1 @ Set M bit 0 to enable MMU before scatter loading
MCR p15, 0, r0, c1, c0, 0 @ Write CP15 System Control register
@ ;
@ ; MMU now enabled - Virtual address system now active
@ ;
@; [Grape Change Start]
#ifdef TARGET_FPU_VFP
MRC p15, 0, r1, c1, c0, 2 @ r1 = Access Control Register
ORR r1, r1, #(0xf << 20) @ Enable full access for p10,11
MCR p15, 0, r1, c1, c0, 2 @ Access Control Register = r1
MOV r1, #0
MCR p15, 0, r1, c7, c5, 4 @ Flush prefetch buffer because of FMXR below and
@ CP 10 & 11 were only just enabled
MOV r0, #VFPEnable @ Enable VFP itself
FMXR FPEXC, r0 @ FPEXC = r0
#endif
LDR r0, =_tx_thread_smp_release_cores_flag @ Build address of release cores flag
MOV r1, #0
STR r1, [r0]
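@ Clearing _tx_thread_smp_release_cores_flag keeps the secondary cores parked
@ in _tx_thread_smp_initialize_wait until the primary core finishes kernel
@ initialization and releases them.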
@; [Grape Change End]
@
@ ;
@ ; SMP initialization
@ ; -------------------
MRC p15, 0, r0, c0, c0, 5 @ Read CPU ID register
ANDS r0, r0, #0x03 @ Mask off, leaving the CPU ID field
BEQ primaryCPUInit
BNE secondaryCPUsInit
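@ CPU ID 0 continues as the primary core; cores 1-3 take the secondary path.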
@; ------------------------------------------------------------
@; Initialization for PRIMARY CPU
@; ------------------------------------------------------------
@
@
@ EXPORT primaryCPUInit
.align 2
.global primaryCPUInit
.type primaryCPUInit,function
primaryCPUInit:
@ ;
@ ; GIC Init
@ ; ---------
BL enableGIC
BL enableGICProcessorInterface
BL enableCaches
//
// Enable Private Timer for periodic IRQ
// --------------------------------------
MOV r0, #0x1F
BL setPriorityMask // Set priority mask (local)
// [EL] Change start - don't enable interrupts here!
//CPSIE i // Clear CPSR I bit
// [EL] Change end
// Enable the Private Timer Interrupt Source
MOV r0, #29
MOV r1, #0
BL enableIntID
// Set the priority
MOV r0, #29
MOV r1, #0
BL setIntPriority
// Configure Timer
MOV r0, #0xF0000
MOV r1, #0x0
BL init_private_timer
BL start_private_timer
//
// Enable receipt of SGI 0
// ------------------------
MOV r0, #0x0 // ID
BL enableIntID
MOV r0, #0x0 // ID
MOV r1, #0x0 // Priority
BL setIntPriority
@ ;
@ ; Branch to C lib code
@ ; ----------------------
B _start
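@ _start is the GNU C runtime entry: it sets up the C environment (e.g.
@ zeroing .bss) and calls main(), which in turn calls tx_kernel_enter()
@ (see sample_threadx.c).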
@; [Grape Change End]
@; ------------------------------------------------------------
@; Initialization for SECONDARY CPUs
@; ------------------------------------------------------------
@
@ EXPORT secondaryCPUsInit
.align 2
.global secondaryCPUsInit
.type secondaryCPUsInit,function
secondaryCPUsInit:
@ ;
@ ; GIC Init
@ ; ---------
BL enableGICProcessorInterface
MOV r0, #0x1F @ Priority
BL setPriorityMask
MOV r0, #0x0 @ ID
BL enableIntID
MOV r0, #0x0 @ ID
MOV r1, #0x0 @ Priority
BL setIntPriority
@ ;
@ ; Holding Pen
@ ; ------------
@; [Grape Change Start]
@; MOV r2, #0x00 ; Clear r2
@; CPSIE i ; Enable interrupts
@;holding_pen
@; CMP r2, #0x0 ; r2 will be set to 0x1 by IRQ handler on receiving SGI
@; WFIEQ
@; BEQ holding_pen
@; CPSID i ; IRQs not used in rest of example, so mask out interrupts
@; [Grape Change End]
@
@
@ ;
@ ; Branch to application
@ ; ----------------------
@; [Grape Change Start]
@; B main_app
@; BL enableBranchPrediction
BL enableCaches
B _tx_thread_smp_initialize_wait
@; [Grape Change End]
@
@; ------------------------------------------------------------
@; End of code
@; ------------------------------------------------------------
@
@ END
@
@; ------------------------------------------------------------
@; End of startup.s
@; ------------------------------------------------------------

View File

@@ -0,0 +1,122 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Initialize */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_initialize.h"
@#include "tx_thread.h"
@#include "tx_timer.h"
@
@
@
@
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_version_id
.global _tx_build_options
.global _end
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_initialize_low_level SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is responsible for any low-level processor */
@/* initialization, including setting up interrupt vectors, setting */
@/* up a periodic timer interrupt source, saving the system stack */
@/* pointer for use in ISR processing later, and finding the first */
@/* available RAM memory address for tx_application_define. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* _tx_initialize_kernel_enter ThreadX entry function */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_initialize_low_level(VOID)
@{
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
@
@ /* Save the first available memory address. */
@ _tx_initialize_unused_memory = (VOID_PTR) _end;
@
LDR r0, =_end @ Get end of non-initialized RAM area
LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
ADD r0, r0, #8 @ Increment to next free word
STR r0, [r2, #0] @ Save first free memory address
@
@
@ /* Done, return to caller. */
@
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@}
@
@ /* Reference build options and version ID to ensure they come in. */
@
LDR r2, =_tx_build_options @ Pickup build options variable address
LDR r0, [r2, #0] @ Pickup build options content
LDR r2, =_tx_version_id @ Pickup version ID variable address
LDR r0, [r2, #0] @ Pickup version ID content
@
@

View File

@@ -0,0 +1,531 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.text
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use compiler intrinsics instead of these
.global enableInterrupts
// void enableInterrupts(void)
.type enableInterrupts, "function"
.cfi_startproc
enableInterrupts:
CPSIE i
BX lr
.cfi_endproc
.global disableInterrupts
// void disableInterrupts(void)
.type disableInterrupts, "function"
.cfi_startproc
disableInterrupts:
CPSID i
BX lr
.cfi_endproc
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
// void enableCaches(void)
.type enableCaches, "function"
.cfi_startproc
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.cfi_endproc
.global disableCaches
// void disableCaches(void)
.type disableCaches, "function"
.cfi_startproc
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.cfi_endproc
.global cleanDCache
// void cleanDCache(void)
.type cleanDCache, "function"
.cfi_startproc
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.cfi_endproc
.global cleanInvalidateDCache
// void cleanInvalidateDCache(void)
.type cleanInvalidateDCache, "function"
.cfi_startproc
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.cfi_endproc
.global invalidateCaches
// void invalidateCaches(void)
.type invalidateCaches, "function"
.cfi_startproc
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.cfi_endproc
.global invalidateCaches_IS
// void invalidateCaches_IS(void)
.type invalidateCaches_IS, "function"
.cfi_startproc
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
.cfi_endproc
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
// void invalidateUnifiedTLB(void)
.type invalidateUnifiedTLB, "function"
.cfi_startproc
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.cfi_endproc
.global invalidateUnifiedTLB_IS
// void invalidateUnifiedTLB_IS(void)
.type invalidateUnifiedTLB_IS, "function"
.cfi_startproc
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
.cfi_endproc
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
// void flushBranchTargetCache(void)
.type flushBranchTargetCache, "function"
.cfi_startproc
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.cfi_endproc
.global flushBranchTargetCache_IS
// void flushBranchTargetCache_IS(void)
.type flushBranchTargetCache_IS, "function"
.cfi_startproc
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
.cfi_endproc
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
// void enableHighVecs(void)
.type enableHighVecs, "function"
.cfi_startproc
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.cfi_endproc
.global disableHighVecs
// void disable_highvecs(void)
.type disableHighVecs, "function"
.cfi_startproc
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.cfi_endproc
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
// uint32_t getContextID(void)
.type getContextID, "function"
.cfi_startproc
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.cfi_endproc
.global setContextID
// void setContextID(uint32_t)
.type setContextID, "function"
.cfi_startproc
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
.cfi_endproc
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
// uint32_t getMIDR(void)
.type getMIDR, "function"
.cfi_startproc
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.cfi_endproc
.global getMPIDR
// uint32_t getMPIDR(void)
.type getMPIDR, "function"
.cfi_startproc
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
.cfi_endproc
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
.type getBaseAddr, "function"
.cfi_startproc
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global getCPUID
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
.type getCPUID, "function"
.cfi_startproc
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global goToSleep
// void goToSleep(void)
.type goToSleep, "function"
.cfi_startproc
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global joinSMP
// void joinSMP(void)
// Sets the ACTRL.SMP bit
.type joinSMP, "function"
.cfi_startproc
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
.cfi_endproc
// ------------------------------------------------------------
.global leaveSMP
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
.type leaveSMP, "function"
.cfi_startproc
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
.cfi_endproc
.align 2
.global _exit
.type _exit,function
_exit:
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enable/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSOR 0x1
#define MPIDR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@@ -0,0 +1,409 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Port Specific */
/** */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h SMP/Cortex-A5/GNU */
/* 6.1 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This file contains data type definitions that make the ThreadX */
/* real-time kernel function identically on a variety of different */
/* processor architectures. For example, the size or number of bits */
/* in an "int" data type vary between microprocessor architectures and */
/* even C compilers for the same microprocessor. ThreadX does not */
/* directly use native C data types. Instead, ThreadX creates its */
/* own special types that can be mapped to actual data types by this */
/* file to guarantee consistency in the interface and functionality. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* */
/**************************************************************************/
#ifndef TX_PORT_H
#define TX_PORT_H
/************* Define ThreadX SMP constants. *************/
/* Define the ThreadX SMP maximum number of cores. */
#ifndef TX_THREAD_SMP_MAX_CORES
#define TX_THREAD_SMP_MAX_CORES 2
#endif
/* Define the ThreadX SMP core mask. */
#ifndef TX_THREAD_SMP_CORE_MASK
#define TX_THREAD_SMP_CORE_MASK 0x3 /* Where bit 0 represents Core 0, bit 1 represents Core 1, etc. */
#endif
/* Define INLINE_DECLARE to whitespace for the GNU compiler. */
#define INLINE_DECLARE
/* Define ThreadX SMP initialization macro. */
#define TX_PORT_SPECIFIC_PRE_INITIALIZATION
/* Define ThreadX SMP pre-scheduler initialization. */
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION
/* Enable the inter-core interrupt logic. */
#define TX_THREAD_SMP_INTER_CORE_INTERRUPT
/* Determine if there is customer-specific wakeup logic needed. */
#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
/* Include customer-specific wakeup code. */
#include "tx_thread_smp_core_wakeup.h"
#else
#ifdef TX_THREAD_SMP_DEFAULT_WAKEUP_LOGIC
/* Default wakeup code. */
#define TX_THREAD_SMP_WAKEUP_LOGIC
#define TX_THREAD_SMP_WAKEUP(i) _tx_thread_smp_core_preempt(i)
#endif
#endif
/* Ensure that the in-line resume/suspend define is not allowed. */
#ifdef TX_INLINE_THREAD_RESUME_SUSPEND
#undef TX_INLINE_THREAD_RESUME_SUSPEND
#endif
/************* End ThreadX SMP constants. *************/
/* Determine if the optional ThreadX user define file should be used. */
#ifdef TX_INCLUDE_USER_DEFINE_FILE
/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
#endif
/* Define compiler library include files. */
#include <stdlib.h>
#include <string.h>
/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef short SHORT;
typedef unsigned short USHORT;
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
#ifndef TX_MAX_PRIORITIES
#define TX_MAX_PRIORITIES 32
#endif
/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
thread creation is less than this value, the thread create call will return an error. */
#ifndef TX_MINIMUM_STACK
#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
#endif
/* Define the system timer thread's default stack size and priority. These are only applicable
if TX_TIMER_PROCESS_IN_ISR is not defined. */
#ifndef TX_TIMER_THREAD_STACK_SIZE
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
#else
#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
#endif
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
/* Define the clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
#define TX_TRACE_TIME_SOURCE _tx_thread_smp_time_get()
#endif
#else
#ifndef TX_TRACE_TIME_SOURCE
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
/* Define the port specific options for the _tx_build_options variable. This variable indicates
how the ThreadX library was built. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_FIQ_ENABLED 1
#else
#define TX_FIQ_ENABLED 0
#endif
#ifdef TX_ENABLE_IRQ_NESTING
#define TX_IRQ_NESTING_ENABLED 2
#else
#define TX_IRQ_NESTING_ENABLED 0
#endif
#ifdef TX_ENABLE_FIQ_NESTING
#define TX_FIQ_NESTING_ENABLED 4
#else
#define TX_FIQ_NESTING_ENABLED 0
#endif
#define TX_PORT_SPECIFIC_BUILD_OPTIONS (TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED)
/* Define the in-line initialization constant so that modules with in-line
initialization capabilities can prevent their initialization from being
a function call. */
#ifdef TX_MISRA_ENABLE
#define TX_DISABLE_INLINE
#else
#define TX_INLINE_INITIALIZATION
#endif
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
logic. */
#ifndef TX_MISRA_ENABLE
#ifdef TX_ENABLE_STACK_CHECKING
#undef TX_DISABLE_STACK_FILLING
#endif
#endif
/* Define the TX_THREAD control block extensions for this port. The main reason
for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_EVENT_FLAGS_GROUP_EXTENSION
#define TX_MUTEX_EXTENSION
#define TX_QUEUE_EXTENSION
#define TX_SEMAPHORE_EXTENSION
#define TX_TIMER_EXTENSION
/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
#define TX_THREAD_USER_EXTENSION
#endif
/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
tx_thread_shell_entry, and tx_thread_terminate. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
/* Define the ThreadX object creation extensions for the remaining objects. */
#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
/* Define the ThreadX object deletion extensions for the remaining objects. */
#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
/* Determine if the ARM architecture has the CLZ instruction. This is available on
architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
#ifndef TX_DISABLE_INLINE
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
b = 31 - b;
#endif
#endif
#endif
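/* Worked example of the macro above (illustrative): for m = 0x14 (binary
   10100), m & -m isolates the lowest set bit (0x4), CLZ returns 29, and
   b = 31 - 29 = 2, the bit number of the lowest set bit. */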
/************* Define ThreadX SMP data types and function prototypes. *************/
struct TX_THREAD_STRUCT;
/* Define the ThreadX SMP protection structure. */
typedef struct TX_THREAD_SMP_PROTECT_STRUCT
{
ULONG tx_thread_smp_protect_in_force;
struct TX_THREAD_STRUCT *tx_thread_smp_protect_thread;
ULONG tx_thread_smp_protect_core;
ULONG tx_thread_smp_protect_count;
/* Implementation specific information follows. */
ULONG tx_thread_smp_protect_get_caller;
ULONG tx_thread_smp_protect_sr;
ULONG tx_thread_smp_protect_release_caller;
} TX_THREAD_SMP_PROTECT;
/* Define ThreadX interrupt lockout and restore macros for protection on
access of critical kernel information. The restore interrupt macro must
restore the interrupt posture of the running thread to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
#define TX_INTERRUPT_SAVE_AREA unsigned int interrupt_save;
#define TX_DISABLE interrupt_save = _tx_thread_smp_protect();
#define TX_RESTORE _tx_thread_smp_unprotect(interrupt_save);
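/* Illustrative usage sketch (not part of this port file): a critical
   section is typically bracketed with the macros above, e.g.

        TX_INTERRUPT_SAVE_AREA

        TX_DISABLE
        ... access critical kernel information ...
        TX_RESTORE
*/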
/************* End ThreadX SMP data type and function prototype definitions. *************/
/* Define the interrupt lockout macros for each ThreadX object. */
#define TX_BLOCK_POOL_DISABLE TX_DISABLE
#define TX_BYTE_POOL_DISABLE TX_DISABLE
#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
#define TX_MUTEX_DISABLE TX_DISABLE
#define TX_QUEUE_DISABLE TX_DISABLE
#define TX_SEMAPHORE_DISABLE TX_DISABLE
/* Define VFP extension for the Cortex-A5. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX SMP/Cortex-A5/GNU Version 6.1 *";
#else
extern CHAR _tx_version_id[];
#endif
#endif

View File

@@ -0,0 +1,343 @@
Microsoft's Azure RTOS ThreadX SMP for Cortex-A5
Using the GNU Tools
1. Building the ThreadX run-time Library
First make sure you are in the "example_build" directory. Also, make sure that
you have setup your path and other environment variables necessary for the GNU
development environment.
At this point you may run the build_threadx.bat batch file. This will build the
ThreadX run-time environment in the "example_build" directory.
You should observe assembly and compilation of a series of ThreadX source
files. At the end of the batch file, they are all combined into the
run-time library file: tx.a. This file must be linked with your
application in order to use ThreadX.
2. Demonstration System
The ThreadX demonstration is designed to execute under the ARM Cortex-A5x4 FVP.
Building the demonstration is easy; simply execute the build_threadx_sample.bat
batch file while inside the "example_build" directory.
You should observe the compilation of sample_threadx.c (which is the demonstration
application) and linking with tx.a. The resulting file DEMO is a binary file
that can be downloaded and executed.
3. System Initialization
The entry point in ThreadX for the Cortex-A5 using GNU tools is at label
Reset_Handler in startup.s. After the basic core initialization is complete,
control will transfer to __main, which is where all static and global pre-set
C variable initialization processing takes place.
The ThreadX tx_initialize_low_level.s file is responsible for setting up
various system data structures, the vector area, and a periodic timer interrupt
source. By default, the vector area is defined to be located in the Init area,
which is defined at the top of tx_initialize_low_level.s. This area is typically
located at 0. In situations where this is impossible, the vectors at the beginning
of the Init area should be copied to address 0.
This is also where initialization of a periodic timer interrupt source
should take place.
In addition, _tx_initialize_low_level determines the first available
address for use by the application, which is supplied as the sole input
parameter to your application definition function, tx_application_define.
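As an illustration only (the pool name and size below are hypothetical),
tx_application_define might place a byte pool at this first free address
and create application objects from it:

    #define DEMO_POOL_SIZE 4096

    TX_BYTE_POOL demo_pool;

    void tx_application_define(void *first_unused_memory)
    {
        /* Place a byte pool at the first free RAM address supplied by
           _tx_initialize_low_level. */
        tx_byte_pool_create(&demo_pool, "demo pool",
                            first_unused_memory, DEMO_POOL_SIZE);
    }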
4. Register Usage and Stack Frames
The GNU compiler assumes that registers r0-r3 (a1-a4) and r12 (ip) are scratch
registers for each function. All other registers used by a C function must
be preserved by the function. ThreadX takes advantage of this in situations
where a context switch happens as a result of making a ThreadX service call
(which is itself a C function). In such cases, the saved context of a thread
is only the non-scratch registers.
The following defines the saved context stack frames for context switches
that occur as a result of interrupt handling or from thread-level API calls.
All suspended threads have one of these two types of stack frames. The top
of the suspended thread's stack is pointed to by tx_thread_stack_ptr in the
associated thread control block TX_THREAD.
    Offset    Interrupted Stack Frame    Non-Interrupt Stack Frame

     0x00     1                          0
     0x04     CPSR                       CPSR
     0x08     r0  (a1)                   r4  (v1)
     0x0C     r1  (a2)                   r5  (v2)
     0x10     r2  (a3)                   r6  (v3)
     0x14     r3  (a4)                   r7  (v4)
     0x18     r4  (v1)                   r8  (v5)
     0x1C     r5  (v2)                   r9  (v6)
     0x20     r6  (v3)                   r10 (v7)
     0x24     r7  (v4)                   r11 (fp)
     0x28     r8  (v5)                   r14 (lr)
     0x2C     r9  (v6)
     0x30     r10 (v7)
     0x34     r11 (fp)
     0x38     r12 (ip)
     0x3C     r14 (lr)
     0x40     PC
5. Improving Performance
The distribution version of ThreadX is built without any compiler
optimizations. This makes it easy to debug because you can trace or set
breakpoints inside of ThreadX itself. Of course, this costs some
performance. To make it run faster, you can change the build_threadx.bat file to
remove the -g option and enable all compiler optimizations.
In addition, you can eliminate the ThreadX basic API error checking by
compiling your application code with the symbol TX_DISABLE_ERROR_CHECKING
defined.
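For example, with a GNU toolchain the compile lines for application files
might become something along these lines (illustrative only; adjust the
target options to your environment):

    arm-none-eabi-gcc -c -O2 -DTX_DISABLE_ERROR_CHECKING sample_threadx.c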
6. Interrupt Handling
ThreadX provides complete and high-performance interrupt handling for Cortex-A5
targets. There are a certain set of requirements that are defined in the
following sub-sections:
6.1 Vector Area
The Cortex-A5 vectors start at address zero. The demonstration system startup
Init area contains the vectors and is loaded at address zero. On actual
hardware platforms, this area might have to be copied to address 0.
6.2 IRQ ISRs
ThreadX fully manages standard and vectored IRQ interrupts. ThreadX also supports nested
IRQ interrupts. The following sub-sections define the IRQ capabilities.
6.2.1 Standard IRQ ISRs
The standard ARM IRQ mechanism has a single interrupt vector at address 0x18. This IRQ
interrupt is managed by the __tx_irq_handler code in tx_initialize_low_level. The following
is the default IRQ handler defined in tx_initialize_low_level.s:
EXPORT __tx_irq_handler
EXPORT __tx_irq_processing_return
__tx_irq_handler
;
; /* Jump to context save to save system context. */
B _tx_thread_context_save ; Jump to the context save
__tx_irq_processing_return
;
; /* At this point execution is still in the IRQ mode. The CPSR, point of
; interrupt, and all C scratch registers are available for use. Note
; that IRQ interrupts are still disabled upon return from the context
; save function. */
;
; /* Application ISR call(s) go here! */
;
; /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
6.2.2 Vectored IRQ ISRs
The vectored ARM IRQ mechanism has multiple interrupt vectors at addresses specified
by the particular implementation. The following is an example IRQ handler defined in
tx_initialize_low_level.s:
EXPORT __tx_irq_example_handler
__tx_irq_example_handler
;
; /* Call context save to save system context. */
STMDB sp!, {r0-r3} ; Save some scratch registers
MRS r0, SPSR ; Pickup saved SPSR
SUB lr, lr, #4 ; Adjust point of interrupt
STMDB sp!, {r0, r10, r12, lr} ; Store other scratch registers
BL _tx_thread_vectored_context_save ; Call the vectored IRQ context save
;
; /* At this point execution is still in the IRQ mode. The CPSR, point of
; interrupt, and all C scratch registers are available for use. Note
; that IRQ interrupts are still disabled upon return from the context
; save function. */
;
; /* Application ISR call goes here! */
;
; /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
6.2.3 Nested IRQ Support
By default, nested IRQ interrupt support is not enabled. To enable nested
IRQ support, the entire library should be built with TX_ENABLE_IRQ_NESTING
defined. With this defined, two new IRQ interrupt management services are
available, namely _tx_thread_irq_nesting_start and _tx_thread_irq_nesting_end.
These functions should be called between the IRQ context save and restore
calls.
Execution between the calls to _tx_thread_irq_nesting_start and
_tx_thread_irq_nesting_end is enabled for IRQ nesting. This is achieved
by switching from IRQ mode to SYS mode and enabling IRQ interrupts.
The SYS mode stack is used during the SYS mode operation, which was
setup in tx_initialize_low_level.s. When nested IRQ interrupts are no longer required,
calling the _tx_thread_irq_nesting_end service disables nesting by disabling
IRQ interrupts and switching back to IRQ mode in preparation for the IRQ
context restore service.
The following is an example of enabling IRQ nested interrupts in a standard
IRQ handler:
EXPORT __tx_irq_handler
EXPORT __tx_irq_processing_return
__tx_irq_handler
;
; /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return
;
; /* Enable nested IRQ interrupts. NOTE: Since this service returns
; with IRQ interrupts enabled, all IRQ interrupt sources must be
; cleared prior to calling this service. */
BL _tx_thread_irq_nesting_start
;
; /* Application ISR call(s) go here! */
;
; /* Disable nested IRQ interrupts. The mode is switched back to
; IRQ mode and IRQ interrupts are disabled upon return. */
BL _tx_thread_irq_nesting_end
;
; /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
6.3 FIQ Interrupts
By default, Cortex-A5 FIQ interrupts are left alone by ThreadX. Of course, this
means that the application is fully responsible for enabling the FIQ interrupt
and saving/restoring any registers used in the FIQ ISR processing. To globally
enable FIQ interrupts, the application should enable FIQ interrupts at the
beginning of each thread or before any threads are created in tx_application_define.
In addition, the application must ensure that no ThreadX service calls are made
from the default FIQ ISR, which is located in tx_initialize_low_level.s.
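As an illustrative sketch, a thread that requires FIQ interrupts could
enable them at the start of its entry function with a single inline
assembly statement (the thread name is hypothetical):

    void fiq_using_thread_entry(ULONG thread_input)
    {
        __asm__ volatile ("cpsie f");    /* Enable FIQ interrupts for this thread */

        /* Thread processing follows here. */
    }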
6.3.1 Managed FIQ Interrupts
Full ThreadX management of FIQ interrupts is provided if the ThreadX sources
are built with the TX_ENABLE_FIQ_SUPPORT defined. If the library is built
this way, the FIQ interrupt handlers are very similar to the IRQ interrupt
handlers defined previously. The following is the default FIQ handler
defined in tx_initialize_low_level.s:
EXPORT __tx_fiq_handler
EXPORT __tx_fiq_processing_return
__tx_fiq_handler
;
; /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return
;
; /* At this point execution is still in the FIQ mode. The CPSR, point of
; interrupt, and all C scratch registers are available for use. */
;
; /* Application FIQ handlers can be called here! */
;
; /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
6.3.1.1 Nested FIQ Support
By default, nested FIQ interrupt support is not enabled. To enable nested
FIQ support, the entire library should be built with TX_ENABLE_FIQ_NESTING
defined. With this defined, two new FIQ interrupt management services are
available, namely _tx_thread_fiq_nesting_start and _tx_thread_fiq_nesting_end.
These functions should be called between the FIQ context save and restore
calls.
Execution between the calls to _tx_thread_fiq_nesting_start and
_tx_thread_fiq_nesting_end is enabled for FIQ nesting. This is achieved
by switching from FIQ mode to SYS mode and enabling FIQ interrupts.
The SYS mode stack is used during the SYS mode operation, which was
setup in tx_initialize_low_level.s. When nested FIQ interrupts are no longer required,
calling the _tx_thread_fiq_nesting_end service disables nesting by disabling
FIQ interrupts and switching back to FIQ mode in preparation for the FIQ
context restore service.
The following is an example of enabling FIQ nested interrupts in the
typical FIQ handler:
EXPORT __tx_fiq_handler
EXPORT __tx_fiq_processing_return
__tx_fiq_handler
;
; /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return
;
; /* At this point execution is still in the FIQ mode. The CPSR, point of
; interrupt, and all C scratch registers are available for use. */
;
; /* Enable nested FIQ interrupts. NOTE: Since this service returns
; with FIQ interrupts enabled, all FIQ interrupt sources must be
; cleared prior to calling this service. */
BL _tx_thread_fiq_nesting_start
;
; /* Application FIQ handlers can be called here! */
;
; /* Disable nested FIQ interrupts. The mode is switched back to
; FIQ mode and FIQ interrupts are disabled upon return. */
BL _tx_thread_fiq_nesting_end
;
; /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
7. ThreadX Timer Interrupt
ThreadX requires a periodic interrupt source to manage all time-slicing,
thread sleeps, timeouts, and application timers. Without such a timer
interrupt source, these services are not functional. However, all other
ThreadX services are operational without a periodic timer source.
To add the timer interrupt processing, simply make a call to
_tx_timer_interrupt in the IRQ processing. An example of this can be
found in the file tx_initialize_low_level.s in the Integrator sub-directories.
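For example, the timer call can be placed at the application ISR position
of the standard IRQ handler shown earlier (sketch only; the surrounding
handler code is unchanged):

    __tx_irq_processing_return
    ;
    ;    /* Process the ThreadX periodic timer interrupt. */
    ;
        BL      _tx_timer_interrupt           ; Call ThreadX timer handler
    ;
    ;    /* Jump to context restore to restore system context. */
        B       _tx_thread_context_restore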
8. VFP Support
VFP support is optional; it can be enabled by building the ThreadX library
assembly code with the following command-line option:
-mfpu=neon -DTARGET_FPU_VFP
Note that if ISRs need to use VFP registers, their contents must be saved
before their use and restored after.
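Because the VFP context is saved and restored per thread, a thread that
performs floating-point work should first enable VFP handling for itself
using the port service declared in tx_port.h (sketch only; the thread
name is hypothetical):

    void math_thread_entry(ULONG thread_input)
    {
        tx_thread_vfp_enable();    /* Enable VFP save/restore for this thread */

        /* Floating-point processing follows here. */
    }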
9. Revision History
For generic code revision information, please refer to the readme_threadx_generic.txt
file, which is included in your distribution. The following details the revision
information associated with this specific port of ThreadX:
09-30-2020 Initial ThreadX 6.1 version for Cortex-A5 using GNU tools.
Copyright(c) 1996-2020 Microsoft Corporation
https://azure.com/rtos

View File

@@ -0,0 +1,372 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h"
@
@/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
#ifdef TX_ENABLE_FIQ_SUPPORT
DISABLE_INTS = 0xC0 @ Disable IRQ & FIQ interrupts
IRQ_MODE = 0xD2 @ IRQ mode
SVC_MODE = 0xD3 @ SVC mode
#else
DISABLE_INTS = 0x80 @ Disable IRQ interrupts
IRQ_MODE = 0x92 @ IRQ mode
SVC_MODE = 0x93 @ SVC mode
#endif
@
@
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_timer_interrupt_active
.global _tx_thread_smp_protection
.global _tx_thread_smp_protect_wait_counts
.global _tx_thread_smp_protect_wait_list
.global _tx_thread_smp_protect_wait_list_lock_protect_in_force
.global _tx_thread_smp_protect_wait_list_tail
.global _tx_thread_smp_protect_wait_list_size
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
.global _tx_execution_isr_exit
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_context_restore SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function restores the interrupt context if it is processing a */
@/* nested interrupt. If not, it returns to the interrupt thread if no */
@/* preemption is necessary. Otherwise, if preemption is necessary or */
@/* if no thread was running, the function returns to the scheduler. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* _tx_thread_schedule Thread scheduling routine */
@/* */
@/* CALLED BY */
@/* */
@/* ISRs Interrupt Service Routines */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_context_restore(VOID)
@{
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
@
@ /* Lockout interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR exit function to indicate an ISR is complete. */
@
BL _tx_execution_isr_exit @ Call the ISR exit function
#endif
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
@
@ /* Determine if interrupts are nested. */
@ if (--_tx_thread_system_state[core])
@ {
@
LDR r3, =_tx_thread_system_state @ Pickup address of system state var
ADD r3, r3, r12 @ Build array offset
LDR r2, [r3, #0] @ Pickup system state
SUB r2, r2, #1 @ Decrement the counter
STR r2, [r3, #0] @ Store the counter
CMP r2, #0 @ Was this the first interrupt?
BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
@
@ /* Interrupts are nested. */
@
@ /* Just recover the saved registers and return to the point of
@ interrupt. */
@
LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
MSR SPSR_cxsf, r0 @ Put SPSR back
LDMIA sp!, {r0-r3} @ Recover r0-r3
MOVS pc, lr @ Return to point of interrupt
@
@ }
__tx_thread_not_nested_restore:
@
@ /* Determine if a thread was interrupted and no preemption is required. */
@ else if (((_tx_thread_current_ptr[core]) && (_tx_thread_current_ptr[core] == _tx_thread_execute_ptr[core]))
@ || (_tx_thread_preempt_disable))
@ {
@
LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
ADD r1, r1, r12 @ Build index to this core's current thread ptr
LDR r0, [r1, #0] @ Pickup actual current thread pointer
CMP r0, #0 @ Is it NULL?
BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
@
LDR r3, =_tx_thread_smp_protection @ Get address of protection structure
LDR r2, [r3, #8] @ Pickup owning core
CMP r2, r10 @ Is the owning core the same as the protected core?
BNE __tx_thread_skip_preempt_check @ No, skip the preempt disable check since this is only valid for the owning core
LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
LDR r2, [r3, #0] @ Pickup actual preempt disable flag
CMP r2, #0 @ Is it set?
BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
__tx_thread_skip_preempt_check:
LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
ADD r3, r3, r12 @ Build index to this core's execute thread ptr
LDR r2, [r3, #0] @ Pickup actual execute thread pointer
CMP r0, r2 @ Is the same thread highest priority?
BNE __tx_thread_preempt_restore @ No, preemption needs to happen
@
@
__tx_thread_no_preempt_restore:
@
@ /* Restore interrupted thread or ISR. */
@
@ /* Pickup the saved stack pointer. */
@ tmp_ptr = _tx_thread_current_ptr[core] -> tx_thread_stack_ptr;
@
@ /* Recover the saved context and return to the point of interrupt. */
@
LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
MSR SPSR_cxsf, r0 @ Put SPSR back
LDMIA sp!, {r0-r3} @ Recover r0-r3
MOVS pc, lr @ Return to point of interrupt
@
@ }
@ else
@ {
@
__tx_thread_preempt_restore:
@
@ /* Was the thread being preempted waiting for the lock? */
@ if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
@ {
@
LDR r1, =_tx_thread_smp_protect_wait_counts @ Load waiting count list
LDR r2, [r1, r10, LSL #2] @ Load waiting value for this core
CMP r2, #0
BEQ _nobody_waiting_for_lock @ Is the core waiting for the lock?
@
@ /* Do we not have the lock? This means the ISR never got the inter-core lock. */
@ if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
@ {
@
LDR r1, =_tx_thread_smp_protection @ Load address of protection structure
LDR r2, [r1, #8] @ Pickup the owning core
CMP r10, r2 @ Compare our core to the owning core
BEQ _this_core_has_lock @ Do we have the lock?
@
@ /* We don't have the lock. This core should be in the list. Remove it. */
@ _tx_thread_smp_protect_wait_list_remove(this_core);
@
MOV r0, r10 @ Move the core ID to r0 for the macro
_tx_thread_smp_protect_wait_list_remove @ Call macro to remove core from the list
B _nobody_waiting_for_lock @ Leave
@
@ }
@ else
@ {
@ /* We have the lock. This means the ISR got the inter-core lock, but
@ never released it because it saw that there was someone waiting.
@ Note this core is not in the list. */
@
_this_core_has_lock:
@
@ /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
@ _tx_thread_smp_protect_wait_counts[core]--;
@
LDR r1, =_tx_thread_smp_protect_wait_counts @ Load waiting count list
LDR r2, [r1, r10, LSL #2] @ Load waiting value for this core
SUB r2, r2, #1 @ Decrement waiting value. Should be zero now
STR r2, [r1, r10, LSL #2] @ Store new waiting value
@
@ /* Now release the inter-core lock. */
@
@ /* Set protected core as invalid. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
@
LDR r1, =_tx_thread_smp_protection @ Load address of protection structure
MOV r2, #0xFFFFFFFF @ Build invalid value
STR r2, [r1, #8] @ Mark the protected core as invalid
DMB @ Ensure that accesses to shared resource have completed
@
@ /* Release protection. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
@
MOV r2, #0 @ Build release protection value
STR r2, [r1, #0] @ Release the protection
DSB ISH @ To ensure update of the protection occurs before other CPUs awake
@
@ /* Wake up waiting processors. Note interrupts are already enabled. */
@
#ifdef TX_ENABLE_WFE
SEV @ Send event to other CPUs
#endif
@
@ }
@ }
@
_nobody_waiting_for_lock:
LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
MOV r1, lr @ Save lr (point of interrupt)
MOV r2, #SVC_MODE @ Build SVC mode CPSR
MSR CPSR_c, r2 @ Enter SVC mode
STR r1, [sp, #-4]! @ Save point of interrupt
STMDB sp!, {r4-r12, lr} @ Save upper half of registers
MOV r4, r3 @ Save SPSR in r4
MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
MSR CPSR_c, r2 @ Enter IRQ mode
LDMIA sp!, {r0-r3} @ Recover r0-r3
MOV r5, #SVC_MODE @ Build SVC mode CPSR
MSR CPSR_c, r5 @ Enter SVC mode
STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
ADD r1, r1, r12 @ Build index to current thread ptr
LDR r0, [r1, #0] @ Pickup current thread pointer
#ifdef TARGET_FPU_VFP
LDR r2, [r0, #160] @ Pickup the VFP enabled flag
CMP r2, #0 @ Is the VFP enabled?
BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
VMRS r2, FPSCR @ Pickup the FPSCR
STR r2, [sp, #-4]! @ Save FPSCR
VSTMDB sp!, {D16-D31} @ Save D16-D31
VSTMDB sp!, {D0-D15} @ Save D0-D15
_tx_skip_irq_vfp_save:
#endif
MOV r3, #1 @ Build interrupt stack type
STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
STR sp, [r0, #8] @ Save stack pointer in thread control
@ block
@
@ /* Save the remaining time-slice and disable it. */
@ if (_tx_timer_time_slice[core])
@ {
@
LDR r3, =_tx_timer_interrupt_active @ Pickup timer interrupt active flag's address
_tx_wait_for_timer_to_finish:
LDR r2, [r3, #0] @ Pickup timer interrupt active flag
CMP r2, #0 @ Is the timer interrupt active?
BNE _tx_wait_for_timer_to_finish @ If timer interrupt is active, wait until it completes
LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
ADD r3, r3, r12 @ Build index to core's time slice
LDR r2, [r3, #0] @ Pickup time-slice
CMP r2, #0 @ Is it active?
BEQ __tx_thread_dont_save_ts @ No, don't save it
@
@ _tx_thread_current_ptr[core] -> tx_thread_time_slice = _tx_timer_time_slice[core];
@ _tx_timer_time_slice[core] = 0;
@
STR r2, [r0, #24] @ Save thread's time-slice
MOV r2, #0 @ Clear value
STR r2, [r3, #0] @ Disable global time-slice flag
@
@ }
__tx_thread_dont_save_ts:
@
@
@ /* Clear the current task pointer. */
@ _tx_thread_current_ptr[core] = TX_NULL;
@
MOV r2, #0 @ NULL value
STR r2, [r1, #0] @ Clear current thread pointer
@
@ /* Set bit indicating this thread is ready for execution. */
@
LDR r2, [r0, #152] @ Pickup the ready bit
ORR r2, r2, #0x8000 @ Set ready bit (bit 15)
STR r2, [r0, #152] @ Make this thread ready for executing again
DMB @ Ensure that accesses to shared resource have completed
@
@ /* Return to the scheduler. */
@ _tx_thread_schedule();
@
B _tx_thread_schedule @ Return to scheduler
@ }
@
__tx_thread_idle_system_restore:
@
@ /* Just return back to the scheduler! */
@
MOV r3, #SVC_MODE @ Build SVC mode with interrupts disabled
MSR CPSR_c, r3 @ Change to SVC mode
B _tx_thread_schedule @ Return to scheduler
@}
@
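The release sequence at the end of this path compresses to a few C statements. A minimal sketch, assuming a protection structure laid out as the offsets above imply (the offset-4 slot and the struct name are guesses; the authoritative definition lives in tx_thread.h):

typedef struct
{
    volatile unsigned long tx_thread_smp_protect_in_force;   /* offset 0           */
    volatile unsigned long tx_thread_smp_protect_thread;     /* offset 4, assumed  */
    volatile unsigned long tx_thread_smp_protect_core;       /* offset 8           */
    volatile unsigned long tx_thread_smp_protect_count;      /* offset 12          */
} SMP_PROTECT_SKETCH;                                        /* hypothetical name  */

extern SMP_PROTECT_SKETCH _tx_thread_smp_protection;

static void release_inter_core_lock(void)
{
    /* Invalidate the owning core first, then drop the flag, exactly as above. */
    _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFFUL;
    __asm__ volatile("DMB" ::: "memory");        /* order the ownership update   */
    _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0UL;
    __asm__ volatile("DSB ISH" ::: "memory");    /* publish before waking cores  */
#ifdef TX_ENABLE_WFE
    __asm__ volatile("SEV");                     /* wake cores parked in WFE     */
#endif
}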

View File

@@ -0,0 +1,204 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_irq_processing_return
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
.global _tx_execution_isr_enter
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_context_save SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function saves the context of an executing thread in the */
@/* beginning of interrupt processing. The function also ensures that */
@/* the system stack is used upon return to the calling ISR. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ISRs */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_context_save(VOID)
@{
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
@
@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
@ out, we are in IRQ mode, and all registers are intact. */
@
@ /* Check for a nested interrupt condition. */
@ if (_tx_thread_system_state[core]++)
@ {
@
STMDB sp!, {r0-r3} @ Save some working registers
@
@ /* Save the rest of the scratch registers on the stack and return to the
@ calling ISR. */
@
MRS r0, SPSR @ Pickup saved SPSR
SUB lr, lr, #4 @ Adjust point of interrupt
STMDB sp!, {r0, r10, r12, lr} @ Store other registers
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable FIQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
@
LDR r3, =_tx_thread_system_state @ Pickup address of system state var
ADD r3, r3, r12 @ Build index into the system state array
LDR r2, [r3, #0] @ Pickup system state
CMP r2, #0 @ Is this the first interrupt?
BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
@
@ /* Nested interrupt condition. */
@
ADD r2, r2, #1 @ Increment the interrupt counter
STR r2, [r3, #0] @ Store it back in the variable
@
@ /* Return to the ISR. */
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
B __tx_irq_processing_return @ Continue IRQ processing
@
__tx_thread_not_nested_save:
@ }
@
@ /* Otherwise, not nested, check to see if a thread was running. */
@ else if (_tx_thread_current_ptr[core])
@ {
@
ADD r2, r2, #1 @ Increment the interrupt counter
STR r2, [r3, #0] @ Store it back in the variable
LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
ADD r1, r1, r12 @ Build index into current thread ptr
LDR r0, [r1, #0] @ Pickup current thread pointer
CMP r0, #0 @ Is it NULL?
BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
@ scheduling loop - nothing needs saving!
@
@ /* Save the current stack pointer in the thread's control block. */
@ _tx_thread_current_ptr[core] -> tx_thread_stack_ptr = sp;
@
@ /* Switch to the system stack. */
@ sp = _tx_thread_system_stack_ptr;
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
B __tx_irq_processing_return @ Continue IRQ processing
@
@ }
@ else
@ {
@
__tx_thread_idle_system_save:
@
@ /* Interrupt occurred in the scheduling loop. */
@
@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
@ processing. */
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
ADD sp, sp, #32 @ Recover saved registers
B __tx_irq_processing_return @ Continue IRQ processing
@
@ }
@}
@
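Pulling the pseudocode comments together, this routine chooses between three save paths based on per-core state. A hedged C outline (the array names are the real globals; their element types are assumptions here):

extern volatile unsigned long _tx_thread_system_state[];   /* per-core nesting counts */
extern void *_tx_thread_current_ptr[];                     /* per-core current thread */

/* Returns which of the three save paths the assembly above takes for this core. */
static int context_save_case(unsigned int core)
{
    if (_tx_thread_system_state[core]++)
        return 0;        /* nested interrupt: resume the calling ISR        */
    else if (_tx_thread_current_ptr[core])
        return 1;        /* a thread was running: its full context is saved */
    else
        return 2;        /* hit the scheduling loop: nothing to save        */
}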

View File

@@ -0,0 +1,104 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
#ifdef TX_ENABLE_FIQ_SUPPORT
INT_MASK = 0xC0 @ Interrupt bit mask
#else
INT_MASK = 0x80 @ Interrupt bit mask
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_interrupt_control SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is responsible for changing the interrupt lockout */
@/* posture of the system. */
@/* */
@/* INPUT */
@/* */
@/* new_posture New interrupt lockout posture */
@/* */
@/* OUTPUT */
@/* */
@/* old_posture Old interrupt lockout posture */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* Application Code */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@UINT _tx_thread_interrupt_control(UINT new_posture)
@{
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
@
@ /* Pickup current interrupt lockout posture. */
@
MRS r3, CPSR @ Pickup current CPSR
BIC r1, r3, #INT_MASK @ Clear interrupt lockout bits
ORR r1, r1, r0 @ Or-in new interrupt lockout bits
@
@ /* Apply the new interrupt posture. */
@
MSR CPSR_c, r1 @ Setup new CPSR
AND r0, r3, #INT_MASK @ Return previous interrupt mask
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@
@}
@
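A usage sketch from application code. TX_INT_DISABLE and TX_INT_ENABLE are assumed to be the posture values this port's tx_port.h defines; applications more commonly go through the TX_DISABLE/TX_RESTORE macros, so this direct call is illustrative only:

#include "tx_api.h"

void critical_section_example(void)                               /* hypothetical caller   */
{
UINT    old_posture;

    old_posture = _tx_thread_interrupt_control(TX_INT_DISABLE);  /* lock out interrupts   */
    /* ... touch data shared with ISRs ... */
    _tx_thread_interrupt_control(old_posture);                   /* restore prior posture */
}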

View File

@@ -0,0 +1,97 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_interrupt_disable SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is responsible for disabling interrupts */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* old_posture Old interrupt lockout posture */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* Application Code */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@UINT _tx_thread_interrupt_disable(void)
@{
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
@
@ /* Pickup current interrupt lockout posture. */
@
MRS r0, CPSR @ Pickup current CPSR
@
@ /* Mask interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ
#else
CPSID i @ Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@}
@

View File

@@ -0,0 +1,89 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_interrupt_restore SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is responsible for restoring interrupts to the state */
@/* returned by a previous _tx_thread_interrupt_disable call. */
@/* */
@/* INPUT */
@/* */
@/* old_posture Old interrupt lockout posture */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* Application Code */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@UINT _tx_thread_interrupt_restore(UINT old_posture)
@{
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
@
@ /* Apply the new interrupt posture. */
@
MSR CPSR_c, r0 @ Setup new CPSR
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@}
@
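The two services above bracket a critical section. A short sketch, using the declarations given in the function headers (the caller and counter are illustrative):

#include "tx_api.h"

extern UINT _tx_thread_interrupt_disable(void);
extern UINT _tx_thread_interrupt_restore(UINT old_posture);

void shared_counter_bump(volatile unsigned long *counter)   /* hypothetical */
{
UINT    old_posture;

    old_posture = _tx_thread_interrupt_disable();   /* returns the prior CPSR  */
    (*counter)++;                                   /* safe against local ISRs */
    _tx_thread_interrupt_restore(old_posture);      /* writes the posture back */
}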

View File

@@ -0,0 +1,112 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
#ifdef TX_ENABLE_FIQ_SUPPORT
DISABLE_INTS = 0xC0 @ Disable IRQ & FIQ interrupts
#else
DISABLE_INTS = 0x80 @ Disable IRQ interrupts
#endif
MODE_MASK = 0x1F @ Mode mask
IRQ_MODE_BITS = 0x12 @ IRQ mode bits
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_irq_nesting_end SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is called by the application from IRQ mode after */
@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
@/* processing from system mode back to IRQ mode prior to the ISR */
@/* calling _tx_thread_context_restore. Note that this function */
@/* assumes the system stack pointer is in the same position after */
@/* nesting start function was called. */
@/* */
@/* This function assumes that the system mode stack pointer was setup */
@/* during low-level initialization (tx_initialize_low_level.s). */
@/* */
@/* This function returns with IRQ interrupts disabled. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ISRs */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_irq_nesting_end(VOID)
@{
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
MOV r3,lr @ Save ISR return address
MRS r0, CPSR @ Pickup the CPSR
ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
MSR CPSR_c, r0 @ Disable interrupts
LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
@ 8-byte alignment logic)
BIC r0, r0, #MODE_MASK @ Clear mode bits
ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
MSR CPSR_c, r0 @ Re-enter IRQ mode
#ifdef __THUMB_INTERWORK
BX r3 @ Return to caller
#else
MOV pc, r3 @ Return to caller
#endif
@}

View File

@@ -0,0 +1,106 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
IRQ_DISABLE = 0x80 @ IRQ disable bit
MODE_MASK = 0x1F @ Mode mask
SYS_MODE_BITS = 0x1F @ System mode bits
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_irq_nesting_start SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is called by the application from IRQ mode after */
@/* _tx_thread_context_save has been called and switches the IRQ */
@/* processing to the system mode so nested IRQ interrupt processing */
@/* is possible (system mode has its own "lr" register). Note that */
@/* this function assumes that the system mode stack pointer was setup */
@/* during low-level initialization (tx_initialize_low_level.s). */
@/* */
@/* This function returns with IRQ interrupts enabled. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ISRs */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_irq_nesting_start(VOID)
@{
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
MOV r3,lr @ Save ISR return address
MRS r0, CPSR @ Pickup the CPSR
BIC r0, r0, #MODE_MASK @ Clear the mode bits
ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
MSR CPSR_c, r0 @ Enter system mode
STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
@ and push r1 just to keep 8-byte alignment
BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
    MSR     CPSR_c, r0                          @ Enable IRQ interrupts
#ifdef __THUMB_INTERWORK
BX r3 @ Return to caller
#else
MOV pc, r3 @ Return to caller
#endif
@}
@
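Taken together with _tx_thread_context_save and _tx_thread_context_restore, the nesting services slot into an IRQ handler in a fixed order. The outline below is C only to show that order; in a real port this glue is written in assembly, because these entry points manipulate banked registers and do not follow the C calling convention:

extern void _tx_thread_context_save(void);
extern void _tx_thread_irq_nesting_start(void);
extern void _tx_thread_irq_nesting_end(void);
extern void _tx_thread_context_restore(void);

void irq_handler_shape(void)                 /* ordering diagram, not real glue */
{
    _tx_thread_context_save();               /* 1: save interrupted context     */
    _tx_thread_irq_nesting_start();          /* 2: SYS mode, IRQ re-enabled     */
    /* ... application ISR body, which may itself be interrupted ... */
    _tx_thread_irq_nesting_end();            /* 3: back to IRQ mode, IRQ off    */
    _tx_thread_context_restore();            /* 4: restore or reschedule        */
}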

View File

@@ -0,0 +1,315 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h"
@
@
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
.global _tx_execution_thread_enter
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_schedule SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function waits for a thread control block pointer to appear in */
@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
@/* in the variable, the corresponding thread is resumed. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* _tx_initialize_kernel_enter ThreadX entry function */
@/* _tx_thread_system_return Return to system from thread */
@/* _tx_thread_context_restore Restore thread's context */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_schedule(VOID)
@{
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
@
@ /* Enable interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSIE if @ Enable IRQ and FIQ interrupts
#else
CPSIE i @ Enable IRQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
ADD r1, r1, r12 @ Build offset to execute ptr for this core
@
@    /* Lockout interrupts while looking for a thread to transfer control to.  */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
@
@ /* Wait for a thread to execute. */
@ do
@ {
@
@
LDR r0, [r1, #0] @ Pickup next thread to execute
CMP r0, #0 @ Is it NULL?
BEQ _tx_thread_schedule @ If so, keep looking for a thread
@
@ }
@ while(_tx_thread_execute_ptr[core] == TX_NULL);
@
@
@ /* Get the lock for accessing the thread's ready bit. */
@
MOV r2, #172 @ Build offset to the lock
ADD r2, r0, r2 @ Get the address to the lock
LDREX r3, [r2] @ Pickup the lock value
CMP r3, #0 @ Check if it's available
BNE _tx_thread_schedule @ No, lock not available
MOV r3, #1 @ Build the lock set value
STREX r4, r3, [r2] @ Try to get the lock
CMP r4, #0 @ Check if we got the lock
BNE _tx_thread_schedule @ No, another core got it first
DMB @ Ensure write to lock completes
@
@ /* Now make sure the thread's ready bit is set. */
@
LDR r3, [r0, #152] @ Pickup the thread ready bit
AND r4, r3, #0x8000 @ Isolate the ready bit
CMP r4, #0 @ Is it set?
BNE _tx_thread_ready_for_execution @ Yes, schedule the thread
@
@ /* The ready bit isn't set. Release the lock and jump back to the scheduler. */
@
MOV r3, #0 @ Build clear value
STR r3, [r2] @ Release the lock
DMB @ Ensure write to lock completes
B _tx_thread_schedule @ Jump back to the scheduler
@
_tx_thread_ready_for_execution:
@
@ /* We have a thread to execute. */
@
@ /* Clear the ready bit and release the lock. */
@
BIC r3, r3, #0x8000 @ Clear ready bit
STR r3, [r0, #152] @ Store it back in the thread control block
    DMB                                         @ Ensure the ready bit update is visible
    MOV     r3, #0                              @ Build clear value for the lock
    STR     r3, [r2]                            @ Release the lock
    DMB                                         @ Ensure the lock release is visible
@
@ /* Setup the current thread pointer. */
@ _tx_thread_current_ptr[core] = _tx_thread_execute_ptr[core];
@
LDR r2, =_tx_thread_current_ptr @ Pickup address of current thread
ADD r2, r2, r12 @ Build index into the current thread array
STR r0, [r2, #0] @ Setup current thread pointer
@
@ /* In the time between reading the execute pointer and assigning
@ it to the current pointer, the execute pointer was changed by
@ some external code. If the current pointer was still null when
@ the external code checked if a core preempt was necessary, then
@    it wouldn't have done it and a preemption would be missed. To
@ handle this, undo some things and jump back to the scheduler so
@ it can schedule the new thread. */
@
LDR r1, [r1, #0] @ Reload the execute pointer
CMP r0, r1 @ Did it change?
BEQ _execute_pointer_did_not_change @ If not, skip handling
MOV r1, #0 @ Build clear value
STR r1, [r2, #0] @ Clear current thread pointer
LDR r1, [r0, #152] @ Pickup the ready bit
ORR r1, r1, #0x8000 @ Set ready bit (bit 15)
STR r1, [r0, #152] @ Make this thread ready for executing again
DMB @ Ensure that accesses to shared resource have completed
B _tx_thread_schedule @ Jump back to the scheduler to schedule the new thread
_execute_pointer_did_not_change:
@ /* Increment the run count for this thread. */
@ _tx_thread_current_ptr[core] -> tx_thread_run_count++;
@
LDR r2, [r0, #4] @ Pickup run counter
LDR r3, [r0, #24] @ Pickup time-slice for this thread
ADD r2, r2, #1 @ Increment thread run-counter
STR r2, [r0, #4] @ Store the new run counter
@
@ /* Setup time-slice, if present. */
@ _tx_timer_time_slice[core] = _tx_thread_current_ptr[core] -> tx_thread_time_slice;
@
LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
@ variable
ADD r2, r2, r12 @ Build index into the time-slice array
LDR sp, [r0, #8] @ Switch stack pointers
STR r3, [r2, #0] @ Setup time-slice
@
@ /* Switch to the thread's stack. */
@ sp = _tx_thread_execute_ptr[core] -> tx_thread_stack_ptr;
@
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the thread entry function to indicate the thread is executing. */
@
MOV r5, r0 @ Save r0
BL _tx_execution_thread_enter @ Call the thread execution enter function
MOV r0, r5 @ Restore r0
#endif
@
@ /* Determine if an interrupt frame or a synchronous task suspension frame
@ is present. */
@
LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
CMP r4, #0 @ Check for synchronous context switch
BEQ _tx_solicited_return
MSR SPSR_cxsf, r5 @ Setup SPSR for return
#ifdef TARGET_FPU_VFP
LDR r1, [r0, #160] @ Pickup the VFP enabled flag
CMP r1, #0 @ Is the VFP enabled?
BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
VLDMIA sp!, {D0-D15} @ Recover D0-D15
VLDMIA sp!, {D16-D31} @ Recover D16-D31
LDR r4, [sp], #4 @ Pickup FPSCR
VMSR FPSCR, r4 @ Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
_tx_solicited_return:
#ifdef TARGET_FPU_VFP
MSR CPSR_cxsf, r5 @ Recover CPSR
LDR r1, [r0, #160] @ Pickup the VFP enabled flag
CMP r1, #0 @ Is the VFP enabled?
BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
VLDMIA sp!, {D8-D15} @ Recover D8-D15
VLDMIA sp!, {D16-D31} @ Recover D16-D31
LDR r4, [sp], #4 @ Pickup FPSCR
VMSR FPSCR, r4 @ Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
MSR CPSR_cxsf, r5 @ Recover CPSR
LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
BX lr @ Return to caller
@
@}
@
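The LDREX/STREX sequence guarding the thread's ready bit is a classic try-lock. An equivalent sketch with GCC atomic builtins, illustrative only (the lock word is the offset-172 slot of TX_THREAD in this port):

static int ready_lock_try_acquire(volatile unsigned long *lock)
{
    unsigned long expected = 0UL;

    /* Succeeds only if *lock was 0; mirrors the LDREX/CMP/STREX/CMP above. */
    return __atomic_compare_exchange_n(lock, &expected, 1UL, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void ready_lock_release(volatile unsigned long *lock)
{
    __atomic_store_n(lock, 0UL, __ATOMIC_RELEASE);   /* the STR + DMB pair above */
}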
#ifdef TARGET_FPU_VFP
.global tx_thread_vfp_enable
tx_thread_vfp_enable:
MRS r2, CPSR @ Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
MRC p15, 0, r1, c0, c0, 5 @ Read CPU ID register
AND r1, r1, #0x03 @ Mask off, leaving the CPU ID field
LSL r1, r1, #2 @ Build offset to array indexes
LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
ADD r0, r0, r1 @ Build index into the current thread array
LDR r1, [r0] @ Pickup current thread pointer
CMP r1, #0 @ Check for NULL thread pointer
BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
MOV r0, #1 @ Build enable value
STR r0, [r1, #160] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
MSR CPSR_cxsf, r2 @ Recover CPSR
BX LR @ Return to caller
.global tx_thread_vfp_disable
tx_thread_vfp_disable:
MRS r2, CPSR @ Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
MRC p15, 0, r1, c0, c0, 5 @ Read CPU ID register
AND r1, r1, #0x03 @ Mask off, leaving the CPU ID field
LSL r1, r1, #2 @ Build offset to array indexes
LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
ADD r0, r0, r1 @ Build index into the current thread array
LDR r1, [r0] @ Pickup current thread pointer
CMP r1, #0 @ Check for NULL thread pointer
BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
MOV r0, #0 @ Build disable value
STR r0, [r1, #160] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
MSR CPSR_cxsf, r2 @ Recover CPSR
BX LR @ Return to caller
#endif
@
@}
@
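Threads that touch the VFP call the enable service before their first floating-point instruction, so the scheduler saves D0-D31 and FPSCR across preemption. A usage sketch (the thread entry name is illustrative):

#include "tx_api.h"

void fp_thread_entry(ULONG input)            /* hypothetical thread entry       */
{
    tx_thread_vfp_enable();                  /* set this thread's VFP flag      */
    /* ... floating-point work; VFP state now survives context switches ... */
    tx_thread_vfp_disable();                 /* optional: stop saving VFP state */
}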

View File

@@ -0,0 +1,86 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_core_get SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function gets the currently running core number and returns it.*/
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* Core ID */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Source */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_core_get
.type _tx_thread_smp_core_get,function
_tx_thread_smp_core_get:
MRC p15, 0, r0, c0, c0, 5 @ Read CPU ID register
AND r0, r0, #0x03 @ Mask off, leaving the CPU ID field
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
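The same MPIDR read is occasionally useful from C. A sketch with GCC inline assembly (the helper name is illustrative):

static inline unsigned int current_core_id(void)
{
    unsigned int mpidr;

    __asm__ volatile("MRC p15, 0, %0, c0, c0, 5" : "=r"(mpidr));
    return mpidr & 0x03u;                    /* low two bits hold the CPU ID */
}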

View File

@@ -0,0 +1,102 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.global sendSGI
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_core_preempt SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function preempts the specified core in situations where the */
@/* thread corresponding to this core is no longer ready or when the */
@/*    core must be used for a higher-priority thread. If the specified  */
@/*    core is the current core, this processing is skipped since the    */
@/*    core will give up control subsequently on its own.                */
@/* */
@/* INPUT */
@/* */
@/* core The core to preempt */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Source */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_core_preempt
.type _tx_thread_smp_core_preempt,function
_tx_thread_smp_core_preempt:
STMDB sp!, {r4, lr} @ Save the lr and r4 register on the stack
@
@ /* Place call to send inter-processor interrupt here! */
@
    DSB                                         @ Ensure memory writes complete before the SGI
MOV r1, #1 @ Build parameter list
LSL r1, r1, r0 @
MOV r0, #0 @
MOV r2, #0 @
BL sendSGI @ Make call to send inter-processor interrupt
LDMIA sp!, {r4, lr} @ Recover lr register and r4
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
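The register setup above amounts to one BSP call. A sketch, assuming sendSGI takes the GIC SGI number, a target-CPU bitmask, and a filter value in that order (the signature is inferred from the register loads, not from a header):

extern void sendSGI(unsigned int id, unsigned int target_list,
                    unsigned int filter_list);            /* assumed signature     */

void preempt_core_sketch(unsigned int core)
{
    __asm__ volatile("DSB" ::: "memory");   /* complete prior writes first         */
    sendSGI(0u, 1u << core, 0u);            /* SGI 0 aimed at just the target core */
}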

View File

@@ -0,0 +1,104 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.global _tx_thread_system_state
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_current_state_get SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/*    This function gets the current state of the calling core.         */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/*    Current system state for this core                                */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Components */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_current_state_get
.type _tx_thread_smp_current_state_get,function
_tx_thread_smp_current_state_get:
MRS r3, CPSR @ Pickup current CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r2, c0, c0, 5 @ Read CPU ID register
AND r2, r2, #0x03 @ Mask off, leaving the CPU ID field
LSL r2, r2, #2 @ Build offset to array indexes
LDR r1, =_tx_thread_system_state @ Pickup start of the current state array
ADD r1, r1, r2 @ Build index into the current state array
LDR r0, [r1] @ Pickup state for this core
MSR CPSR_c, r3 @ Restore CPSR
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif

View File

@@ -0,0 +1,104 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.global _tx_thread_current_ptr
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_current_thread_get SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/*    This function gets the current thread of the calling core.        */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/*    Current thread pointer for this core                              */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Components */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_current_thread_get
.type _tx_thread_smp_current_thread_get,function
_tx_thread_smp_current_thread_get:
MRS r3, CPSR @ Pickup current CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r2, c0, c0, 5 @ Read CPU ID register
AND r2, r2, #0x03 @ Mask off, leaving the CPU ID field
LSL r2, r2, #2 @ Build offset to array indexes
LDR r1, =_tx_thread_current_ptr @ Pickup start of the current thread array
ADD r1, r1, r2 @ Build index into the current thread array
LDR r0, [r1] @ Pickup current thread for this core
MSR CPSR_c, r3 @ Restore CPSR
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
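Both getters follow the same shape: mask interrupts so the core ID cannot change mid-read, index the per-core array, then restore the posture. A hedged C rendering, reusing the current_core_id inline sketch shown earlier (array element types are per tx_thread.h; the function name is illustrative):

#include "tx_api.h"

extern TX_THREAD *_tx_thread_current_ptr[];             /* per-core slots       */
extern UINT _tx_thread_interrupt_disable(void);
extern UINT _tx_thread_interrupt_restore(UINT old_posture);

TX_THREAD *smp_current_thread_get_sketch(void)
{
TX_THREAD   *thread;
UINT        old_posture;

    old_posture = _tx_thread_interrupt_disable();        /* pin the core ID     */
    thread = _tx_thread_current_ptr[current_core_id()];  /* this core's thread  */
    _tx_thread_interrupt_restore(old_posture);
    return thread;
}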

View File

@@ -0,0 +1,141 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_smp_release_cores_flag
.global _tx_thread_schedule
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_initialize_wait SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is the place where additional cores wait until */
@/* initialization is complete before they enter the thread scheduling */
@/* loop. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* _tx_thread_schedule Thread scheduling loop */
@/* */
@/* CALLED BY */
@/* */
@/* Hardware */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_initialize_wait
.type _tx_thread_smp_initialize_wait,function
_tx_thread_smp_initialize_wait:
@ /* Lockout interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r10, r10, #2 @ Build offset to array indexes
@
@ /* Make sure the system state for this core is TX_INITIALIZE_IN_PROGRESS before we check the release
@ flag. */
@
LDR r3, =_tx_thread_system_state @ Build address of system state variable
ADD r3, r3, r10 @ Build index into the system state array
LDR r2, =0xF0F0F0F0 @ Build TX_INITIALIZE_IN_PROGRESS flag
wait_for_initialize:
LDR r1, [r3] @ Pickup system state
CMP r1, r2 @ Has initialization completed?
BNE wait_for_initialize @ If different, wait here!
@
@ /* Pickup the release cores flag. */
@
LDR r2, =_tx_thread_smp_release_cores_flag @ Build address of release cores flag
wait_for_release:
LDR r3, [r2] @ Pickup the flag
CMP r3, #0 @ Is it set?
BEQ wait_for_release @ Wait for the flag to be set
@
@ /* Core 0 has released this core. */
@
@ /* Clear this core's system state variable. */
@
LDR r3, =_tx_thread_system_state @ Build address of system state variable
ADD r3, r3, r10 @ Build index into the system state array
MOV r0, #0 @ Build clear value
STR r0, [r3] @ Clear this core's entry in the system state array
@
@    /* Now wait for core 0 to finish its initialization.  */
@
    LDR     r3, =_tx_thread_system_state            @ Build address of core 0's system state variable
core_0_wait_loop:
LDR r2, [r3] @ Pickup system state for core 0
CMP r2, #0 @ Is it 0?
BNE core_0_wait_loop @ No, keep waiting for core 0 to finish its initialization
@
@ /* Initialize is complete, enter the scheduling loop! */
@
B _tx_thread_schedule @ Enter scheduling loop for this core!
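The whole wait sequence reads naturally as three polling loops. A C sketch (TX_INITIALIZE_IN_PROGRESS is the 0xF0F0F0F0 value built above; array element types and the volatile qualifiers are assumptions):

extern volatile unsigned long _tx_thread_system_state[];
extern volatile unsigned long _tx_thread_smp_release_cores_flag;
extern void _tx_thread_schedule(void);

#define TX_INITIALIZE_IN_PROGRESS_SKETCH   0xF0F0F0F0UL

void smp_initialize_wait_sketch(unsigned int core)       /* interrupts already off */
{
    while (_tx_thread_system_state[core] != TX_INITIALIZE_IN_PROGRESS_SKETCH)
        ;                                   /* wait until this core is parked   */
    while (_tx_thread_smp_release_cores_flag == 0UL)
        ;                                   /* wait for core 0 to release us    */
    _tx_thread_system_state[core] = 0UL;    /* announce this core is ready      */
    while (_tx_thread_system_state[0] != 0UL)
        ;                                   /* let core 0 finish initialization */
    _tx_thread_schedule();                  /* enter the scheduler; no return   */
}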

View File

@@ -0,0 +1,85 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_low_level_initialize SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function performs low-level initialization of the booting */
@/* core. */
@/* */
@/* INPUT */
@/* */
@/* number_of_cores Number of cores */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* _tx_initialize_high_level ThreadX high-level init */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_low_level_initialize
.type _tx_thread_smp_low_level_initialize,function
_tx_thread_smp_low_level_initialize:
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif

View File

@@ -0,0 +1,371 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
@/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.global _tx_thread_current_ptr
.global _tx_thread_smp_protection
.global _tx_thread_smp_protect_wait_counts
.global _tx_thread_smp_protect_wait_list
.global _tx_thread_smp_protect_wait_list_lock_protect_in_force
.global _tx_thread_smp_protect_wait_list_head
.global _tx_thread_smp_protect_wait_list_tail
.global _tx_thread_smp_protect_wait_list_size
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_protect SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function gets protection for running inside the ThreadX */
@/*    source. This is accomplished by a combination of a test-and-set   */
@/* flag and periodically disabling interrupts. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* Previous Status Register */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Source */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_protect
.type _tx_thread_smp_protect,function
_tx_thread_smp_protect:
PUSH {r4-r6} @ Save registers we'll be using
@
@ /* Disable interrupts so we don't get preempted. */
@
MRS r0, CPSR @ Pickup current CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
@
@ /* Do we already have protection? */
@ if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
@ {
@
MRC p15, 0, r1, c0, c0, 5 @ Read CPU ID register
AND r1, r1, #0x03 @ Mask off, leaving the CPU ID field
LDR r2, =_tx_thread_smp_protection @ Build address to protection structure
LDR r3, [r2, #8] @ Pickup the owning core
CMP r1, r3 @ Is it not this core?
BNE _protection_not_owned @ No, the protection is not already owned
@
@ /* We already have protection. */
@
@ /* Increment the protection count. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_count++;
@
LDR r3, [r2, #12] @ Pickup ownership count
ADD r3, r3, #1 @ Increment ownership count
STR r3, [r2, #12] @ Store ownership count
    DMB                                         @ Ensure the count update is visible
B _return
_protection_not_owned:
@
@ /* Is the lock available? */
@ if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
@ {
@
LDREX r3, [r2, #0] @ Pickup the protection flag
CMP r3, #0
BNE _start_waiting @ No, protection not available
@
@ /* Is the list empty? */
@ if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_list_head
LDR r3, [r3]
LDR r4, =_tx_thread_smp_protect_wait_list_tail
LDR r4, [r4]
CMP r3, r4
BNE _list_not_empty
@
@ /* Try to get the lock. */
@ if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
@ {
@
MOV r3, #1 @ Build lock value
STREX r4, r3, [r2, #0] @ Attempt to get the protection
CMP r4, #0
BNE _start_waiting @ Did it fail?
@
@ /* We got the lock! */
@ _tx_thread_smp_protect_lock_got();
@
DMB @ Ensure write to protection finishes
_tx_thread_smp_protect_lock_got @ Call the lock got function
B _return
_list_not_empty:
@
@ /* Are we at the front of the list? */
@ if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_list_head @ Get the address of the head
LDR r3, [r3] @ Get the value of the head
LDR r4, =_tx_thread_smp_protect_wait_list @ Get the address of the list
LDR r4, [r4, r3, LSL #2] @ Get the value at the head index
CMP r1, r4
BNE _start_waiting
@
@ /* Is the lock still available? */
@ if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
@ {
@
LDR r3, [r2, #0] @ Pickup the protection flag
CMP r3, #0
BNE _start_waiting @ No, protection not available
@
@ /* Get the lock. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
@
MOV r3, #1 @ Build lock value
STR r3, [r2, #0] @ Store lock value
    DMB                                         @ Ensure the lock store is visible
@
@ /* Got the lock. */
@ _tx_thread_smp_protect_lock_got();
@
_tx_thread_smp_protect_lock_got
@
@ /* Remove this core from the wait list. */
@ _tx_thread_smp_protect_remove_from_front_of_list();
@
_tx_thread_smp_protect_remove_from_front_of_list
B _return
_start_waiting:
@
@ /* For one reason or another, we didn't get the lock. */
@
@ /* Increment wait count. */
@ _tx_thread_smp_protect_wait_counts[this_core]++;
@
LDR r3, =_tx_thread_smp_protect_wait_counts @ Load wait list counts
LDR r4, [r3, r1, LSL #2] @ Load waiting value for this core
ADD r4, r4, #1 @ Increment wait value
STR r4, [r3, r1, LSL #2] @ Store new wait value
@
@ /* Have we not added ourselves to the list yet? */
@ if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
@ {
@
CMP r4, #1
BNE _already_in_list0 @ Is this core already waiting?
@
@ /* Add ourselves to the list. */
@ _tx_thread_smp_protect_wait_list_add(this_core);
@
_tx_thread_smp_protect_wait_list_add @ Call macro to add ourselves to the list
@
@ }
@
_already_in_list0:
@
@ /* Restore interrupts. */
@
MSR CPSR_c, r0 @ Restore CPSR
#ifdef TX_ENABLE_WFE
WFE @ Go into standby
#endif
@
@ /* We do this until we have the lock. */
@ while (1)
@ {
@
_try_to_get_lock:
@
@ /* Disable interrupts so we don't get preempted. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
MRC p15, 0, r1, c0, c0, 5 @ Read CPU ID register
AND r1, r1, #0x03 @ Mask off, leaving the CPU ID field
@
@ /* Do we already have protection? */
@ if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
@ {
@
LDR r3, [r2, #8] @ Pickup the owning core
CMP r3, r1 @ Is it this core?
BEQ _got_lock_after_waiting @ Yes, the protection is already owned. This means
@ an ISR preempted us and got protection
@
@ }
@
@ /* Are we at the front of the list? */
@ if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_list_head @ Get the address of the head
LDR r3, [r3] @ Get the value of the head
LDR r4, =_tx_thread_smp_protect_wait_list @ Get the address of the list
LDR r4, [r4, r3, LSL #2] @ Get the value at the head index
CMP r1, r4
BNE _did_not_get_lock
@
@ /* Is the lock still available? */
@ if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
@ {
@
LDR r3, [r2, #0] @ Pickup the protection flag
CMP r3, #0
BNE _did_not_get_lock @ No, protection not available
@
@ /* Get the lock. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
@
MOV r3, #1 @ Build lock value
STR r3, [r2, #0] @ Store lock value
    DMB                                         @ Ensure the lock store is visible
@
@ /* Got the lock. */
@ _tx_thread_smp_protect_lock_got();
@
_tx_thread_smp_protect_lock_got
@
@ /* Remove this core from the wait list. */
@ _tx_thread_smp_protect_remove_from_front_of_list();
@
_tx_thread_smp_protect_remove_from_front_of_list
B _got_lock_after_waiting
_did_not_get_lock:
@
@ /* For one reason or another, we didn't get the lock. */
@
@ /* Were we removed from the list? This can happen if we're a thread
@ and we got preempted. */
@ if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_counts @ Load wait list counts
LDR r4, [r3, r1, LSL #2] @ Load waiting value for this core
CMP r4, #0
BNE _already_in_list1 @ Is this core already in the list?
@
@ /* Add ourselves to the list. */
@ _tx_thread_smp_protect_wait_list_add(this_core);
@
_tx_thread_smp_protect_wait_list_add @ Call macro to add ourselves to the list
@
@ /* Our waiting count was also reset when we were preempted. Increment it again. */
@ _tx_thread_smp_protect_wait_counts[this_core]++;
@
LDR r3, =_tx_thread_smp_protect_wait_counts @ Load wait list counts
LDR r4, [r3, r1, LSL #2] @ Load waiting value for this core
ADD r4, r4, #1 @ Increment wait value
    STR     r4, [r3, r1, LSL #2]                @ Store new wait value
@
@ }
@
_already_in_list1:
@
@ /* Restore interrupts and try again. */
@
MSR CPSR_c, r0 @ Restore CPSR
#ifdef TX_ENABLE_WFE
WFE @ Go into standby
#endif
B _try_to_get_lock @ On waking, restart the protection attempt
_got_lock_after_waiting:
@
@ /* We're no longer waiting. */
@ _tx_thread_smp_protect_wait_counts[this_core]--;
@
LDR r3, =_tx_thread_smp_protect_wait_counts @ Load waiting list
LDR r4, [r3, r1, LSL #2] @ Load current wait value
SUB r4, r4, #1 @ Decrement wait value
    STR     r4, [r3, r1, LSL #2]                @ Store new wait value
@
@ /* Restore link register and return. */
@
_return:
POP {r4-r6} @ Restore registers
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
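Stripped of the fairness wait list, the acquisition logic above reduces to a recursive-ownership check plus a compare-exchange on the in-force flag. A minimal model under those assumptions (the reduced struct layout and names are hypothetical, following the pseudocode comments; the wait-list layer and interrupt handling are deliberately elided):

#include <stdint.h>

typedef struct
{
    volatile uint32_t in_force;             /* tx_thread_smp_protect_in_force */
    volatile uint32_t core;                 /* tx_thread_smp_protect_core     */
    volatile uint32_t count;                /* tx_thread_smp_protect_count    */
} protect_model_t;                          /* hypothetical reduced layout    */

static protect_model_t protection = { 0u, 0xFFFFFFFFu, 0u };

static void smp_protect_model(uint32_t this_core)
{
    if (protection.core == this_core)       /* already owned: just nest       */
    {
        protection.count++;
        return;
    }
    for (;;)
    {
        uint32_t expected = 0u;
        if (__atomic_compare_exchange_n(&protection.in_force, &expected, 1u,
                                        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        {
            protection.core  = this_core;   /* record the new owner           */
            protection.count = 1u;
            return;
        }
        /* Real port: join the wait list, restore interrupts, optionally WFE, retry. */
    }
}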

View File

@@ -0,0 +1,310 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
.macro _tx_thread_smp_protect_lock_got
@
@ /* Set the currently owned core. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_core = this_core;
@
STR r1, [r2, #8] @ Store this core
@
@ /* Increment the protection count. */
@ _tx_thread_smp_protection.tx_thread_smp_protect_count++;
@
LDR r3, [r2, #12] @ Pickup ownership count
ADD r3, r3, #1 @ Increment ownership count
STR r3, [r2, #12] @ Store ownership count
DMB
#ifdef TX_MPCORE_DEBUG_ENABLE
LSL r3, r1, #2 @ Build offset to array indexes
LDR r4, =_tx_thread_current_ptr @ Pickup start of the current thread array
ADD r4, r3, r4 @ Build index into the current thread array
LDR r3, [r4] @ Pickup current thread for this core
STR r3, [r2, #4] @ Save current thread pointer
STR LR, [r2, #16] @ Save caller's return address
STR r0, [r2, #20] @ Save CPSR
#endif
.endm
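
In C, reusing the declarations from the protect sketch above, this macro reduces to two stores (the TX_MPCORE_DEBUG_ENABLE bookkeeping is left out):

    static void lock_got(UINT core)
    {
        _tx_thread_smp_protection.tx_thread_smp_protect_core  = core;   /* [r2, #8]  */
        _tx_thread_smp_protection.tx_thread_smp_protect_count += 1;     /* [r2, #12] */
        /* The assembly issues a DMB here so other cores observe the update. */
    }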
.macro _tx_thread_smp_protect_remove_from_front_of_list
@
@ /* Remove ourselves from the list. */
@ _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head++] = 0xFFFFFFFF;
@
MOV r3, #0xFFFFFFFF @ Build the invalid core value
LDR r4, =_tx_thread_smp_protect_wait_list_head @ Get the address of the head
LDR r5, [r4] @ Get the value of the head
LDR r6, =_tx_thread_smp_protect_wait_list @ Get the address of the list
STR r3, [r6, r5, LSL #2] @ Store the invalid core value
ADD r5, r5, #1 @ Increment the head
@
@ /* Did we wrap? */
@ if (_tx_thread_smp_protect_wait_list_head == TX_THREAD_SMP_MAX_CORES + 1)
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_list_size @ Load address of core list size
LDR r3, [r3] @ Load the max cores value
CMP r5, r3 @ Compare the head to it
    BNE     _store_new_head\@                   @ No, not at the max; skip the reset
@
@ _tx_thread_smp_protect_wait_list_head = 0;
@
EOR r5, r5, r5 @ We're at the max. Set it to zero
@
@ }
@
_store_new_head\@:
STR r5, [r4] @ Store the new head
@
@ /* We have the lock! */
@ return;
@
.endm
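
The same macro as a C sketch, again reusing the earlier declarations: invalidate the head slot, advance the head, and wrap when it reaches the list size.

    static void remove_from_front_of_list(void)
    {
        _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head] = 0xFFFFFFFFu;
        _tx_thread_smp_protect_wait_list_head++;
        if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_size)
            _tx_thread_smp_protect_wait_list_head = 0;   /* Wrap the circular list */
    }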
.macro _tx_thread_smp_protect_wait_list_lock_get
@VOID _tx_thread_smp_protect_wait_list_lock_get()
@{
@ /* We do this until we have the lock. */
@ while (1)
@ {
@
_tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@:
@
@ /* Is the list lock available? */
@ _tx_thread_smp_protect_wait_list_lock_protect_in_force = load_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force);
@
LDR r1, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
LDREX r2, [r1] @ Pickup the protection flag
@
@ if (protect_in_force == 0)
@ {
@
CMP r2, #0
BNE _tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@ @ No, protection not available
@
@ /* Try to get the list. */
@ int status = store_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force, 1);
@
MOV r2, #1 @ Build lock value
STREX r3, r2, [r1] @ Attempt to get the protection
@
@ if (status == SUCCESS)
@
CMP r3, #0
BNE _tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@ @ Did it fail? If so, try again.
@
@ /* We have the lock! */
@ return;
@
.endm
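
A minimal C sketch of this LDREX/STREX loop, with the exclusive-access pair replaced by the GCC __atomic compare-exchange builtin (an assumption for illustration; the assembly uses raw exclusives and no explicit barrier):

    static volatile UINT _tx_thread_smp_protect_wait_list_lock_protect_in_force;

    static void wait_list_lock_get(void)
    {
        UINT expected;
        do
        {
            expected = 0;        /* The lock is free only when the flag is zero */
        } while (!__atomic_compare_exchange_n(
                     &_tx_thread_smp_protect_wait_list_lock_protect_in_force,
                     &expected, 1u, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    }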
.macro _tx_thread_smp_protect_wait_list_add
@VOID _tx_thread_smp_protect_wait_list_add(UINT new_core)
@{
@
@ /* We're about to modify the list, so get the list lock. */
@ _tx_thread_smp_protect_wait_list_lock_get();
@
PUSH {r1-r2}
_tx_thread_smp_protect_wait_list_lock_get
POP {r1-r2}
@
@ /* Add this core. */
@ _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_tail++] = new_core;
@
LDR r3, =_tx_thread_smp_protect_wait_list_tail @ Get the address of the tail
LDR r4, [r3] @ Get the value of tail
LDR r5, =_tx_thread_smp_protect_wait_list @ Get the address of the list
STR r1, [r5, r4, LSL #2] @ Store the new core value
ADD r4, r4, #1 @ Increment the tail
@
@ /* Did we wrap? */
@ if (_tx_thread_smp_protect_wait_list_tail == _tx_thread_smp_protect_wait_list_size)
@ {
@
LDR r5, =_tx_thread_smp_protect_wait_list_size @ Load max cores address
LDR r5, [r5] @ Load max cores value
CMP r4, r5 @ Compare max cores to tail
    BNE     _tx_thread_smp_protect_wait_list_add__no_wrap\@ @ No, no wrap; skip the tail reset
@
@ _tx_thread_smp_protect_wait_list_tail = 0;
@
MOV r4, #0
@
@ }
@
_tx_thread_smp_protect_wait_list_add__no_wrap\@:
STR r4, [r3] @ Store the new tail value.
@
@ /* Release the list lock. */
@ _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
@
MOV r3, #0 @ Build lock value
LDR r4, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
STR r3, [r4] @ Store the new value
.endm
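
The add path in C under the same assumptions: take the list lock, store the core at the tail, wrap the tail if needed, then release.

    static void wait_list_add(UINT new_core)
    {
        wait_list_lock_get();                    /* Serialize list updates */

        _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_tail] = new_core;
        _tx_thread_smp_protect_wait_list_tail++;
        if (_tx_thread_smp_protect_wait_list_tail == _tx_thread_smp_protect_wait_list_size)
            _tx_thread_smp_protect_wait_list_tail = 0;   /* Wrap the tail */

        _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;  /* Release */
    }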
.macro _tx_thread_smp_protect_wait_list_remove
@VOID _tx_thread_smp_protect_wait_list_remove(UINT core)
@{
@
@ /* Get the core index. */
@ UINT core_index;
@ for (core_index = 0;; core_index++)
@
EOR r1, r1, r1 @ Clear for 'core_index'
LDR r2, =_tx_thread_smp_protect_wait_list @ Get the address of the list
@
@ {
@
_tx_thread_smp_protect_wait_list_remove__check_cur_core\@:
@
@ /* Is this the core? */
@ if (_tx_thread_smp_protect_wait_list[core_index] == core)
@ {
@ break;
@
LDR r3, [r2, r1, LSL #2] @ Get the value at the current index
CMP r3, r0 @ Did we find the core?
BEQ _tx_thread_smp_protect_wait_list_remove__found_core\@
@
@ }
@
ADD r1, r1, #1 @ Increment cur index
B _tx_thread_smp_protect_wait_list_remove__check_cur_core\@ @ Restart the loop
@
@ }
@
_tx_thread_smp_protect_wait_list_remove__found_core\@:
@
@ /* We're about to modify the list. Get the lock. We need the lock because another
@ core could be simultaneously adding (a core is simultaneously trying to get
@ the inter-core lock) or removing (a core is simultaneously being preempted,
@ like what is currently happening). */
@ _tx_thread_smp_protect_wait_list_lock_get();
@
PUSH {r1}
_tx_thread_smp_protect_wait_list_lock_get
POP {r1}
@
@ /* We remove by shifting. */
@ while (core_index != _tx_thread_smp_protect_wait_list_tail)
@ {
@
_tx_thread_smp_protect_wait_list_remove__compare_index_to_tail\@:
LDR r2, =_tx_thread_smp_protect_wait_list_tail @ Load tail address
LDR r2, [r2] @ Load tail value
CMP r1, r2 @ Compare cur index and tail
BEQ _tx_thread_smp_protect_wait_list_remove__removed\@
@
@ UINT next_index = core_index + 1;
@
MOV r2, r1 @ Move current index to next index register
ADD r2, r2, #1 @ Add 1
@
@ if (next_index == _tx_thread_smp_protect_wait_list_size)
@ {
@
LDR r3, =_tx_thread_smp_protect_wait_list_size
LDR r3, [r3]
CMP r2, r3
BNE _tx_thread_smp_protect_wait_list_remove__next_index_no_wrap\@
@
@ next_index = 0;
@
MOV r2, #0
@
@ }
@
_tx_thread_smp_protect_wait_list_remove__next_index_no_wrap\@:
@
@ list_cores[core_index] = list_cores[next_index];
@
LDR r0, =_tx_thread_smp_protect_wait_list @ Get the address of the list
LDR r3, [r0, r2, LSL #2] @ Get the value at the next index
STR r3, [r0, r1, LSL #2] @ Store the value at the current index
@
@ core_index = next_index;
@
MOV r1, r2
B _tx_thread_smp_protect_wait_list_remove__compare_index_to_tail\@
@
@ }
@
_tx_thread_smp_protect_wait_list_remove__removed\@:
@
@ /* Now update the tail. */
@ if (_tx_thread_smp_protect_wait_list_tail == 0)
@ {
@
LDR r0, =_tx_thread_smp_protect_wait_list_tail @ Load tail address
LDR r1, [r0] @ Load tail value
CMP r1, #0
BNE _tx_thread_smp_protect_wait_list_remove__tail_not_zero\@
@
@ _tx_thread_smp_protect_wait_list_tail = _tx_thread_smp_protect_wait_list_size;
@
LDR r2, =_tx_thread_smp_protect_wait_list_size
LDR r1, [r2]
@
@ }
@
_tx_thread_smp_protect_wait_list_remove__tail_not_zero\@:
@
@ _tx_thread_smp_protect_wait_list_tail--;
@
SUB r1, r1, #1
STR r1, [r0] @ Store new tail value
@
@ /* Release the list lock. */
@ _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
@
MOV r0, #0 @ Build lock value
LDR r1, =_tx_thread_smp_protect_wait_list_lock_protect_in_force @ Load lock address
STR r0, [r1] @ Store the new value
@
@ /* We're no longer waiting. Note that this should be zero since, again,
@ this function is only called when a thread preemption is occurring. */
@ _tx_thread_smp_protect_wait_counts[core]--;
@
MRC p15, 0, r0, c0, c0, 5 @ Read CPU ID register
AND r0, r0, #0x03 @ Mask off, leaving the CPU ID field
LDR r1, =_tx_thread_smp_protect_wait_counts @ Load wait list counts
LDR r2, [r1, r0, LSL #2] @ Load waiting value
SUB r2, r2, #1 @ Subtract 1
STR r2, [r1, r0, LSL #2] @ Store new waiting value
.endm
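
And the removal path in C: locate the core (the search runs without the lock, as above), then shift the remaining entries toward the head under the lock and pull the tail back one slot.

    static void wait_list_remove(UINT core)
    {
        UINT index = 0;

        while (_tx_thread_smp_protect_wait_list[index] != core)  /* Find the core */
            index++;

        wait_list_lock_get();                    /* Others may be adding/removing */

        /* Remove by shifting everything after it toward the head. */
        while (index != _tx_thread_smp_protect_wait_list_tail)
        {
            UINT next = index + 1;
            if (next == _tx_thread_smp_protect_wait_list_size)
                next = 0;
            _tx_thread_smp_protect_wait_list[index] =
                _tx_thread_smp_protect_wait_list[next];
            index = next;
        }

        /* Back the tail up one slot, wrapping below zero. */
        if (_tx_thread_smp_protect_wait_list_tail == 0)
            _tx_thread_smp_protect_wait_list_tail = _tx_thread_smp_protect_wait_list_size;
        _tx_thread_smp_protect_wait_list_tail--;

        _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;  /* Release */

        _tx_thread_smp_protect_wait_counts[core]--;  /* No longer waiting (the
            assembly re-reads the current CPU ID here; it is the same core) */
    }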

View File

@@ -0,0 +1,89 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_time_get SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function gets the global time value that is used for debug */
@/* information and event tracing. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* 32-bit time stamp */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Source */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_time_get
.type _tx_thread_smp_time_get,function
_tx_thread_smp_time_get:
MRC p15, 4, r0, c15, c0, 0 @ Read periph base address
LDR r0, [r0, #0x604] @ Read count register
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
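
The two instructions above in C, assuming GCC inline assembly and privileged execution: CP15 c15/c0/0 with opc1=4 is the Configuration Base Address Register (the MPCore private peripheral base), and 0x604 is the private timer Counter Register offset within that region.

    #include <stdint.h>

    static inline uint32_t smp_time_get(void)
    {
        uint32_t base;
        __asm__ volatile("MRC p15, 4, %0, c15, c0, 0" : "=r"(base));
        return *(volatile uint32_t *)(base + 0x604u);   /* Timer count register */
    }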

View File

@@ -0,0 +1,143 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread - Low Level SMP Support */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@#define TX_THREAD_SMP_SOURCE_CODE
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h" */
@
@
.global _tx_thread_current_ptr
.global _tx_thread_smp_protection
.global _tx_thread_preempt_disable
.global _tx_thread_smp_protect_wait_counts
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_smp_unprotect SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function releases previously obtained protection. The supplied */
@/* previous SR is restored. If the value of _tx_thread_system_state */
@/* and _tx_thread_preempt_disable are both zero, then multithreading */
@/* is enabled as well. */
@/* */
@/* INPUT */
@/* */
@/* Previous Status Register */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX Source */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
.global _tx_thread_smp_unprotect
.type _tx_thread_smp_unprotect,function
_tx_thread_smp_unprotect:
@
@ /* Lockout interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
MRC p15, 0, r1, c0, c0, 5 @ Read CPU ID register
AND r1, r1, #0x03 @ Mask off, leaving the CPU ID field
LDR r2,=_tx_thread_smp_protection @ Build address of protection structure
LDR r3, [r2, #8] @ Pickup the owning core
CMP r1, r3 @ Is it this core?
BNE _still_protected @ If this is not the owning core, protection is in force elsewhere
LDR r3, [r2, #12] @ Pickup the protection count
CMP r3, #0 @ Check to see if the protection is still active
BEQ _still_protected @ If the protection count is zero, protection has already been cleared
SUB r3, r3, #1 @ Decrement the protection count
STR r3, [r2, #12] @ Store the new count back
CMP r3, #0 @ Check to see if the protection is still active
BNE _still_protected @ If the protection count is non-zero, protection is still in force
LDR r2,=_tx_thread_preempt_disable @ Build address of preempt disable flag
LDR r3, [r2] @ Pickup preempt disable flag
CMP r3, #0 @ Is the preempt disable flag set?
BNE _still_protected @ Yes, skip the protection release
    LDR     r2,=_tx_thread_smp_protect_wait_counts @ Build address of wait counts
LDR r3, [r2, r1, LSL #2] @ Pickup wait list value
CMP r3, #0 @ Are any entities on this core waiting?
BNE _still_protected @ Yes, skip the protection release
LDR r2,=_tx_thread_smp_protection @ Build address of protection structure
MOV r3, #0xFFFFFFFF @ Build invalid value
STR r3, [r2, #8] @ Mark the protected core as invalid
#ifdef TX_MPCORE_DEBUG_ENABLE
STR LR, [r2, #16] @ Save caller's return address
#endif
DMB @ Ensure that accesses to shared resource have completed
MOV r3, #0 @ Build release protection value
STR r3, [r2, #0] @ Release the protection
DSB @ To ensure update of the protection occurs before other CPUs awake
#ifdef TX_ENABLE_WFE
SEV @ Send event to other CPUs, wakes anyone waiting on the protection (using WFE)
#endif
_still_protected:
MSR CPSR_c, r0 @ Restore CPSR
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
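
A hedged C rendering of this release path, reusing the protect sketch's declarations and helpers; the DMB/DSB/SEV sequence is reduced to a release store, and _tx_thread_preempt_disable is declared here only for the sketch.

    static UINT _tx_thread_preempt_disable;

    void smp_unprotect_sketch(UINT previous_cpsr)
    {
        disable_interrupts();
        UINT core = current_core();

        if (_tx_thread_smp_protection.tx_thread_smp_protect_core == core &&
            _tx_thread_smp_protection.tx_thread_smp_protect_count != 0)
        {
            _tx_thread_smp_protection.tx_thread_smp_protect_count--;

            /* Fully release only when nothing else is holding it open. */
            if (_tx_thread_smp_protection.tx_thread_smp_protect_count == 0 &&
                _tx_thread_preempt_disable == 0 &&
                _tx_thread_smp_protect_wait_counts[core] == 0)
            {
                _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFFu;
                __atomic_store_n(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force,
                                 0u, __ATOMIC_RELEASE);
                /* The assembly follows with SEV to wake cores parked in WFE. */
            }
        }

        restore_cpsr(previous_cpsr);
    }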

View File

@@ -0,0 +1,174 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
SVC_MODE = 0x13 @ SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ ints enabled
#else
CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ ints enabled
#endif
THUMB_BIT = 0x20 @ Thumb-bit
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_stack_build SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function builds a stack frame on the supplied thread's stack. */
@/* The stack frame results in a fake interrupt return to the supplied */
@/* function pointer. */
@/* */
@/* INPUT */
@/* */
@/* thread_ptr Pointer to thread control blk */
@/* function_ptr Pointer to return function */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* _tx_thread_create Create thread service */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
@{
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
@
@
@ /* Build a fake interrupt frame. The form of the fake interrupt stack
@ on the Cortex-A5 should look like the following after it is built:
@
@ Stack Top: 1 Interrupt stack frame type
@ CPSR Initial value for CPSR
@ a1 (r0) Initial value for a1
@ a2 (r1) Initial value for a2
@ a3 (r2) Initial value for a3
@ a4 (r3) Initial value for a4
@ v1 (r4) Initial value for v1
@ v2 (r5) Initial value for v2
@ v3 (r6) Initial value for v3
@ v4 (r7) Initial value for v4
@ v5 (r8) Initial value for v5
@ sb (r9) Initial value for sb
@ sl (r10) Initial value for sl
@ fp (r11) Initial value for fp
@ ip (r12) Initial value for ip
@ lr (r14) Initial value for lr
@ pc (r15) Initial value for pc
@ 0 For stack backtracing
@
@ Stack Bottom: (higher memory address) */
@
LDR r2, [r0, #16] @ Pickup end of stack area
BIC r2, r2, #7 @ Ensure 8-byte alignment
SUB r2, r2, #76 @ Allocate space for the stack frame
@
@ /* Actually build the stack frame. */
@
MOV r3, #1 @ Build interrupt stack type
STR r3, [r2, #0] @ Store stack type
MOV r3, #0 @ Build initial register value
STR r3, [r2, #8] @ Store initial r0
STR r3, [r2, #12] @ Store initial r1
STR r3, [r2, #16] @ Store initial r2
STR r3, [r2, #20] @ Store initial r3
STR r3, [r2, #24] @ Store initial r4
STR r3, [r2, #28] @ Store initial r5
STR r3, [r2, #32] @ Store initial r6
STR r3, [r2, #36] @ Store initial r7
STR r3, [r2, #40] @ Store initial r8
STR r3, [r2, #44] @ Store initial r9
LDR r3, [r0, #12] @ Pickup stack starting address
STR r3, [r2, #48] @ Store initial r10 (sl)
MOV r3, #0 @ Build initial register value
STR r3, [r2, #52] @ Store initial r11
STR r3, [r2, #56] @ Store initial r12
STR r3, [r2, #60] @ Store initial lr
STR r1, [r2, #64] @ Store initial pc
STR r3, [r2, #68] @ 0 for back-trace
MRS r3, CPSR @ Pickup CPSR
BIC r3, r3, #CPSR_MASK @ Mask mode bits of CPSR
ORR r3, r3, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
BIC r3, r3, #THUMB_BIT @ Clear Thumb-bit by default
AND r1, r1, #1 @ Determine if the entry function is in Thumb mode
CMP r1, #1 @ Is the Thumb-bit set?
ORREQ r3, r3, #THUMB_BIT @ Yes, set the Thumb-bit
STR r3, [r2, #4] @ Store initial CPSR
@
@ /* Setup stack pointer. */
@ thread_ptr -> tx_thread_stack_ptr = r2;
@
STR r2, [r0, #8] @ Save stack pointer in thread's
@ control block
@
@ /* Set ready bit in thread control block. */
@
LDR r2, [r0, #152] @ Pickup word with ready bit
ORR r2, r2, #0x8000 @ Build ready bit set
STR r2, [r0, #152] @ Set ready bit
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@}
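
The frame construction above can be sketched in C for 32-bit ARM. The control-block accesses ([r0, #8/#12/#16/#152]) are replaced by parameters and a returned frame pointer, the ready-bit update is omitted, and the CPSR is simplified to a constant SVC-mode value rather than derived from the current CPSR as in the assembly; the names are illustrative.

    #include <stdint.h>
    #include <string.h>

    #define SVC_MODE_SKETCH  0x13u               /* SVC mode, interrupts enabled */
    #define THUMB_BIT_SKETCH 0x20u               /* CPSR T bit */

    static uint32_t *stack_build_sketch(uint32_t *stack_end, uint32_t *stack_start,
                                        void (*entry)(void))
    {
        /* 8-byte align the stack end, then carve out the 76-byte frame. */
        uint32_t *frame =
            (uint32_t *)((((uintptr_t)stack_end) & ~(uintptr_t)7) - 76u);

        memset(frame, 0, 76);                    /* r0-r12, lr, back-trace = 0 */
        frame[0]  = 1;                           /* Interrupt stack frame type */
        frame[12] = (uint32_t)(uintptr_t)stack_start;   /* Initial r10 (sl)    */
        frame[16] = (uint32_t)(uintptr_t)entry;  /* Initial pc                 */

        uint32_t cpsr = SVC_MODE_SKETCH;
        if ((uintptr_t)entry & 1u)               /* Thumb entry point?         */
            cpsr |= THUMB_BIT_SKETCH;
        frame[1] = cpsr;                         /* Initial CPSR               */

        return frame;                            /* Becomes tx_thread_stack_ptr */
    }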

View File

@@ -0,0 +1,206 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@#include "tx_timer.h"
@
@
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_thread_smp_protection
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
.global _tx_execution_thread_exit
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_system_return SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function is target processor specific. It is used to transfer */
@/* control from a thread back to the ThreadX system. Only a */
@/* minimal context is saved since the compiler assumes temp registers */
@/* are going to get clobbered by a function call anyway.             */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* _tx_thread_schedule Thread scheduling loop */
@/* */
@/* CALLED BY */
@/* */
@/* ThreadX components */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_system_return(VOID)
@{
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
@
@ /* Save minimal context on the stack. */
@
STMDB sp!, {r4-r11, lr} @ Save minimal context
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
LDR r3, =_tx_thread_current_ptr @ Pickup address of current ptr
ADD r3, r3, r12 @ Build index into current ptr array
LDR r0, [r3, #0] @ Pickup current thread pointer
#ifdef TARGET_FPU_VFP
LDR r1, [r0, #160] @ Pickup the VFP enabled flag
CMP r1, #0 @ Is the VFP enabled?
BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
VMRS r4, FPSCR @ Pickup the FPSCR
STR r4, [sp, #-4]! @ Save FPSCR
VSTMDB sp!, {D16-D31} @ Save D16-D31
VSTMDB sp!, {D8-D15} @ Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
MOV r4, #0 @ Build a solicited stack type
MRS r5, CPSR @ Pickup the CPSR
STMDB sp!, {r4-r5} @ Save type and CPSR
@
@ /* Lockout interrupts. */
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#else
CPSID i @ Disable IRQ interrupts
#endif
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the thread exit function to indicate the thread is no longer executing. */
@
MOV r4, r0 @ Save r0
MOV r5, r3 @ Save r3
MOV r6, r12 @ Save r12
BL _tx_execution_thread_exit @ Call the thread exit function
MOV r3, r5 @ Recover r3
    MOV     r0, r4                              @ Recover r0
MOV r12,r6 @ Recover r12
#endif
@
LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
ADD r2, r2, r12 @ Build index into time-slice array
LDR r1, [r2, #0] @ Pickup current time slice
@
@ /* Save current stack and switch to system stack. */
@ _tx_thread_current_ptr[core] -> tx_thread_stack_ptr = sp;
@ sp = _tx_thread_system_stack_ptr[core];
@
STR sp, [r0, #8] @ Save thread stack pointer
@
@ /* Determine if the time-slice is active. */
@ if (_tx_timer_time_slice[core])
@ {
@
MOV r4, #0 @ Build clear value
CMP r1, #0 @ Is a time-slice active?
BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
@
@ /* Save time-slice for the thread and clear the current time-slice. */
@ _tx_thread_current_ptr[core] -> tx_thread_time_slice = _tx_timer_time_slice[core];
@ _tx_timer_time_slice[core] = 0;
@
STR r4, [r2, #0] @ Clear time-slice
STR r1, [r0, #24] @ Save current time-slice
@
@ }
__tx_thread_dont_save_ts:
@
@ /* Clear the current thread pointer. */
@ _tx_thread_current_ptr[core] = TX_NULL;
@
STR r4, [r3, #0] @ Clear current thread pointer
@
@ /* Set ready bit in thread control block. */
@
LDR r2, [r0, #152] @ Pickup word with ready bit
ORR r2, r2, #0x8000 @ Build ready bit set
DMB @ Ensure that accesses to shared resource have completed
STR r2, [r0, #152] @ Set ready bit
@
@ /* Now clear protection. It is assumed that protection is in force whenever this routine is called. */
@
LDR r3, =_tx_thread_smp_protection @ Pickup address of protection structure
#ifdef TX_MPCORE_DEBUG_ENABLE
STR lr, [r3, #24] @ Save last caller
LDR r2, [r3, #4] @ Pickup owning thread
CMP r0, r2 @ Is it the same as the current thread?
__error_loop:
BNE __error_loop @ If not, we have a problem!!
#endif
LDR r1, =_tx_thread_preempt_disable @ Build address to preempt disable flag
MOV r2, #0 @ Build clear value
STR r2, [r1, #0] @ Clear preempt disable flag
STR r2, [r3, #12] @ Clear protection count
MOV r1, #0xFFFFFFFF @ Build invalid value
STR r1, [r3, #8] @ Set core to an invalid value
DMB @ Ensure that accesses to shared resource have completed
STR r2, [r3] @ Clear protection
DSB @ To ensure update of the shared resource occurs before other CPUs awake
SEV @ Send event to other CPUs, wakes anyone waiting on a mutex (using WFE)
B _tx_thread_schedule @ Jump to scheduler!
@
@}
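
The stack save and the final jump to _tx_thread_schedule are inherently assembly; the bookkeeping around them can be sketched in C. The struct fields below are assumptions read off the offsets (#24 for the time slice, #152 for the word holding the ready bit), and the protection and preempt-disable globals reuse the earlier sketch declarations.

    typedef struct
    {
        UINT tx_thread_time_slice;               /* [r0, #24] in the assembly  */
        UINT tx_thread_ready_word;               /* [r0, #152], bit 15 = ready */
    } THREAD_SKETCH;

    static THREAD_SKETCH *_tx_thread_current_ptr[4];
    static UINT _tx_timer_time_slice[4];

    static void system_return_bookkeeping(UINT core)
    {
        THREAD_SKETCH *thread = _tx_thread_current_ptr[core];

        if (_tx_timer_time_slice[core] != 0)     /* Preserve an active slice */
        {
            thread->tx_thread_time_slice = _tx_timer_time_slice[core];
            _tx_timer_time_slice[core] = 0;
        }

        _tx_thread_current_ptr[core] = 0;        /* Core now runs no thread */
        thread->tx_thread_ready_word |= 0x8000u; /* Set the ready bit (DMB first) */

        _tx_thread_preempt_disable = 0;          /* Clear preempt disable flag */
        _tx_thread_smp_protection.tx_thread_smp_protect_count = 0;
        _tx_thread_smp_protection.tx_thread_smp_protect_core  = 0xFFFFFFFFu;
        __atomic_store_n(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force,
                         0u, __ATOMIC_RELEASE);  /* DSB + SEV follow in assembly */
    }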

View File

@@ -0,0 +1,210 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Thread */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_thread.h"
@
@
.global _tx_thread_system_state
.global _tx_thread_current_ptr
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
.global _tx_execution_isr_enter
#endif
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_thread_vectored_context_save SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function saves the context of an executing thread in the */
@/* beginning of interrupt processing. The function also ensures that */
@/* the system stack is used upon return to the calling ISR. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* None */
@/* */
@/* CALLED BY */
@/* */
@/* ISRs */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_thread_vectored_context_save(VOID)
@{
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
@
@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
@ out, we are in IRQ mode, and all registers are intact. */
@
@ /* Check for a nested interrupt condition. */
@ if (_tx_thread_system_state[core]++)
@ {
@
#ifdef TX_ENABLE_FIQ_SUPPORT
CPSID if @ Disable IRQ and FIQ interrupts
#endif
@
@ /* Pickup the CPU ID. */
@
MRC p15, 0, r10, c0, c0, 5 @ Read CPU ID register
AND r10, r10, #0x03 @ Mask off, leaving the CPU ID field
LSL r12, r10, #2 @ Build offset to array indexes
LDR r3, =_tx_thread_system_state @ Pickup address of system state var
ADD r3, r3, r12 @ Build index into the system state array
LDR r2, [r3, #0] @ Pickup system state
CMP r2, #0 @ Is this the first interrupt?
BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
@
@ /* Nested interrupt condition. */
@
ADD r2, r2, #1 @ Increment the interrupt counter
STR r2, [r3, #0] @ Store it back in the variable
@
@ /* Note: Minimal context of interrupted thread is already saved. */
@
@ /* Return to the ISR. */
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@
__tx_thread_not_nested_save:
@ }
@
@ /* Otherwise, not nested, check to see if a thread was running. */
@ else if (_tx_thread_current_ptr[core])
@ {
@
ADD r2, r2, #1 @ Increment the interrupt counter
STR r2, [r3, #0] @ Store it back in the variable
LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
ADD r1, r1, r12 @ Build index into current thread ptr
LDR r0, [r1, #0] @ Pickup current thread pointer
CMP r0, #0 @ Is it NULL?
BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
@ scheduling loop - nothing needs saving!
@
@ /* Note: Minimal context of interrupted thread is already saved. */
@
@ /* Save the current stack pointer in the thread's control block. */
@ _tx_thread_current_ptr[core] -> tx_thread_stack_ptr = sp;
@
@ /* Switch to the system stack. */
@ sp = _tx_thread_system_stack_ptr[core];
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@
@ }
@ else
@ {
@
__tx_thread_idle_system_save:
@
@ /* Interrupt occurred in the scheduling loop. */
@
@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
@ processing. */
@
MOV r10, #0 @ Clear stack limit
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
@
@ /* Call the ISR enter function to indicate an ISR is executing. */
@
PUSH {r12, lr} @ Save ISR lr & r12
BL _tx_execution_isr_enter @ Call the ISR enter function
POP {r12, lr} @ Recover ISR lr & r12
#endif
ADD sp, sp, #32 @ Recover saved registers
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@
@ }
@}
@
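
The decision tree above, sketched in C under the earlier declarations. The stack-pointer work and the minimal-frame unwind stay in assembly; isr_enter() stands in for the optional _tx_execution_isr_enter call, and _tx_thread_system_state is declared here only for the sketch.

    static UINT _tx_thread_system_state[4];
    extern void isr_enter(void);                 /* Stand-in: an assumption */

    static void vectored_context_save_sketch(UINT core)
    {
        UINT previous_state = _tx_thread_system_state[core]++;

        if (previous_state != 0)
        {
            isr_enter();                         /* Nested: context already saved */
            return;                              /* Straight back to the ISR */
        }

        if (_tx_thread_current_ptr[core] != 0)
        {
            /* A thread was interrupted: its sp is recorded and the core
               switches to the system stack (assembly-only steps). */
            isr_enter();
            return;
        }

        /* Idle: the interrupt hit the scheduling loop; nothing to save
           beyond dropping the minimal frame (ADD sp, sp, #32). */
        isr_enter();
    }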

View File

@@ -0,0 +1,230 @@
@/**************************************************************************/
@/* */
@/* Copyright (c) Microsoft Corporation. All rights reserved. */
@/* */
@/* This software is licensed under the Microsoft Software License */
@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
@/* and in the root directory of this software. */
@/* */
@/**************************************************************************/
@
@
@/**************************************************************************/
@/**************************************************************************/
@/** */
@/** ThreadX Component */
@/** */
@/** Timer */
@/** */
@/**************************************************************************/
@/**************************************************************************/
@
@#define TX_SOURCE_CODE
@
@
@/* Include necessary system files. */
@
@#include "tx_api.h"
@#include "tx_timer.h"
@#include "tx_thread.h"
@
@
@ /* Define Assembly language external references.  */
@
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
.global _tx_timer_list_start
.global _tx_timer_list_end
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
.global _tx_timer_expiration_process
.global _tx_timer_interrupt_active
.global _tx_thread_smp_protect
.global _tx_thread_smp_unprotect
.global _tx_trace_isr_enter_insert
.global _tx_trace_isr_exit_insert
@
@
.arm
.text
.align 2
@/**************************************************************************/
@/* */
@/* FUNCTION RELEASE */
@/* */
@/* _tx_timer_interrupt SMP/Cortex-A5/GNU */
@/* 6.1 */
@/* AUTHOR */
@/* */
@/* William E. Lamie, Microsoft Corporation */
@/* */
@/* DESCRIPTION */
@/* */
@/* This function processes the hardware timer interrupt. This */
@/* processing includes incrementing the system clock and checking for */
@/* time slice and/or timer expiration. If either is found, the */
@/* interrupt context save/restore functions are called along with the */
@/* expiration functions. */
@/* */
@/* INPUT */
@/* */
@/* None */
@/* */
@/* OUTPUT */
@/* */
@/* None */
@/* */
@/* CALLS */
@/* */
@/* _tx_thread_time_slice Time slice interrupted thread */
@/* _tx_thread_smp_protect Get SMP protection */
@/* _tx_thread_smp_unprotect           Release SMP protection        */
@/* _tx_timer_expiration_process Timer expiration processing */
@/* */
@/* CALLED BY */
@/* */
@/* interrupt vector */
@/* */
@/* RELEASE HISTORY */
@/* */
@/* DATE NAME DESCRIPTION */
@/* */
@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
@/* */
@/**************************************************************************/
@VOID _tx_timer_interrupt(VOID)
@{
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
@
@ /* Upon entry to this routine, it is assumed that context save has already
@ been called, and therefore the compiler scratch registers are available
@ for use. */
@
MRC p15, 0, r0, c0, c0, 5 @ Read CPU ID register
AND r0, r0, #0x03 @ Mask off, leaving the CPU ID field
CMP r0, #0 @ Only process timer interrupts from core 0 (to change this simply change the constant!)
    BEQ     __tx_process_timer                  @ Yes, core 0: process the timer interrupt
    BX      lr                                  @ Not core 0, return to caller
__tx_process_timer:
STMDB sp!, {r4, lr} @ Save the lr and r4 register on the stack
BL _tx_thread_smp_protect @ Get protection
MOV r4, r0 @ Save the return value in preserved register
LDR r1, =_tx_timer_interrupt_active @ Pickup address of timer interrupt active count
LDR r0, [r1, #0] @ Pickup interrupt active count
ADD r0, r0, #1 @ Increment interrupt active count
STR r0, [r1, #0] @ Store new interrupt active count
DMB @ Ensure that accesses to shared resource have completed
@
@ /* Increment the system clock. */
@ _tx_timer_system_clock++;
@
LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
LDR r0, [r1, #0] @ Pickup system clock
ADD r0, r0, #1 @ Increment system clock
STR r0, [r1, #0] @ Store new system clock
@
@ /* Test for timer expiration. */
@ if (*_tx_timer_current_ptr)
@ {
@
LDR r1, =_tx_timer_expired @ Pickup addr of expired flag
LDR r0, [r1, #0] @ Pickup timer expired flag
CMP r0, #0 @ Check for previous timer expiration still active
BNE __tx_timer_done @ If so, skip timer processing
LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer addr
LDR r0, [r1, #0] @ Pickup current timer
LDR r2, [r0, #0] @ Pickup timer list entry
CMP r2, #0 @ Is there anything in the list?
BEQ __tx_timer_no_timer @ No, just increment the timer
@
@ /* Set expiration flag. */
@ _tx_timer_expired = TX_TRUE;
@
LDR r3, =_tx_timer_expired @ Pickup expiration flag address
MOV r2, #1 @ Build expired value
STR r2, [r3, #0] @ Set expired flag
B __tx_timer_done @ Finished timer processing
@
@ }
@ else
@ {
__tx_timer_no_timer:
@
@ /* No timer expired, increment the timer pointer. */
@ _tx_timer_current_ptr++;
@
ADD r0, r0, #4 @ Move to next timer
@
@ /* Check for wrap-around. */
@ if (_tx_timer_current_ptr == _tx_timer_list_end)
@
LDR r3, =_tx_timer_list_end @ Pickup addr of timer list end
LDR r2, [r3, #0] @ Pickup list end
CMP r0, r2 @ Are we at list end?
BNE __tx_timer_skip_wrap @ No, skip wrap-around logic
@
@ /* Wrap to beginning of list. */
@ _tx_timer_current_ptr = _tx_timer_list_start;
@
LDR r3, =_tx_timer_list_start @ Pickup addr of timer list start
LDR r0, [r3, #0] @ Set current pointer to list start
@
__tx_timer_skip_wrap:
@
STR r0, [r1, #0] @ Store new current timer pointer
@ }
@
__tx_timer_done:
@
@
@ /* Did a timer expire? */
@ if (_tx_timer_expired)
@ {
@
LDR r1, =_tx_timer_expired @ Pickup addr of expired flag
LDR r0, [r1, #0] @ Pickup timer expired flag
CMP r0, #0 @ Check for timer expiration
BEQ __tx_timer_dont_activate @ If not set, skip timer activation
@
@ /* Process timer expiration. */
@ _tx_timer_expiration_process();
@
BL _tx_timer_expiration_process @ Call the timer expiration handling routine
@
@ }
__tx_timer_dont_activate:
@
@ /* Call time-slice processing. */
@ _tx_thread_time_slice();
BL _tx_thread_time_slice @ Call time-slice processing
@
@ }
@
LDR r1, =_tx_timer_interrupt_active @ Pickup address of timer interrupt active count
LDR r0, [r1, #0] @ Pickup interrupt active count
SUB r0, r0, #1 @ Decrement interrupt active count
STR r0, [r1, #0] @ Store new interrupt active count
DMB @ Ensure that accesses to shared resource have completed
@
@ /* Release protection. */
@
MOV r0, r4 @ Pass the previous status register back
BL _tx_thread_smp_unprotect @ Release protection
LDMIA sp!, {r4, lr} @ Recover lr register and r4
#ifdef __THUMB_INTERWORK
BX lr @ Return to caller
#else
MOV pc, lr @ Return to caller
#endif
@
@}
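
Finally, the whole handler restated as C under the earlier helper assumptions. The externs below use the real symbol names referenced by this file's .global list, but their types are simplified for the sketch (the timer list pointers become plain void **).

    extern UINT   _tx_timer_system_clock;
    extern UINT   _tx_timer_expired;
    extern UINT   _tx_timer_interrupt_active;
    extern void **_tx_timer_current_ptr;
    extern void **_tx_timer_list_start;
    extern void **_tx_timer_list_end;
    extern UINT _tx_thread_smp_protect(void);
    extern void _tx_thread_smp_unprotect(UINT previous);
    extern void _tx_timer_expiration_process(void);
    extern void _tx_thread_time_slice(void);

    void timer_interrupt_sketch(void)
    {
        if (current_core() != 0)                 /* Only core 0 drives the clock */
            return;

        UINT previous = _tx_thread_smp_protect();
        _tx_timer_interrupt_active++;            /* DMB in the assembly */

        _tx_timer_system_clock++;                /* Increment the system clock */

        if (_tx_timer_expired == 0)              /* Previous expiration finished? */
        {
            if (*_tx_timer_current_ptr != 0)     /* A timer expires at this tick */
                _tx_timer_expired = 1;
            else if (++_tx_timer_current_ptr == _tx_timer_list_end)
                _tx_timer_current_ptr = _tx_timer_list_start;  /* Wrap the list */
        }

        if (_tx_timer_expired)
            _tx_timer_expiration_process();      /* Timer expiration processing */

        _tx_thread_time_slice();                 /* Time-slice processing */

        _tx_timer_interrupt_active--;
        _tx_thread_smp_unprotect(previous);      /* Release protection */
    }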