score: Add AArch64 port

This adds a CPU port for AArch64 (ARMv8) with support for exceptions and
interrupts.
commit 8387c52e47 (parent 232fc52d4c), committed by Joel Sherrill
@@ -85,7 +85,11 @@ typedef struct TLS_Thread_control_block {
   struct TLS_Thread_control_block *tcb;
 #else /* !__i386__ */
   TLS_Dynamic_thread_vector *dtv;
-#if CPU_SIZEOF_POINTER == 4
+  /*
+   * GCC under AArch64/LP64 expects a 16 byte TCB at the beginning of the TLS
+   * data segment and indexes into it accordingly for TLS variable addresses.
+   */
+#if CPU_SIZEOF_POINTER == 4 || defined(AARCH64_MULTILIB_ARCH_V8)
   uintptr_t reserved;
 #endif
 #endif /* __i386__ */
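For orientation, a sketch (mine, not part of the commit) of the resulting
AArch64/LP64 layout; field names follow the structure in the hunk above:

  typedef struct TLS_Thread_control_block {
    TLS_Dynamic_thread_vector *dtv;      /* 8 bytes */
    uintptr_t                  reserved; /* 8 bytes, pads the TCB to 16 bytes */
  } TLS_Thread_control_block;            /* TLS data segment starts here */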
cpukit/score/cpu/aarch64/aarch64-context-validate.S (new file, 305 lines)
@@ -0,0 +1,305 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of _CPU_Context_validate
 *
 * This file implements _CPU_Context_validate for use in spcontext01.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/score/cpu.h>

#define FRAME_OFFSET_X4   0
#define FRAME_OFFSET_X5   8
#define FRAME_OFFSET_X6  16
#define FRAME_OFFSET_X7  24
#define FRAME_OFFSET_X8  32
#define FRAME_OFFSET_X9  40
#define FRAME_OFFSET_X10 48
#define FRAME_OFFSET_X11 56
#define FRAME_OFFSET_LR  64

#ifdef AARCH64_MULTILIB_VFP
#define FRAME_OFFSET_V8   72
#define FRAME_OFFSET_V9   88
#define FRAME_OFFSET_V10 104
#define FRAME_OFFSET_V11 120
#define FRAME_OFFSET_V12 136
#define FRAME_OFFSET_V13 152
#define FRAME_OFFSET_V14 168
#define FRAME_OFFSET_V15 184

#define FRAME_SIZE (FRAME_OFFSET_V15 + 16)
#else
#define FRAME_SIZE (FRAME_OFFSET_LR + 8)
#endif
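As an annotation (mine, not part of the commit): the frame defined above holds
exactly the callee-saved registers that AAPCS64 requires this function to
preserve and that it goes on to modify:

  /* sp + 0  .. sp + 56  : x4-x11 (one 8-byte slot each)        */
  /* sp + 64             : lr (x30)                             */
  /* sp + 72 .. sp + 184 : v8-v15 (16-byte slots, VFP only)     */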

.section .text

FUNCTION_ENTRY(_CPU_Context_validate)

  /* Save */

  sub sp, sp, #FRAME_SIZE

  str x4,  [sp, #FRAME_OFFSET_X4]
  str x5,  [sp, #FRAME_OFFSET_X5]
  str x6,  [sp, #FRAME_OFFSET_X6]
  str x7,  [sp, #FRAME_OFFSET_X7]
  str x8,  [sp, #FRAME_OFFSET_X8]
  str x9,  [sp, #FRAME_OFFSET_X9]
  str x10, [sp, #FRAME_OFFSET_X10]
  str x11, [sp, #FRAME_OFFSET_X11]
  str lr,  [sp, #FRAME_OFFSET_LR]

#ifdef AARCH64_MULTILIB_VFP
  str d8,  [sp, #FRAME_OFFSET_V8]
  str d9,  [sp, #FRAME_OFFSET_V9]
  str d10, [sp, #FRAME_OFFSET_V10]
  str d11, [sp, #FRAME_OFFSET_V11]
  str d12, [sp, #FRAME_OFFSET_V12]
  str d13, [sp, #FRAME_OFFSET_V13]
  str d14, [sp, #FRAME_OFFSET_V14]
  str d15, [sp, #FRAME_OFFSET_V15]
#endif

  /* Fill */

  /* X1 is used for temporary values */
  mov x1, x0

  /* X2 contains the stack pointer */
  mov x2, sp

.macro fill_register reg
  add x1, x1, #1
  mov \reg, x1
.endm
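(Annotation, not part of the commit: each use of fill_register increments the
running counter in x1 and then copies it, so for example `fill_register x5`
expands to `add x1, x1, #1` / `mov x5, x1`, giving every filled register a
distinct value derived from the pattern passed in x0.)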

#ifdef AARCH64_MULTILIB_VFP
  /* X3 contains the FPSR */
  mrs x3, FPSR
  ldr x4, =0xf000001f
  bic x3, x3, x4
  and x4, x4, x0
  orr x3, x3, x4
  msr FPSR, x3
#else
  fill_register x3
#endif
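(Annotation, not part of the commit: 0xf000001f selects the writable FPSR
fields, N/Z/C/V in bits 31-28 plus the cumulative exception flags in bits
4-0; the sequence clears those fields and splices in the pattern from x0, so
the FPSR itself takes part in the validation.)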

  fill_register x4
  fill_register x5
  fill_register x6
  fill_register x7
  fill_register x8
  fill_register x9
  fill_register x10
  fill_register x11
  fill_register x12
  fill_register lr

#ifdef AARCH64_MULTILIB_VFP
.macro fill_vfp_register regnum
  add x1, x1, #1
  fmov d\regnum\(), x1
  fmov v\regnum\().D[1], x1
.endm
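(Annotation, not part of the commit: the \() sequence terminates the \regnum
substitution, so `fill_vfp_register 8` expands to `fmov d8, x1` and
`fmov v8.D[1], x1`, writing the same value into both 64-bit halves of the
128-bit vector register.)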

  fill_vfp_register 0
  fill_vfp_register 1
  fill_vfp_register 2
  fill_vfp_register 3
  fill_vfp_register 4
  fill_vfp_register 5
  fill_vfp_register 6
  fill_vfp_register 7
  fill_vfp_register 8
  fill_vfp_register 9
  fill_vfp_register 10
  fill_vfp_register 11
  fill_vfp_register 12
  fill_vfp_register 13
  fill_vfp_register 14
  fill_vfp_register 15
  fill_vfp_register 16
  fill_vfp_register 17
  fill_vfp_register 18
  fill_vfp_register 19
  fill_vfp_register 20
  fill_vfp_register 21
  fill_vfp_register 22
  fill_vfp_register 23
  fill_vfp_register 24
  fill_vfp_register 25
  fill_vfp_register 26
  fill_vfp_register 27
  fill_vfp_register 28
  fill_vfp_register 29
  fill_vfp_register 30
  fill_vfp_register 31
#endif /* AARCH64_MULTILIB_VFP */

  /* Check */
check:

.macro check_register reg
  add x1, x1, #1
  cmp \reg, x1
  bne restore
.endm

  /* A compare involving the stack pointer is deprecated */
  mov x1, sp
  cmp x2, x1
  bne restore

  mov x1, x0

#ifndef AARCH64_MULTILIB_VFP
  check_register x3
#endif

  check_register x4
  check_register x5
  check_register x6
  check_register x7
  check_register x8
  check_register x9
  check_register x10
  check_register x11
  check_register x12
  check_register lr

#ifdef AARCH64_MULTILIB_VFP
  b check_vfp
#endif

  b check

  /* Restore */
restore:

  ldr x4,  [sp, #FRAME_OFFSET_X4]
  ldr x5,  [sp, #FRAME_OFFSET_X5]
  ldr x6,  [sp, #FRAME_OFFSET_X6]
  ldr x7,  [sp, #FRAME_OFFSET_X7]
  ldr x8,  [sp, #FRAME_OFFSET_X8]
  ldr x9,  [sp, #FRAME_OFFSET_X9]
  ldr x10, [sp, #FRAME_OFFSET_X10]
  ldr x11, [sp, #FRAME_OFFSET_X11]
  ldr lr,  [sp, #FRAME_OFFSET_LR]

#ifdef AARCH64_MULTILIB_VFP
  ldr d8,  [sp, #FRAME_OFFSET_V8]
  ldr d9,  [sp, #FRAME_OFFSET_V9]
  ldr d10, [sp, #FRAME_OFFSET_V10]
  ldr d11, [sp, #FRAME_OFFSET_V11]
  ldr d12, [sp, #FRAME_OFFSET_V12]
  ldr d13, [sp, #FRAME_OFFSET_V13]
  ldr d14, [sp, #FRAME_OFFSET_V14]
  ldr d15, [sp, #FRAME_OFFSET_V15]
#endif

  add sp, sp, #FRAME_SIZE

  ret

FUNCTION_END(_CPU_Context_validate)

#ifdef AARCH64_MULTILIB_VFP
check_vfp:

.macro check_vfp_register regnum
  add x1, x1, #1
  fmov x4, d\regnum
  fmov x5, v\regnum\().D[1]
  cmp x5, x4
  bne 1f
  cmp x1, x4
  bne 1f
  b 2f
1:
  b restore
2:
.endm

  mrs x4, FPSR
  cmp x4, x3
  bne restore

  check_vfp_register 0
  check_vfp_register 1
  check_vfp_register 2
  check_vfp_register 3
  check_vfp_register 4
  check_vfp_register 5
  check_vfp_register 6
  check_vfp_register 7
  check_vfp_register 8
  check_vfp_register 9
  check_vfp_register 10
  check_vfp_register 11
  check_vfp_register 12
  check_vfp_register 13
  check_vfp_register 14
  check_vfp_register 15
  check_vfp_register 16
  check_vfp_register 17
  check_vfp_register 18
  check_vfp_register 19
  check_vfp_register 20
  check_vfp_register 21
  check_vfp_register 22
  check_vfp_register 23
  check_vfp_register 24
  check_vfp_register 25
  check_vfp_register 26
  check_vfp_register 27
  check_vfp_register 28
  check_vfp_register 29
  check_vfp_register 30
  check_vfp_register 31

  /* Restore x4 and x5 */
  mov x1, x0
  fill_register x4
  fill_register x5

  b check
#endif /* AARCH64_MULTILIB_VFP */
cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S (new file, 100 lines)
@@ -0,0 +1,100 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of _CPU_Context_volatile_clobber
 *
 * This file implements _CPU_Context_volatile_clobber for use in spcontext01.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.section .text

FUNCTION_ENTRY(_CPU_Context_volatile_clobber)

.macro clobber_register reg
  sub x0, x0, #1
  mov \reg, x0
.endm

#ifdef AARCH64_MULTILIB_VFP
  mrs x1, FPCR
  ldr x2, =0xf000001f
  bic x1, x1, x2
  and x2, x2, x0
  orr x1, x1, x2
  msr FPCR, x1

.macro clobber_vfp_register reg
  sub x0, x0, #1
  fmov \reg, x0
.endm

  clobber_vfp_register d0
  clobber_vfp_register d1
  clobber_vfp_register d2
  clobber_vfp_register d3
  clobber_vfp_register d4
  clobber_vfp_register d5
  clobber_vfp_register d6
  clobber_vfp_register d7
  clobber_vfp_register d16
  clobber_vfp_register d17
  clobber_vfp_register d18
  clobber_vfp_register d19
  clobber_vfp_register d20
  clobber_vfp_register d21
  clobber_vfp_register d22
  clobber_vfp_register d23
  clobber_vfp_register d24
  clobber_vfp_register d25
  clobber_vfp_register d26
  clobber_vfp_register d27
  clobber_vfp_register d28
  clobber_vfp_register d29
  clobber_vfp_register d30
  clobber_vfp_register d31
#endif /* AARCH64_MULTILIB_VFP */

  clobber_register x1
  clobber_register x2
  clobber_register x3
  clobber_register x12

  ret

FUNCTION_END(_CPU_Context_volatile_clobber)
cpukit/score/cpu/aarch64/aarch64-exception-default.S (new file, 488 lines)
@@ -0,0 +1,488 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of AArch64 exception vector table.
 *
 * This file implements the AArch64 exception vector table and its embedded
 * jump handlers along with the code necessary to call higher level C handlers.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.extern _AArch64_Exception_default

.globl bsp_start_vector_table_begin
.globl bsp_start_vector_table_end
.globl bsp_start_vector_table_size
.globl bsp_vector_table_size

.section ".text"

/*
 * This is the exception vector table and the pointers to the default
 * exception handlers.
 */

/*
 * TODO(kmoore) The current implementation here assumes that SP is not
 * misaligned.
 */
.macro JUMP_HANDLER
  /* Mask to use in BIC, lower 7 bits */
  mov x0, #0x7f
  /* LR contains PC, mask off to the base of the current vector */
  bic x0, lr, x0
  /* Load address from the last word in the vector */
  ldr x0, [x0, #0x78]
  /*
   * Branch and link to the address in x0. There is no reason to save the
   * current LR since it has already been saved and the current contents
   * are junk.
   */
  blr x0
  /* Pop x0,lr from stack */
  ldp x0, lr, [sp], #16
  /* Return from exception */
  eret
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
  nop
.endm
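(Annotation, not part of the commit: each vector slot is 0x80 bytes.
`bic x0, lr, x0` rounds the link register, which points just past the `bl`
inside the slot, down to the slot base, and `ldr x0, [x0, #0x78]` fetches the
handler address that JUMP_TARGET_SP0/SPx places in the slot's last eight
bytes. The 22 nops pad the macro so that target lands exactly at offset 0x78:
two instructions precede the macro, six instructions form its body, and
8 + 6*4 + 22*4 = 0x78.)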

.macro JUMP_TARGET_SP0
  /* Takes up the space of 2 instructions */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  .word .print_exception_dump_sp0
  .word 0x0
#else
  .dword .print_exception_dump_sp0
#endif
.endm

.macro JUMP_TARGET_SPx
  /* Takes up the space of 2 instructions */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  .word .print_exception_dump_spx
  .word 0x0
#else
  .dword .print_exception_dump_spx
#endif
.endm

bsp_start_vector_table_begin:
.balign 0x800
Vector_table_el3:
/*
 * The exception handler for synchronous exceptions from the current EL
 * using SP0.
 */
curr_el_sp0_sync:
  stp x0, lr, [sp, #-16]!    /* Push x0,lr on to the stack */
  bl curr_el_sp0_sync_get_pc /* Get current execution address */
curr_el_sp0_sync_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SP0
.balign 0x80
/* The exception handler for IRQ exceptions from the current EL using SP0. */
curr_el_sp0_irq:
  stp x0, lr, [sp, #-16]!   /* Push x0,lr on to the stack */
  bl curr_el_sp0_irq_get_pc /* Get current execution address */
curr_el_sp0_irq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SP0
.balign 0x80
/* The exception handler for FIQ exceptions from the current EL using SP0. */
curr_el_sp0_fiq:
  stp x0, lr, [sp, #-16]!   /* Push x0,lr on to the stack */
  bl curr_el_sp0_fiq_get_pc /* Get current execution address */
curr_el_sp0_fiq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SP0
.balign 0x80
/*
 * The exception handler for system error exceptions from the current EL using
 * SP0.
 */
curr_el_sp0_serror:
  stp x0, lr, [sp, #-16]!      /* Push x0,lr on to the stack */
  bl curr_el_sp0_serror_get_pc /* Get current execution address */
curr_el_sp0_serror_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SP0
.balign 0x80
/*
 * The exception handler for synchronous exceptions from the current EL using
 * the current SP.
 */
curr_el_spx_sync:
  stp x0, lr, [sp, #-16]!    /* Push x0,lr on to the stack */
  bl curr_el_spx_sync_get_pc /* Get current execution address */
curr_el_spx_sync_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for IRQ exceptions from the current EL using the
 * current SP.
 */
curr_el_spx_irq:
  stp x0, lr, [sp, #-16]!   /* Push x0,lr on to the stack */
  bl curr_el_spx_irq_get_pc /* Get current execution address */
curr_el_spx_irq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for FIQ exceptions from the current EL using the
 * current SP.
 */
curr_el_spx_fiq:
  stp x0, lr, [sp, #-16]!   /* Push x0,lr on to the stack */
  bl curr_el_spx_fiq_get_pc /* Get current execution address */
curr_el_spx_fiq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for system error exceptions from the current EL using
 * the current SP.
 */
curr_el_spx_serror:
  stp x0, lr, [sp, #-16]!      /* Push x0,lr on to the stack */
  bl curr_el_spx_serror_get_pc /* Get current execution address */
curr_el_spx_serror_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for synchronous exceptions from a lower EL (AArch64).
 */
lower_el_aarch64_sync:
  stp x0, lr, [sp, #-16]!        /* Push x0,lr on to the stack */
  bl lower_el_aarch64_sync_get_pc /* Get current execution address */
lower_el_aarch64_sync_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/* The exception handler for IRQ exceptions from a lower EL (AArch64). */
lower_el_aarch64_irq:
  stp x0, lr, [sp, #-16]!       /* Push x0,lr on to the stack */
  bl lower_el_aarch64_irq_get_pc /* Get current execution address */
lower_el_aarch64_irq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/* The exception handler for FIQ exceptions from a lower EL (AArch64). */
lower_el_aarch64_fiq:
  stp x0, lr, [sp, #-16]!       /* Push x0,lr on to the stack */
  bl lower_el_aarch64_fiq_get_pc /* Get current execution address */
lower_el_aarch64_fiq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for system error exceptions from a lower EL (AArch64).
 */
lower_el_aarch64_serror:
  /* Push x0,lr on to the stack */
  stp x0, lr, [sp, #-16]!
  /* Get current execution address */
  bl lower_el_aarch64_serror_get_pc
lower_el_aarch64_serror_get_pc: /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for the synchronous exception from a lower EL
 * (AArch32).
 */
lower_el_aarch32_sync:
  stp x0, lr, [sp, #-16]!        /* Push x0,lr on to the stack */
  bl lower_el_aarch32_sync_get_pc /* Get current execution address */
lower_el_aarch32_sync_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/* The exception handler for the IRQ exception from a lower EL (AArch32). */
lower_el_aarch32_irq:
  stp x0, lr, [sp, #-16]!       /* Push x0,lr on to the stack */
  bl lower_el_aarch32_irq_get_pc /* Get current execution address */
lower_el_aarch32_irq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/* The exception handler for the FIQ exception from a lower EL (AArch32). */
lower_el_aarch32_fiq:
  stp x0, lr, [sp, #-16]!       /* Push x0,lr on to the stack */
  bl lower_el_aarch32_fiq_get_pc /* Get current execution address */
lower_el_aarch32_fiq_get_pc:     /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx
.balign 0x80
/*
 * The exception handler for the system error exception from a lower EL
 * (AArch32).
 */
lower_el_aarch32_serror:
  /* Push x0,lr on to the stack */
  stp x0, lr, [sp, #-16]!
  /* Get current execution address */
  bl lower_el_aarch32_serror_get_pc
lower_el_aarch32_serror_get_pc: /* The current PC is now in LR */
  JUMP_HANDLER
  JUMP_TARGET_SPx

bsp_start_vector_table_end:

.set bsp_start_vector_table_size, bsp_start_vector_table_end - bsp_start_vector_table_begin
.set bsp_vector_table_size, bsp_start_vector_table_size

/*
 * This involves switching a few things around. The real x0 and lr are on SPx
 * and need to be retrieved while the lr upon entry contains the pointer into
 * the AArch64 vector table.
 */
.print_exception_dump_spx:
  /* Switch to exception stack (SP0) */
  msr spsel, #0
  /* Save space for exception context */
  sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
  /*
   * Push exception vector, LR currently points into the actual exception
   * vector table
   */
  and lr, lr, #0x780
  lsr lr, lr, #7
  str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
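  /*
   * Annotation, not part of the commit: since the table is aligned to 0x800
   * and each slot is 0x80 bytes, (LR & 0x780) >> 7 recovers the slot index,
   * 0 for curr_el_sp0_sync up through 15 for lower_el_aarch32_serror; that
   * index is what lands in the frame's vector field.
   */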
  /* Pop x0,lr from stack, saved by generic handler */
  /*
   * This modifies the stack pointer back to the pre-vector-handler value
   * which is safe because this will never return
   */
  msr spsel, #1
  ldp x0, lr, [sp], #16
  msr spsel, #0
  /* Save LR */
  str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
  /* Push the start of the context */
  bl .push_exception_context_start
  /* Save original sp in x0 for .push_exception_context_finish */
  msr spsel, #1
  mov x0, sp
  msr spsel, #0
  /* Push the remainder of the context */
  bl .push_exception_context_finish
  /* Save sp into x0 for handler */
  mov x0, sp
  /* Jump into the handler */
  bl _AArch64_Exception_default

  /* Just in case */
  b twiddle

.print_exception_dump_sp0:
  /* Save space for exception context */
  sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
  /*
   * Push exception vector, LR currently points into the actual exception
   * vector table
   */
  and lr, lr, #0x780
  lsr lr, lr, #7
  str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
  /* Get x0,lr from stack, saved by generic handler */
  add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
  ldp x0, lr, [sp]
  sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
  /* Save LR */
  str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
  /* Push the start of the context */
  bl .push_exception_context_start
  /* Save original sp in x0 for .push_exception_context_finish */
  add x0, sp, #(AARCH64_EXCEPTION_FRAME_SIZE + 16)
  /* Push the remainder of the context */
  bl .push_exception_context_finish
  /* Save sp (exception frame) into x0 for handler */
  mov x0, sp
  /* Jump into the handler */
  bl _AArch64_Exception_default

  /* Just in case */
twiddle:
  b twiddle

/* Assumes SP is at the base of the context and LR has already been pushed */
.push_exception_context_start:
  /* Push x0-x29(fp) */
  stp x0,  x1,  [sp, #0x00]
  stp x2,  x3,  [sp, #0x10]
  stp x4,  x5,  [sp, #0x20]
  stp x6,  x7,  [sp, #0x30]
  stp x8,  x9,  [sp, #0x40]
  stp x10, x11, [sp, #0x50]
  stp x12, x13, [sp, #0x60]
  stp x14, x15, [sp, #0x70]
  stp x16, x17, [sp, #0x80]
  stp x18, x19, [sp, #0x90]
  stp x20, x21, [sp, #0xa0]
  stp x22, x23, [sp, #0xb0]
  stp x24, x25, [sp, #0xc0]
  stp x26, x27, [sp, #0xd0]
  stp x28, x29, [sp, #0xe0]
  ret

/* Expects original SP to be stored in x0 */
.push_exception_context_finish:
  /* Get exception LR for PC */
  mrs x1, ELR_EL1
  /* Push sp and pc */
  stp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
  /* Get daif and spsr */
  mrs x0, DAIF
  mrs x1, SPSR_EL1
  /* Push daif and spsr */
  stp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
  /* Get ESR and FAR */
  mrs x0, ESR_EL1
  mrs x1, FAR_EL1
  /* Push ESR and FAR */
  stp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
  /* Get fpsr and fpcr */
  mrs x0, FPSR
  mrs x1, FPCR
  /* Push fpsr and fpcr */
  stp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
  /* Push VFP registers */
  stp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
  stp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
  stp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
  stp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
  stp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
  stp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
  stp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
  stp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
  stp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
  stp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
  stp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
  stp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
  stp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
  stp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
  stp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
  stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
  /* Done, return to exception handler */
  ret

/*
 * Apply the exception frame to the current register status, SP points to
 * the EF
 */
.pop_exception_context_and_ret:
  /* Pop daif and spsr */
  ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
  /* Restore daif and spsr */
  msr DAIF, x2
  msr SPSR_EL1, x3
  /* Pop ESR and FAR */
  ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
  /* Restore ESR and FAR */
  msr ESR_EL1, x2
  msr FAR_EL1, x3
  /* Pop fpsr and fpcr */
  ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
  /* Restore fpsr and fpcr */
  msr FPSR, x2
  msr FPCR, x3
  /* Restore LR */
  ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
  /* Pop VFP registers */
  ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
  ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
  ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
  ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
  ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
  ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
  ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
  ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
  ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
  ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
  ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
  ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
  ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
  ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
  ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
  ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
  /* Pop x0-x29(fp) */
  ldp x2,  x3,  [sp, #0x10]
  ldp x4,  x5,  [sp, #0x20]
  ldp x6,  x7,  [sp, #0x30]
  ldp x8,  x9,  [sp, #0x40]
  ldp x10, x11, [sp, #0x50]
  ldp x12, x13, [sp, #0x60]
  ldp x14, x15, [sp, #0x70]
  ldp x16, x17, [sp, #0x80]
  ldp x18, x19, [sp, #0x90]
  ldp x20, x21, [sp, #0xa0]
  ldp x22, x23, [sp, #0xb0]
  ldp x24, x25, [sp, #0xc0]
  ldp x26, x27, [sp, #0xd0]
  ldp x28, x29, [sp, #0xe0]
  /* Pop sp (ignored since sp should be shortly restored anyway) and ELR */
  ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
  /* Restore exception LR */
  msr ELR_EL1, x1
  ldp x0, x1, [sp, #0x00]
  add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE

  /* We must clear reservations to ensure consistency with atomic operations */
  clrex

  ret
cpukit/score/cpu/aarch64/aarch64-exception-default.c (new file, 50 lines)
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of _AArch64_Exception_default
 *
 * This file implements _AArch64_Exception_default for use as part of the
 * default exception handling code which dumps all system registers.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/cpu.h>
#include <rtems/fatal.h>

void _AArch64_Exception_default( CPU_Exception_frame *frame )
{
  rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
}
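A sketch of how this surfaces to an application (my illustration, not part of
the commit), assuming the usual RTEMS fatal-extension hook signature:

  /* Hypothetical user extension: the exception frame arrives as the code. */
  void fatal_extension(
    rtems_fatal_source source,
    bool               always_set_to_false,
    rtems_fatal_code   code
  )
  {
    if ( source == RTEMS_FATAL_SOURCE_EXCEPTION ) {
      /* code carries the CPU_Exception_frame pointer passed above */
      _CPU_Exception_frame_print( (const CPU_Exception_frame *) code );
    }
  }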
cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c (new file, 108 lines)
@@ -0,0 +1,108 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of _CPU_Exception_frame_print.
 *
 * This file implements _CPU_Exception_frame_print for use in fatal output.
 * It dumps all standard integer and floating point registers as well as some
 * of the more important system registers.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <inttypes.h>

#include <rtems/score/cpu.h>
#include <rtems/bspIo.h>

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame )
{
  printk(
    "\n"
    "X0   = 0x%016" PRIx64  " X17  = 0x%016" PRIx64 "\n"
    "X1   = 0x%016" PRIx64  " X18  = 0x%016" PRIx64 "\n"
    "X2   = 0x%016" PRIx64  " X19  = 0x%016" PRIx64 "\n"
    "X3   = 0x%016" PRIx64  " X20  = 0x%016" PRIx64 "\n"
    "X4   = 0x%016" PRIx64  " X21  = 0x%016" PRIx64 "\n"
    "X5   = 0x%016" PRIx64  " X22  = 0x%016" PRIx64 "\n"
    "X6   = 0x%016" PRIx64  " X23  = 0x%016" PRIx64 "\n"
    "X7   = 0x%016" PRIx64  " X24  = 0x%016" PRIx64 "\n"
    "X8   = 0x%016" PRIx64  " X25  = 0x%016" PRIx64 "\n"
    "X9   = 0x%016" PRIx64  " X26  = 0x%016" PRIx64 "\n"
    "X10  = 0x%016" PRIx64  " X27  = 0x%016" PRIx64 "\n"
    "X11  = 0x%016" PRIx64  " X28  = 0x%016" PRIx64 "\n"
    "X12  = 0x%016" PRIx64  " FP   = 0x%016" PRIx64 "\n"
    "X13  = 0x%016" PRIx64  " LR   = 0x%016" PRIxPTR "\n"
    "X14  = 0x%016" PRIx64  " SP   = 0x%016" PRIx64 "\n"
    "X15  = 0x%016" PRIx64  " PC   = 0x%016" PRIxPTR "\n"
    "X16  = 0x%016" PRIx64  " DAIF = 0x%016" PRIx64 "\n"
    "VEC  = 0x%016" PRIxPTR " CPSR = 0x%016" PRIx64 "\n"
    "ESR  = 0x%016" PRIx64  " FAR  = 0x%016" PRIx64 "\n",
    frame->register_x0, frame->register_x17,
    frame->register_x1, frame->register_x18,
    frame->register_x2, frame->register_x19,
    frame->register_x3, frame->register_x20,
    frame->register_x4, frame->register_x21,
    frame->register_x5, frame->register_x22,
    frame->register_x6, frame->register_x23,
    frame->register_x7, frame->register_x24,
    frame->register_x8, frame->register_x25,
    frame->register_x9, frame->register_x26,
    frame->register_x10, frame->register_x27,
    frame->register_x11, frame->register_x28,
    frame->register_x12, frame->register_fp,
    frame->register_x13, (intptr_t) frame->register_lr,
    frame->register_x14, frame->register_sp,
    frame->register_x15, (intptr_t) frame->register_pc,
    frame->register_x16, frame->register_daif,
    (intptr_t) frame->vector, frame->register_cpsr,
    frame->register_syndrome, frame->register_fault_address
  );

  const uint128_t *qx = &frame->register_q0;
  int i;

  printk(
    "FPCR = 0x%016" PRIx64 " FPSR = 0x%016" PRIx64 "\n",
    frame->register_fpcr, frame->register_fpsr
  );

  for ( i = 0; i < 32; ++i ) {
    uint64_t low = (uint64_t) qx[i];
    /* Shift by 64, not 32, to extract the high doubleword of the Q register */
    uint64_t high = (uint64_t) (qx[i] >> 64);

    printk( "Q%02i = 0x%016" PRIx64 "%016" PRIx64 "\n", i, high, low );
  }
}
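(Annotation, not part of the commit: the output lines take the form
`Q08 = 0x0123456789abcdef0123456789abcdef`, with the high doubleword printed
before the low one; the 64-bit shift above is what splits the 128-bit
register value into those two halves.)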
cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S (new file, 322 lines)
@@ -0,0 +1,322 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of AArch64 interrupt exception handling
 *
 * This file implements the SP0 and SPx interrupt exception handlers to
 * deal with nested and non-nested interrupts.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.globl _AArch64_Exception_interrupt_no_nest
.globl _AArch64_Exception_interrupt_nest

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
#define SELF_CPU_CONTROL_GET_REG w19
#else
#define SELF_CPU_CONTROL_GET_REG x19
#endif
#define SELF_CPU_CONTROL x19
#define NON_VOLATILE_SCRATCH x20

/* It's understood that CPU state is saved prior to and restored after this */
/*
 * NOTE: This function does not follow the AArch64 procedure call
 * specification because all relevant state is known to be saved in the
 * interrupt context, hence the blind usage of x19, x20, and x21
 */
.AArch64_Interrupt_Handler:
  /* Get per-CPU control of current processor */
  GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG

  /* Increment interrupt nest and thread dispatch disable level */
  ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
  ldr w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
  add w2, w2, #1
  add w3, w3, #1
  str w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
  str w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

  /* Save LR */
  mov x21, LR

  /* Call BSP dependent interrupt dispatcher */
  bl bsp_interrupt_dispatch

  /* Restore LR */
  mov LR, x21

  /* Load some per-CPU variables */
  ldr w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
  ldrb w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
  ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
  ldr w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

  /* Decrement levels and determine thread dispatch state */
  eor w1, w1, w0
  sub w0, w0, #1
  orr w1, w1, w0
  orr w1, w1, w2
  sub w3, w3, #1
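  /*
   * Annotation, not part of the commit: after this sequence w1 (moved to x0
   * below) is zero exactly when the dispatch disable level was 1 on entry
   * (so it is now 0), PER_CPU_DISPATCH_NEEDED was set, and
   * PER_CPU_ISR_DISPATCH_DISABLE was clear; any other combination yields a
   * non-zero "skip thread dispatch" result.
   */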

  /* Store thread dispatch disable and ISR nest levels */
  str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
  str w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

  /* Return should_skip_thread_dispatch in x0 */
  mov x0, x1
  /* Return from handler */
  ret

/*
 * NOTE: This function does not follow the AArch64 procedure call
 * specification because all relevant state is known to be saved in the
 * interrupt context, hence the blind usage of x19, x20, and x21
 */
.AArch64_Perform_Thread_Dispatch:
  /* Get per-CPU control of current processor */
  GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG

  /* Thread dispatch */
  mrs NON_VOLATILE_SCRATCH, DAIF

.Ldo_thread_dispatch:

  /* Set ISR dispatch disable and thread dispatch disable level to one */
  mov w0, #1
  str w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
  str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

  /* Save LR */
  mov x21, LR

  /* Call _Thread_Do_dispatch(), this function will enable interrupts */
  mov x0, SELF_CPU_CONTROL
  mov x1, NON_VOLATILE_SCRATCH
  mov x2, #0x80
  bic x1, x1, x2
  bl _Thread_Do_dispatch

  /* Restore LR */
  mov LR, x21

  /* Disable interrupts */
  msr DAIF, NON_VOLATILE_SCRATCH

#ifdef RTEMS_SMP
  GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG
#endif

  /* Check if we have to do the thread dispatch again */
  ldrb w0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
  cmp w0, #0
  bne .Ldo_thread_dispatch

  /* We are done with thread dispatching */
  mov w0, #0
  str w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

  /* Return from thread dispatch */
  ret

/*
 * Must save corruptible registers and non-corruptible registers expected to
 * be used, x0 and lr expected to be already saved on the stack
 */
.macro push_interrupt_context
  /*
   * Push x1-x21 on to the stack, need 19-21 because they're modified without
   * obeying PCS
   */
  stp lr,  x1,  [sp, #-16]!
  stp x2,  x3,  [sp, #-16]!
  stp x4,  x5,  [sp, #-16]!
  stp x6,  x7,  [sp, #-16]!
  stp x8,  x9,  [sp, #-16]!
  stp x10, x11, [sp, #-16]!
  stp x12, x13, [sp, #-16]!
  stp x14, x15, [sp, #-16]!
  stp x16, x17, [sp, #-16]!
  stp x18, x19, [sp, #-16]!
  stp x20, x21, [sp, #-16]!
  /*
   * Push q0-q31 on to the stack, need everything because parts of every
   * register are volatile/corruptible
   */
  stp q0,  q1,  [sp, #-32]!
  stp q2,  q3,  [sp, #-32]!
  stp q4,  q5,  [sp, #-32]!
  stp q6,  q7,  [sp, #-32]!
  stp q8,  q9,  [sp, #-32]!
  stp q10, q11, [sp, #-32]!
  stp q12, q13, [sp, #-32]!
  stp q14, q15, [sp, #-32]!
  stp q16, q17, [sp, #-32]!
  stp q18, q19, [sp, #-32]!
  stp q20, q21, [sp, #-32]!
  stp q22, q23, [sp, #-32]!
  stp q24, q25, [sp, #-32]!
  stp q26, q27, [sp, #-32]!
  stp q28, q29, [sp, #-32]!
  stp q30, q31, [sp, #-32]!
  /* Get exception LR for PC and spsr */
  mrs x0, ELR_EL1
  mrs x1, SPSR_EL1
  /* Push pc and spsr */
  stp x0, x1, [sp, #-16]!
  /* Get fpsr and fpcr */
  mrs x0, FPSR
  mrs x1, FPCR
  /* Push fpsr and fpcr */
  stp x0, x1, [sp, #-16]!
.endm

/* Must match inverse order of push_interrupt_context */
.macro pop_interrupt_context
  /* Pop fpsr and fpcr */
  ldp x0, x1, [sp], #16
  /* Restore fpcr and fpsr */
  msr FPCR, x1
  msr FPSR, x0
  /* Pop pc and spsr */
  ldp x0, x1, [sp], #16
  /* Restore exception LR for PC and spsr */
  msr SPSR_EL1, x1
  msr ELR_EL1, x0
  /* Pop q0-q31 */
  ldp q30, q31, [sp], #32
  ldp q28, q29, [sp], #32
  ldp q26, q27, [sp], #32
  ldp q24, q25, [sp], #32
  ldp q22, q23, [sp], #32
  ldp q20, q21, [sp], #32
  ldp q18, q19, [sp], #32
  ldp q16, q17, [sp], #32
  ldp q14, q15, [sp], #32
  ldp q12, q13, [sp], #32
  ldp q10, q11, [sp], #32
  ldp q8,  q9,  [sp], #32
  ldp q6,  q7,  [sp], #32
  ldp q4,  q5,  [sp], #32
  ldp q2,  q3,  [sp], #32
  ldp q0,  q1,  [sp], #32
  /* Pop x1-x21 */
  ldp x20, x21, [sp], #16
  ldp x18, x19, [sp], #16
  ldp x16, x17, [sp], #16
  ldp x14, x15, [sp], #16
  ldp x12, x13, [sp], #16
  ldp x10, x11, [sp], #16
  ldp x8,  x9,  [sp], #16
  ldp x6,  x7,  [sp], #16
  ldp x4,  x5,  [sp], #16
  ldp x2,  x3,  [sp], #16
  ldp lr,  x1,  [sp], #16
  /* Must clear reservations here to ensure consistency with atomic operations */
  clrex
.endm

_AArch64_Exception_interrupt_nest:

  /*
   * Execution template:
   *   Save volatile registers on the interrupt stack
   *   Execute the IRQ handler
   *   Restore volatile registers from the interrupt stack
   *   Exception return
   */

  /* Push interrupt context */
  push_interrupt_context

  /* Jump into the handler, ignore return value */
  bl .AArch64_Interrupt_Handler

  /*
   * SP should be where it was pre-handler (pointing at the exception frame)
   * or something has leaked stack space
   */
  /* Pop interrupt context */
  pop_interrupt_context
  /* Return to vector for final cleanup */
  ret

_AArch64_Exception_interrupt_no_nest:
  /*
   * Execution template:
   *   Save volatile registers on the thread stack (some x, all q, ELR, etc.)
   *   Switch to the interrupt stack
   *   Execute the interrupt handler
   *   Switch to the thread stack
   *   Call thread dispatch
   *   Restore volatile registers from the thread stack
   *   Return to dispatch
   */

  /* Push interrupt context */
  push_interrupt_context

  /*
   * Switch to interrupt stack, interrupt dispatch may enable interrupts
   * causing nesting
   */
  msr spsel, #0

  /* Jump into the handler */
  bl .AArch64_Interrupt_Handler

  /*
   * Switch back to thread stack, interrupt dispatch should disable interrupts
   * before returning
   */
  msr spsel, #1

  /*
   * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
   * disable level.
   */
  cmp x0, #0
  bne .Lno_need_thread_dispatch
  bl .AArch64_Perform_Thread_Dispatch

.Lno_need_thread_dispatch:
  /*
   * SP should be where it was pre-handler (pointing at the exception frame)
   * or something has leaked stack space
   */
  /* Pop interrupt context */
  pop_interrupt_context
  /* Return to vector for final cleanup */
  ret
cpukit/score/cpu/aarch64/aarch64-thread-idle.c (new file, 46 lines)
@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @brief CPU Thread Idle Body
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/cpu.h>

void *_CPU_Thread_Idle_body( uintptr_t ignored )
{
  while ( true ) {
    /* Wait for interrupt; wakes on any pending interrupt, masked or not */
    __asm__ volatile ("wfi");
  }
}
cpukit/score/cpu/aarch64/cpu.c (new file, 197 lines)
@@ -0,0 +1,197 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief AArch64 architecture support implementation.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/assert.h>
#include <rtems/score/cpu.h>
#include <rtems/score/thread.h>
#include <rtems/score/tls.h>

#ifdef AARCH64_MULTILIB_VFP
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, register_d8 )
    == AARCH64_CONTEXT_CONTROL_D8_OFFSET,
  AARCH64_CONTEXT_CONTROL_D8_OFFSET
);
#endif

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, thread_id )
    == AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET,
  AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, isr_dispatch_disable )
    == AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE,
  AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE
);

#ifdef RTEMS_SMP
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, is_executing )
    == AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
  AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
);
#endif

RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) == AARCH64_EXCEPTION_FRAME_SIZE,
  AARCH64_EXCEPTION_FRAME_SIZE
);

RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) % CPU_STACK_ALIGNMENT == 0,
  CPU_Exception_frame_alignment
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_sp )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_lr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_daif )
    == AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_syndrome )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, vector )
    == AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_fpsr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_q0 )
    == AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET
);

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint64_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
)
{
  (void) new_level;

  the_context->register_sp = (uintptr_t) stack_area_begin + stack_area_size;
  the_context->register_lr = (uintptr_t) entry_point;
  the_context->isr_dispatch_disable = 0;

  the_context->thread_id = (uintptr_t) tls_area;

  if ( tls_area != NULL ) {
    _TLS_TCB_at_area_begin_initialize( tls_area );
  }
}

void _CPU_ISR_Set_level( uint64_t level )
{
  /* Set the mask bit if interrupts are disabled */
  level = level ? AARCH64_PSTATE_I : 0;
  __asm__ volatile (
    "msr DAIF, %[level]\n"
    : : [level] "r" (level)
  );
}

uint64_t _CPU_ISR_Get_level( void )
{
  uint64_t level;

  __asm__ volatile (
    "mrs %[level], DAIF\n"
    : [level] "=&r" (level)
  );

  return level & AARCH64_PSTATE_I;
}
|
||||
|
||||
void _CPU_ISR_install_vector(
|
||||
uint32_t vector,
|
||||
CPU_ISR_handler new_handler,
|
||||
CPU_ISR_handler *old_handler
|
||||
)
|
||||
{
|
||||
/* Redirection table starts at the end of the vector table */
|
||||
CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
|
||||
|
||||
CPU_ISR_handler current_handler = table [vector];
|
||||
|
||||
/* The current handler is now the old one */
|
||||
if (old_handler != NULL) {
|
||||
*old_handler = current_handler;
|
||||
}
|
||||
|
||||
/* Write only if necessary to avoid writes to a maybe read-only memory */
|
||||
if (current_handler != new_handler) {
|
||||
table [vector] = new_handler;
|
||||
}
|
||||
}
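
/*
 * Usage sketch (illustrative, not part of this commit): replace the handler
 * for one vector and keep the previous one for later restoration. The vector
 * number and the handler name my_irq_handler are hypothetical.
 *
 *   CPU_ISR_handler old;
 *   _CPU_ISR_install_vector( 5, my_irq_handler, &old );
 *   ...
 *   _CPU_ISR_install_vector( 5, old, NULL );
 */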

void _CPU_Initialize( void )
{
  /* Do nothing */
}
134
cpukit/score/cpu/aarch64/cpu_asm.S
Normal file
@@ -0,0 +1,134 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief AArch64 architecture context switch implementation.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.text

/*
 * void _CPU_Context_switch( run_context, heir_context )
 * void _CPU_Context_restore( new_context )
 *
 * This routine performs a normal non-FP context switch.
 *
 * X0 = run_context    X1 = heir_context
 *
 * This function copies the current registers to where x0 points, then
 * restores the ones from where x1 points.
 */

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
#define reg_2 w2
#else
#define reg_2 x2
#endif
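
/*
 * Save-area layout sketch, matching the Context_Control offsets declared in
 * <rtems/score/cpu.h>: x19/x20 at 0x00, x21/x22 at 0x10, x23/x24 at 0x20,
 * x25/x26 at 0x30, x27/x28 at 0x40, fp/lr at 0x50, sp at 0x60,
 * isr_dispatch_disable at 0x68 (104), thread_id at 0x70 (112) and, with VFP,
 * d8..d15 starting at 0x78 (120).
 */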

DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
/* Start saving context */
  GET_SELF_CPU_CONTROL reg_2
  ldr x3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]

  stp x19, x20, [x0]
  stp x21, x22, [x0, #0x10]
  stp x23, x24, [x0, #0x20]
  stp x25, x26, [x0, #0x30]
  stp x27, x28, [x0, #0x40]
  stp fp,  lr,  [x0, #0x50]
  mov x4,  sp
  str x4,  [x0, #0x60]

#ifdef AARCH64_MULTILIB_VFP
  add x5, x0, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
  stp d8,  d9,  [x5]
  stp d10, d11, [x5, #16]
  stp d12, d13, [x5, #32]
  stp d14, d15, [x5, #48]
#endif

  str x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]

#ifdef RTEMS_SMP
#error SMP not yet supported
#endif

/* Start restoring context */
.L_restore:
#if !defined(RTEMS_SMP) && defined(AARCH64_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE)
  clrex
#endif

  ldr x3, [x1, #AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET]

  ldr x4, [x1, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]

#ifdef AARCH64_MULTILIB_VFP
  add x5, x1, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
  ldp d8,  d9,  [x5]
  ldp d10, d11, [x5, #16]
  ldp d12, d13, [x5, #32]
  ldp d14, d15, [x5, #48]
#endif

  msr TPIDR_EL0, x3

  str x4, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]

  ldp x19, x20, [x1]
  ldp x21, x22, [x1, #0x10]
  ldp x23, x24, [x1, #0x20]
  ldp x25, x26, [x1, #0x30]
  ldp x27, x28, [x1, #0x40]
  ldp fp,  lr,  [x1, #0x50]
  ldr x4,  [x1, #0x60]
  mov sp,  x4
  ret

/*
 * void _CPU_Context_restore( new_context )
 *
 * This function restores the registers from where x0 points.
 * It must match _CPU_Context_switch().
 */
DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
  mov x1, x0
  GET_SELF_CPU_CONTROL reg_2
  b .L_restore
100
cpukit/score/cpu/aarch64/include/libcpu/vectors.h
Normal file
@@ -0,0 +1,100 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief ARM AArch64 Exception API.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef LIBCPU_AARCH64_VECTORS_H
#define LIBCPU_AARCH64_VECTORS_H

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* VBAR, Vector Base Address Register, Security Extensions */

static inline void *AArch64_get_vector_base_address( void )
{
  void *base;

  __asm__ volatile (
    "mrs %[base], VBAR_EL1\n"
    : [base] "=&r" (base)
  );

  return base;
}

static inline void AArch64_set_vector_base_address( void *base )
{
  __asm__ volatile (
    "msr VBAR_EL1, %[base]\n"
    : : [base] "r" (base)
  );
}
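
/*
 * Usage sketch (illustrative, not part of this commit): point VBAR_EL1 at a
 * suitably aligned vector table before enabling interrupts. The symbol name
 * bsp_start_vector_table_begin is hypothetical and BSP-specific.
 *
 *   extern char bsp_start_vector_table_begin[];
 *   AArch64_set_vector_base_address( bsp_start_vector_table_begin );
 */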

static inline void *AArch64_get_hyp_vector_base_address( void )
{
  void *base;

  __asm__ volatile (
    "mrs %[base], VBAR_EL2\n"
    : [base] "=&r" (base)
  );

  return base;
}

static inline void AArch64_set_hyp_vector_base_address( void *base )
{
  __asm__ volatile (
    "msr VBAR_EL2, %[base]\n"
    : : [base] "r" (base)
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* ASM */

#endif /* LIBCPU_AARCH64_VECTORS_H */
89
cpukit/score/cpu/aarch64/include/rtems/asm.h
Normal file
@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @brief AArch64 Assembler Support API
 *
 * This include file attempts to address the problems
 * caused by incompatible flavors of assemblers and
 * toolsets. It primarily addresses variations in the
 * use of leading underscores on symbols and the requirement
 * that register names be preceded by a %.
 *
 * NOTE: The spacing in the use of these macros
 *       is critical to them working as advertised.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_ASM_H
#define _RTEMS_ASM_H

/*
 * Indicate we are in an assembly file and get the basic CPU definitions.
 */

#ifndef ASM
#define ASM
#endif
#include <rtems/score/percpu.h>

/**
 * @defgroup RTEMSScoreCPUAArch64ASM AArch64 Assembler Support
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief AArch64 Assembler Support
 */
/**@{**/

/*
 * The following must be tailored for a particular flavor of the C compiler.
 * They may need to put underscores in front of the symbols.
 */

#define FUNCTION_ENTRY(name) \
  .align 8; \
  .globl name; \
  .type name, %function; \
  name:

#define FUNCTION_END(name) \
  .size name, . - name

#define DEFINE_FUNCTION_AARCH64(name) \
  .align 8 ; .globl name ; name: ; .globl name ## _aarch64 ; name ## _aarch64:
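
/*
 * Usage (as in cpu_asm.S): DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
 * emits the _CPU_Context_switch entry point plus a
 * _CPU_Context_switch_aarch64 alias label at the same address.
 */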

.macro GET_SELF_CPU_CONTROL REG
  ldr \REG, =_Per_CPU_Information
.endm

/** @} */

#endif /* _RTEMS_ASM_H */
83
cpukit/score/cpu/aarch64/include/rtems/score/aarch64.h
Normal file
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief ARM AArch64 Assembler Support API
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_AARCH64_H
#define _RTEMS_SCORE_AARCH64_H

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup RTEMSScoreCPUAArch64
 */
/**@{**/

#if defined(__LP64__)
  #define CPU_MODEL_NAME "AArch64-LP64"
  #define AARCH64_MULTILIB_ARCH_V8
#elif defined(__ILP32__)
  #define CPU_MODEL_NAME "AArch64-ILP32"
  #define AARCH64_MULTILIB_ARCH_V8_ILP32
#endif

#define AARCH64_MULTILIB_HAS_WFI
#define AARCH64_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
#define AARCH64_MULTILIB_HAS_BARRIER_INSTRUCTIONS
#define AARCH64_MULTILIB_HAS_CPACR

#define AARCH64_MULTILIB_CACHE_LINE_MAX_64

#if defined(__ARM_NEON)
  #define AARCH64_MULTILIB_VFP
#else
  #error "FPU support not implemented"
#endif

/*
 * Define the name of the CPU family.
 */

#define CPU_NAME "AArch64"

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* _RTEMS_SCORE_AARCH64_H */
554
cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
Normal file
@@ -0,0 +1,554 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief AArch64 Architecture Support API
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/aarch64.h>
#include <libcpu/vectors.h>

/**
 * @addtogroup RTEMSScoreCPUAArch64
 *
 * @{
 */

/**
 * @name Program State Registers
 */
/**@{**/

#define AARCH64_PSTATE_N (1LL << 31)
#define AARCH64_PSTATE_Z (1LL << 30)
#define AARCH64_PSTATE_C (1LL << 29)
#define AARCH64_PSTATE_V (1LL << 28)
#define AARCH64_PSTATE_D (1LL << 9)
#define AARCH64_PSTATE_A (1LL << 8)
#define AARCH64_PSTATE_I (1LL << 7)
#define AARCH64_PSTATE_F (1LL << 6)

/** @} */

/*
 * AArch64 uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#define CPU_STACK_GROWS_UP FALSE

#if defined(AARCH64_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 10)

/*
 * This is either 4 or 8, depending on the ABI in use; __LP64__ or __ILP32__
 * could be used to distinguish the two.
 */
/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER __SIZEOF_POINTER__

/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_ALIGNMENT 16

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS64, section 6.2.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32

#define AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET 112

#ifdef AARCH64_MULTILIB_VFP
  #define AARCH64_CONTEXT_CONTROL_D8_OFFSET 120
#endif

#define AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 104

#ifdef RTEMS_SMP
  #if defined(AARCH64_MULTILIB_VFP)
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

#define AARCH64_EXCEPTION_FRAME_SIZE 848

#define AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET 248
#define AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET 240
#define AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET 264
#define AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET 280
#define AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET 296
#define AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET 312
#define AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET 336

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef unsigned __int128 uint128_t;

typedef struct {
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  uint64_t register_lr;
  uint64_t register_sp;
  uint64_t isr_dispatch_disable;
  uint64_t thread_id;
#ifdef AARCH64_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;

static inline void _AARCH64_Data_memory_barrier( void )
{
  __asm__ volatile ( "dmb LD" : : : "memory" );
}

static inline void _AARCH64_Data_synchronization_barrier( void )
{
  __asm__ volatile ( "dsb LD" : : : "memory" );
}

static inline void _AARCH64_Instruction_synchronization_barrier( void )
{
  __asm__ volatile ( "isb" : : : "memory" );
}

void _CPU_ISR_Set_level( uint64_t level );

uint64_t _CPU_ISR_Get_level( void );

#if defined(AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint64_t AArch64_interrupt_disable( void );
void AArch64_interrupt_enable( uint64_t level );
void AArch64_interrupt_flash( uint64_t level );
#else
static inline uint64_t AArch64_interrupt_disable( void )
{
  uint64_t level = _CPU_ISR_Get_level();
  __asm__ volatile (
    "msr DAIFSet, #0x2\n"
  );
  return level;
}

static inline void AArch64_interrupt_enable( uint64_t level )
{
  __asm__ volatile (
    "msr DAIF, %[level]\n"
    : : [level] "r" (level)
  );
}

static inline void AArch64_interrupt_flash( uint64_t level )
{
  AArch64_interrupt_enable( level );
  AArch64_interrupt_disable();
}
#endif /* !AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = AArch64_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  AArch64_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  AArch64_interrupt_flash( _isr_cookie )

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t level )
{
  return ( level & AARCH64_PSTATE_I ) == 0;
}
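
/*
 * Usage sketch (illustrative, not part of this commit): a classic
 * disable/flash/enable critical section built from the macros above.
 *
 *   uint64_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   ... first critical section ...
 *   _CPU_ISR_Flash( level );   // briefly re-enables, then disables again
 *   ... second critical section ...
 *   _CPU_ISR_Enable( level );
 */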

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint64_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
static inline bool _CPU_Context_Get_is_executing(
  const Context_Control *context
)
{
  return context->is_executing;
}

static inline void _CPU_Context_Set_is_executing(
  Context_Control *context,
  bool is_executing
)
{
  context->is_executing = is_executing;
}
#endif

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err ) \
  do { \
    uint64_t _level; \
    uint32_t _error = _err; \
    _CPU_ISR_Disable( _level ); \
    (void) _level; \
    __asm__ volatile ("mov x0, %0\n" \
      : "=r" (_error) \
      : "0" (_error) \
      : "x0" ); \
    while (1); \
  } while (0);

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  CPU_ISR_handler new_handler,
  CPU_ISR_handler *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;

#ifdef RTEMS_SMP
uint32_t _CPU_SMP_Initialize( void );

bool _CPU_SMP_Start_processor( uint32_t cpu_index );

void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

void _CPU_SMP_Prepare_start_multitasking( void );

static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  uint32_t mpidr;

  __asm__ volatile (
    "mrs %[mpidr], mpidr_el1\n"
    : [mpidr] "=&r" (mpidr)
  );

  return mpidr & 0xffU;
}

void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

static inline void _AARCH64_Send_event( void )
{
  __asm__ volatile ( "sev" : : : "memory" );
}

static inline void _AARCH64_Wait_for_event( void )
{
  __asm__ volatile ( "wfe" : : : "memory" );
}

static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  _AARCH64_Data_synchronization_barrier();
  _AARCH64_Send_event();
}

static inline void _CPU_SMP_Processor_event_receive( void )
{
  _AARCH64_Wait_for_event();
  _AARCH64_Data_memory_barrier();
}
#endif

static inline uint32_t CPU_swap_u32( uint32_t value )
{
  uint32_t swapped;

  /*
   * REV byte-reverses a 32-bit register. The ARM-style EOR/BIC/MOV
   * sequence used by the 32-bit port does not assemble as A64.
   */
  __asm__ volatile (
    "rev %w[swapped], %w[value]"
    : [swapped] "=r" (swapped)
    : [value] "r" (value)
  );

  return swapped;
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
}
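
/*
 * Worked example: CPU_swap_u32( 0x12345678 ) yields 0x78563412 and
 * CPU_swap_u16( 0x1234 ) yields 0x3412.
 */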

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

void *_CPU_Thread_Idle_body( uintptr_t ignored );

typedef enum {
  AARCH64_EXCEPTION_SP0_SYNCHRONOUS = 0,
  AARCH64_EXCEPTION_SP0_IRQ = 1,
  AARCH64_EXCEPTION_SP0_FIQ = 2,
  AARCH64_EXCEPTION_SP0_SERROR = 3,
  AARCH64_EXCEPTION_SPx_SYNCHRONOUS = 4,
  AARCH64_EXCEPTION_SPx_IRQ = 5,
  AARCH64_EXCEPTION_SPx_FIQ = 6,
  AARCH64_EXCEPTION_SPx_SERROR = 7,
  AARCH64_EXCEPTION_LEL64_SYNCHRONOUS = 8,
  AARCH64_EXCEPTION_LEL64_IRQ = 9,
  AARCH64_EXCEPTION_LEL64_FIQ = 10,
  AARCH64_EXCEPTION_LEL64_SERROR = 11,
  AARCH64_EXCEPTION_LEL32_SYNCHRONOUS = 12,
  AARCH64_EXCEPTION_LEL32_IRQ = 13,
  AARCH64_EXCEPTION_LEL32_FIQ = 14,
  AARCH64_EXCEPTION_LEL32_SERROR = 15,
  MAX_EXCEPTIONS = 16,
  AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
} AArch64_symbolic_exception_name;

#define VECTOR_POINTER_OFFSET 0x78
#define VECTOR_ENTRY_SIZE 0x80

void _AArch64_Exception_interrupt_no_nest( void );
void _AArch64_Exception_interrupt_nest( void );

static inline void *AArch64_set_exception_handler(
  AArch64_symbolic_exception_name exception,
  void (*handler)(void)
)
{
  /* get current table address */
  char *vbar = (char *) AArch64_get_vector_base_address();

  /* calculate address of vector to be replaced */
  char *cvector_address = vbar + VECTOR_ENTRY_SIZE * exception
    + VECTOR_POINTER_OFFSET;

  /* get current vector pointer */
  void (**vector_address)(void) = (void (**)(void)) cvector_address;
  void (*current_vector_pointer)(void);
  current_vector_pointer = *vector_address;

  /* replace vector pointer */
  *vector_address = handler;

  /* return now-previous vector pointer */
  return (void *) current_vector_pointer;
}
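
/*
 * Worked example: each vector table entry is VECTOR_ENTRY_SIZE (0x80) bytes
 * and keeps its handler pointer VECTOR_POINTER_OFFSET (0x78) bytes in, so
 * for AARCH64_EXCEPTION_SPx_IRQ (5) the pointer lives at
 * VBAR + 5 * 0x80 + 0x78 = VBAR + 0x2f8.
 */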

typedef struct {
  uint64_t register_x0;
  uint64_t register_x1;
  uint64_t register_x2;
  uint64_t register_x3;
  uint64_t register_x4;
  uint64_t register_x5;
  uint64_t register_x6;
  uint64_t register_x7;
  uint64_t register_x8;
  uint64_t register_x9;
  uint64_t register_x10;
  uint64_t register_x11;
  uint64_t register_x12;
  uint64_t register_x13;
  uint64_t register_x14;
  uint64_t register_x15;
  uint64_t register_x16;
  uint64_t register_x17;
  uint64_t register_x18;
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  void *register_lr;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_lr_top;
#endif
  uint64_t register_sp;
  void *register_pc;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_pc_top;
#endif
  uint64_t register_daif;
  uint64_t register_cpsr;
  uint64_t register_syndrome;
  uint64_t register_fault_address;
  AArch64_symbolic_exception_name vector;
  uint64_t reserved_for_stack_alignment;
  uint64_t register_fpsr;
  uint64_t register_fpcr;
  uint128_t register_q0;
  uint128_t register_q1;
  uint128_t register_q2;
  uint128_t register_q3;
  uint128_t register_q4;
  uint128_t register_q5;
  uint128_t register_q6;
  uint128_t register_q7;
  uint128_t register_q8;
  uint128_t register_q9;
  uint128_t register_q10;
  uint128_t register_q11;
  uint128_t register_q12;
  uint128_t register_q13;
  uint128_t register_q14;
  uint128_t register_q15;
  uint128_t register_q16;
  uint128_t register_q17;
  uint128_t register_q18;
  uint128_t register_q19;
  uint128_t register_q20;
  uint128_t register_q21;
  uint128_t register_q22;
  uint128_t register_q23;
  uint128_t register_q24;
  uint128_t register_q25;
  uint128_t register_q26;
  uint128_t register_q27;
  uint128_t register_q28;
  uint128_t register_q29;
  uint128_t register_q30;
  uint128_t register_q31;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _AArch64_Exception_default( CPU_Exception_frame *frame );

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

/** @} */

#endif /* _RTEMS_SCORE_CPU_H */
50
cpukit/score/cpu/aarch64/include/rtems/score/cpu_irq.h
Normal file
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief AArch64 IRQ definitions
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_IRQ_H
#define _RTEMS_SCORE_CPU_IRQ_H

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

void bsp_interrupt_dispatch( void );

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_CPU_IRQ_H */
42
cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
Normal file
@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief AArch64 Atomics support
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
#define _RTEMS_SCORE_ATOMIC_CPU_H

#include <rtems/score/cpustdatomic.h>

#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
83
cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
Normal file
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief CPU Port Implementation API
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPUIMPL_H
#define _RTEMS_SCORE_CPUIMPL_H

#include <rtems/score/cpu.h>

/**
 * @defgroup RTEMSScoreCPUAArch64 AArch64
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief ARM AArch64 Architecture Support
 *
 * @{
 */

#define CPU_PER_CPU_CONTROL_SIZE 0
#define CPU_INTERRUPT_FRAME_SIZE 240

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
{
  __asm__ volatile ( ".inst 0x0" );
}

RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
{
  __asm__ volatile ( "nop" );
}

#ifdef __cplusplus
}
#endif

#endif /* ASM */

/** @} */

#endif /* _RTEMS_SCORE_CPUIMPL_H */
36
spec/build/cpukit/cpuaarch64.yml
Normal file
@@ -0,0 +1,36 @@
SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
build-type: objects
cflags: []
copyrights:
- Copyright (C) 2020 On-Line Applications Research (OAR)
cppflags: []
cxxflags: []
enabled-by:
- aarch64
includes: []
install:
- destination: ${BSP_INCLUDEDIR}/libcpu
  source:
  - cpukit/score/cpu/aarch64/include/libcpu/vectors.h
- destination: ${BSP_INCLUDEDIR}/rtems
  source:
  - cpukit/score/cpu/aarch64/include/rtems/asm.h
- destination: ${BSP_INCLUDEDIR}/rtems/score
  source:
  - cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
  - cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
  - cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
  - cpukit/score/cpu/aarch64/include/rtems/score/aarch64.h
  - cpukit/score/cpu/aarch64/include/rtems/score/cpu_irq.h
links: []
source:
- cpukit/score/cpu/aarch64/cpu.c
- cpukit/score/cpu/aarch64/cpu_asm.S
- cpukit/score/cpu/aarch64/aarch64-context-validate.S
- cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S
- cpukit/score/cpu/aarch64/aarch64-thread-idle.c
- cpukit/score/cpu/aarch64/aarch64-exception-default.c
- cpukit/score/cpu/aarch64/aarch64-exception-default.S
- cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
- cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c
type: build
@@ -457,6 +457,8 @@ install:
  - cpukit/include/uuid/uuid.h
install-path: ${BSP_LIBDIR}
links:
- role: build-dependency
  uid: cpuaarch64
- role: build-dependency
  uid: cpuarm
- role: build-dependency