bsps/powerpc: Move exceptions support to bsps

This patch is a part of the BSP source reorganization.

Update #3285.
This commit is contained in:
Sebastian Huber
2018-03-13 16:24:16 +01:00
parent ff3b9aabca
commit bd1508019c
37 changed files with 53 additions and 65 deletions

View File

@@ -166,8 +166,7 @@ libbsp_a_SOURCES += ../../shared/tod.c tod/todcfg.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/clock.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc6xx/clock.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/altivec.rel
@@ -181,4 +180,5 @@ EXTRA_DIST += README LICENSE
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/beatnik/headers.am

View File

@@ -112,10 +112,10 @@ endif
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/gen5200/headers.am

View File

@@ -75,8 +75,7 @@ libbsp_a_SOURCES += spi/spi_init.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel \
../../../libcpu/@RTEMS_CPU@/mpc83xx/i2c.rel \
../../../libcpu/@RTEMS_CPU@/mpc83xx/spi.rel \
@@ -91,4 +90,5 @@ EXTRA_DIST += README.mpc8349eamds
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/gen83xx/headers.am

View File

@@ -45,10 +45,10 @@ endif
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/ppc403/clock.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/ppc403/clock.rel \
../../../libcpu/@RTEMS_CPU@/ppc403/timer.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/haleakala/headers.am

View File

@@ -120,7 +120,6 @@ libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = \
polledIO.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/clock.rel \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/altivec.rel
@@ -134,4 +133,5 @@ EXTRA_DIST += times.mcp750 times.mvme2307
include $(top_srcdir)/../../../../automake/local.am
include $(top_srcdir)/../../../../automake/subdirs.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/motorola_powerpc/headers.am

View File

@@ -114,9 +114,9 @@ libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@RTEMS_CPU_MODEL@/misc.rel \
../../../libcpu/@RTEMS_CPU@/@RTEMS_CPU_MODEL@/edma.rel \
../../../libcpu/@RTEMS_CPU@/@RTEMS_CPU_MODEL@/emios.rel \
../../../libcpu/@RTEMS_CPU@/@RTEMS_CPU_MODEL@/dspi.rel \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/@RTEMS_CPU_MODEL@/timer.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/mpc55xxevb/headers.am

View File

@@ -56,8 +56,7 @@ endif
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc8260/console-generic.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc8260/console-generic.rel \
../../../libcpu/@RTEMS_CPU@/mpc8260/cpm.rel \
../../../libcpu/@RTEMS_CPU@/mpc8260/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc8260/timer.rel
@@ -66,4 +65,5 @@ EXTRA_DIST += times
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/mpc8260ads/headers.am

View File

@@ -110,8 +110,7 @@ libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/e500/clock.rel \
../../../libcpu/@RTEMS_CPU@/e500/timer.rel \
../../../libcpu/@RTEMS_CPU@/e500/mmu.rel \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
../../../libcpu/@RTEMS_CPU@/e500/mmu.rel
if HAS_NETWORKING
libbsp_a_LIBADD += network.rel
@@ -121,4 +120,5 @@ EXTRA_DIST += LICENSE README KNOWN_PROBLEMS
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/mvme3100/headers.am

View File

@@ -88,7 +88,6 @@ libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = \
../../../libcpu/@RTEMS_CPU@/mpc6xx/clock.rel \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel\
../../../libcpu/@RTEMS_CPU@/mpc6xx/altivec.rel
@@ -99,4 +98,5 @@ endif
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/mvme5500/headers.am

View File

@@ -64,11 +64,11 @@ libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/irq/ppc-irq-legacy.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc6xx/clock.rel \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/altivec.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/psim/headers.am

View File

@@ -53,10 +53,10 @@ libbsp_a_SOURCES = $(startup_SOURCES) $(clock_SOURCES) $(console_SOURCES) \
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/mpc6xx/mmu.rel \
../../../libcpu/@RTEMS_CPU@/mpc6xx/timer.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/qemuppc/headers.am

View File

@@ -98,8 +98,7 @@ libbsp_a_SOURCES += shmsupp/lock.S \
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/e500/mmu.rel
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/e500/mmu.rel
if HAS_NETWORKING
libbsp_a_SOURCES += network/network.c \
@@ -113,4 +112,5 @@ endif
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/qoriq/headers.am

View File

@@ -36,14 +36,16 @@ libbsp_a_SOURCES += ../../shared/bspreset.c
libbsp_a_SOURCES += startup/tm27supp.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_print.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/ss555/start/vectors_init.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/ss555/start/vectors.S
libbsp_a_LIBADD = \
../../../libcpu/@RTEMS_CPU@/mpc5xx/clock.rel \
../../../libcpu/@RTEMS_CPU@/mpc5xx/console-generic.rel \
../../../libcpu/@RTEMS_CPU@/mpc5xx/exceptions.rel \
../../../libcpu/@RTEMS_CPU@/mpc5xx/irq.rel \
../../../libcpu/@RTEMS_CPU@/mpc5xx/timer.rel \
../../../libcpu/@RTEMS_CPU@/mpc5xx/vectors.rel
../../../libcpu/@RTEMS_CPU@/mpc5xx/timer.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am

View File

@@ -66,12 +66,11 @@ libbsp_a_SOURCES += console/console.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
if HAS_SMP
libbsp_a_SOURCES += ../../shared/bspsmp.c
endif
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/t32mppc/headers.am

View File

@@ -65,7 +65,6 @@ endif
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = \
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/mpc8xx/console-generic.rel \
../../../libcpu/@RTEMS_CPU@/mpc8xx/cpm.rel \
../../../libcpu/@RTEMS_CPU@/mpc8xx/clock.rel \
@@ -74,4 +73,5 @@ libbsp_a_LIBADD = \
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/tqm8xx/headers.am

View File

@@ -68,8 +68,7 @@ libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/ppc403/timer.rel
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/virtex/headers.am

View File

@@ -47,12 +47,12 @@ libbsp_a_SOURCES += mmu/mmu.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/ppc403/clock.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/ppc403/clock.rel \
../../../libcpu/@RTEMS_CPU@/ppc403/timer.rel
EXTRA_DIST += times
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/virtex4/headers.am

View File

@@ -49,12 +49,12 @@ libbsp_a_SOURCES += mmu/mmu.c
libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cache/cache.c
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel \
../../../libcpu/@RTEMS_CPU@/e500/clock.rel \
libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/e500/clock.rel \
../../../libcpu/@RTEMS_CPU@/e500/timer.rel
EXTRA_DIST += times
include $(top_srcdir)/../../../../automake/local.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/shared.am
include $(srcdir)/../../../../../../bsps/powerpc/shared/exceptions.am
include $(srcdir)/../../../../../../bsps/powerpc/virtex5/headers.am

View File

@@ -6,33 +6,6 @@ EXTRA_DIST =
noinst_PROGRAMS =
if !mpc5xx
noinst_PROGRAMS += new-exceptions/exc_bspsupport.rel
new_exceptions_exc_bspsupport_rel_SOURCES = \
new-exceptions/bspsupport/ppc_exc_async_normal.S \
new-exceptions/bspsupport/ppc_exc_fatal.S \
new-exceptions/bspsupport/ppc_exc_print.c
if !qoriq
new_exceptions_exc_bspsupport_rel_SOURCES += \
new-exceptions/bspsupport/ppc-code-copy.c \
new-exceptions/bspsupport/ppc_exc.S \
new-exceptions/bspsupport/ppc_exc_naked.S \
new-exceptions/bspsupport/ppc_exc_hdl.c \
new-exceptions/bspsupport/ppc_exc_initialize.c \
new-exceptions/bspsupport/ppc_exc_global_handler.c \
new-exceptions/bspsupport/ppc_exc_categories.c \
new-exceptions/bspsupport/ppc_exc_address.c \
new-exceptions/bspsupport/ppc_exc_alignment.c \
new-exceptions/bspsupport/ppc_exc_prologue.c
endif
new_exceptions_exc_bspsupport_rel_CPPFLAGS = $(AM_CPPFLAGS)
new_exceptions_exc_bspsupport_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
endif
EXTRA_DIST += new-exceptions/bspsupport/README
EXTRA_DIST += new-exceptions/bspsupport/ppc_exc_test.c
EXTRA_DIST += ppc403/README ppc403/vectors/README
if ppc4xx
# ppc403/clock
@@ -105,13 +78,6 @@ noinst_PROGRAMS += mpc5xx/timer.rel
mpc5xx_timer_rel_SOURCES = mpc5xx/timer/timer.c
mpc5xx_timer_rel_CPPFLAGS = $(AM_CPPFLAGS)
mpc5xx_timer_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
# mpc5xx/vectors
noinst_PROGRAMS += mpc5xx/vectors.rel
mpc5xx_vectors_rel_SOURCES = mpc5xx/vectors/vectors_init.c mpc5xx/vectors/vectors.S \
new-exceptions/bspsupport/ppc_exc_print.c
mpc5xx_vectors_rel_CPPFLAGS = $(AM_CPPFLAGS)
mpc5xx_vectors_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
endif
if mpc6xx

View File

@@ -1,201 +0,0 @@
/*
* vectors.S
*
* This file contains the assembly code for the PowerPC exception veneers
* for RTEMS.
*
*
* MPC5xx port sponsored by Defence Research and Development Canada - Suffield
* Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
*
* Derived from libbsp/powerpc/mbx8xx/vectors/vectors.S,
*
* (c) 1999, Eric Valette valette@crf.canon.fr
*/
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <libcpu/vectors.h>
#define SYNC \
sync; \
isync
/*
* Hardware exception vector table.
*
* The MPC555 can be configured to use a compressed vector table with 8
* bytes per entry, rather than the usual 0x100 bytes of other PowerPC
* devices. The following macro uses this feature to save the better part
* of 8 kbytes of flash ROM.
*
* Each vector table entry has room for only a simple branch instruction
* which branches to a prologue specific to that exception. This
* exception-specific prologue begins the context save, loads the exception
* number into a register, and jumps to a common exception prologue, below.
*/
.macro vectors num=0, total=NUM_EXCEPTIONS /* create vector table */
/* vector table entry */
.section .vectors, "ax"
ba specific_prologue\@ /* run specific prologue */
.long 0 /* each entry is 8 bytes */
/* exception-specific prologue */
.text
specific_prologue\@:
stwu r1, -EXCEPTION_FRAME_END(r1) /* open stack frame */
stw r4, GPR4_OFFSET(r1) /* preserve register */
li r4, \num /* get exception number */
b common_prologue /* run common prologue */
/* invoke macro recursively to create remainder of table */
.if \total - (\num + 1)
vectors "(\num + 1)", \total
.endif
.endm
/* invoke macro to create entire vector table */
vectors
/*
* Common exception prologue.
*
* Because the MPC555 vector table is in flash ROM, it's not possible to
* change the exception handlers by overwriting them at run-time, so this
* common exception prologue uses a table of exception handler pointers to
* provide equivalent flexibility.
*
* When the actual exception handler is run, R1 points to the base of a new
* exception stack frame, in which R3, R4 and LR have been saved. R4 holds
* the exception number.
*/
.text
common_prologue:
stw r3, GPR3_OFFSET(r1) /* preserve registers */
mflr r3
stw r3, EXC_LR_OFFSET(r1)
slwi r3, r4, 2 /* make table offset */
addis r3, r3, exception_handler_table@ha /* point to entry */
addi r3, r3, exception_handler_table@l
lwz r3, 0(r3) /* get entry */
mtlr r3 /* run it */
blr
/*
* Default exception handler.
*
* The function initialize_exceptions() initializes all of the entries in
* the exception handler table with pointers to this routine, which saves
* the remainder of the interrupted code's state, then calls
* C_default_exception_handler() to dump registers.
*
* On entry, R1 points to a new exception stack frame in which R3, R4, and
* LR have been saved. R4 holds the exception number.
*/
.text
PUBLIC_VAR(default_exception_handler)
SYM (default_exception_handler):
/*
* Save the interrupted code's program counter and MSR. Beyond this
* point, all exceptions are recoverable. Use an RCPU-specific SPR
* to set the RI bit in the MSR to indicate the recoverable state.
*/
mfsrr0 r3
stw r3, SRR0_FRAME_OFFSET(r1)
mfsrr1 r3
stw r3, SRR1_FRAME_OFFSET(r1)
mtspr eid, r3 /* set MSR[RI], clear MSR[EE] */
SYNC
/*
* Save the remainder of the general-purpose registers.
*
* Compute the value of R1 at exception entry before storing it in
* the frame.
*
* Note that R2 should never change (it's the EABI pointer to
* .sdata2), but we save it just in case.
*
* Recall that R3 and R4 were saved by the specific- and
* common-exception handlers before entry to this routine.
*/
stw r0, GPR0_OFFSET(r1)
addi r0, r1, EXCEPTION_FRAME_END
stw r0, GPR1_OFFSET(r1)
stw r2, GPR2_OFFSET(r1)
stmw r5, GPR5_OFFSET(r1) /* save R5 to R31 */
/*
* Save the remainder of the UISA special-purpose registers. Recall
* that LR was saved before entry.
*/
mfcr r0
stw r0, EXC_CR_OFFSET(r1)
mfctr r0
stw r0, EXC_CTR_OFFSET(r1)
mfxer r0
stw r0, EXC_XER_OFFSET(r1)
/*
* Call C-language portion of the default exception handler, passing
* in the address of the frame.
*
* To simplify things a bit, we assume that the target routine is
* within +/- 32 Mbyte from here, which is a reasonable assumption
* on the MPC555.
*/
stw r4, EXCEPTION_NUMBER_OFFSET(r1) /* save exception number */
addi r3, r1, 0x8 /* get frame address */
bl C_default_exception_handler /* call handler */
/*
* Restore UISA special-purpose registers.
*/
lwz r0, EXC_XER_OFFSET(r1)
mtxer r0
lwz r0, EXC_CTR_OFFSET(r1)
mtctr r0
lwz r0, EXC_CR_OFFSET(r1)
mtcr r0
lwz r0, EXC_LR_OFFSET(r1)
mtlr r0
/*
* Restore most general-purpose registers.
*/
lmw r2, GPR2_OFFSET(r1)
/*
* Restore the interrupted code's program counter and MSR, but first
* use an RCPU-specific special-purpose register to clear the RI
* bit, indicating that exceptions are temporarily non-recoverable.
*/
mtspr nri, r0 /* clear MSR[RI] */
SYNC
lwz r0, SRR1_FRAME_OFFSET(r1)
mtsrr1 r0
lwz r0, SRR0_FRAME_OFFSET(r1)
mtsrr0 r0
/*
* Restore the final GPR, close the stack frame, and return to the
* interrupted code.
*/
lwz r0, GPR0_OFFSET(r1)
addi r1, r1, EXCEPTION_FRAME_END
SYNC
rfi

View File

@@ -1,137 +0,0 @@
/*
 * vectors_init.c Exception handling initialisation (and generic handler).
*
* This include file describe the data structure and the functions implemented
* by rtems to handle exceptions.
*/
/*
* MPC5xx port sponsored by Defence Research and Development Canada - Suffield
* Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
*
* Derived from libbsp/powerpc/mbx8xx/vectors/vectors_init.c:
*
 * Copyright (C) 1999 valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <inttypes.h>
#include <rtems/bspIo.h>
#include <libcpu/vectors.h>
#include <libcpu/raw_exception.h>
#include <bsp/irq.h>
extern rtems_exception_handler_t default_exception_handler;
static rtems_raw_except_global_settings exception_config;
static rtems_raw_except_connect_data exception_table[NUM_EXCEPTIONS];
rtems_exception_handler_t* exception_handler_table[NUM_EXCEPTIONS];
/*
 * Default C-level exception handler.
 *
 * Called (via the exception handler table) by the assembly veneer
 * default_exception_handler with excPtr pointing at the register
 * context it saved on the stack.  Dumps the complete context with
 * printk, then either returns (recoverable exception) or spins
 * forever (unrecoverable).
 *
 * Only the decrementer exception — and, when TEST_RAW_EXCEPTION_CODE
 * is defined, the system-call exception — is treated as recoverable.
 */
void C_default_exception_handler(CPU_Exception_frame* excPtr)
{
int recoverable = 0;
/* Dump the faulting context: vector number, PC/MSR, then all GPRs. */
printk("exception handler called for exception %" PRIu32 "\n",
excPtr->_EXC_number);
printk("\t Next PC or Address of fault = %" PRIxPTR "\n", excPtr->EXC_SRR0);
printk("\t Saved MSR = %" PRIxPTR "\n", excPtr->EXC_SRR1);
printk("\t R0 = %" PRIxPTR "\n", excPtr->GPR0);
printk("\t R1 = %" PRIxPTR "\n", excPtr->GPR1);
printk("\t R2 = %" PRIxPTR "\n", excPtr->GPR2);
printk("\t R3 = %" PRIxPTR "\n", excPtr->GPR3);
printk("\t R4 = %" PRIxPTR "\n", excPtr->GPR4);
printk("\t R5 = %" PRIxPTR "\n", excPtr->GPR5);
printk("\t R6 = %" PRIxPTR "\n", excPtr->GPR6);
printk("\t R7 = %" PRIxPTR "\n", excPtr->GPR7);
printk("\t R8 = %" PRIxPTR "\n", excPtr->GPR8);
printk("\t R9 = %" PRIxPTR "\n", excPtr->GPR9);
printk("\t R10 = %" PRIxPTR "\n", excPtr->GPR10);
printk("\t R11 = %" PRIxPTR "\n", excPtr->GPR11);
printk("\t R12 = %" PRIxPTR "\n", excPtr->GPR12);
printk("\t R13 = %" PRIxPTR "\n", excPtr->GPR13);
printk("\t R14 = %" PRIxPTR "\n", excPtr->GPR14);
printk("\t R15 = %" PRIxPTR "\n", excPtr->GPR15);
printk("\t R16 = %" PRIxPTR "\n", excPtr->GPR16);
printk("\t R17 = %" PRIxPTR "\n", excPtr->GPR17);
printk("\t R18 = %" PRIxPTR "\n", excPtr->GPR18);
printk("\t R19 = %" PRIxPTR "\n", excPtr->GPR19);
printk("\t R20 = %" PRIxPTR "\n", excPtr->GPR20);
printk("\t R21 = %" PRIxPTR "\n", excPtr->GPR21);
printk("\t R22 = %" PRIxPTR "\n", excPtr->GPR22);
printk("\t R23 = %" PRIxPTR "\n", excPtr->GPR23);
printk("\t R24 = %" PRIxPTR "\n", excPtr->GPR24);
printk("\t R25 = %" PRIxPTR "\n", excPtr->GPR25);
printk("\t R26 = %" PRIxPTR "\n", excPtr->GPR26);
printk("\t R27 = %" PRIxPTR "\n", excPtr->GPR27);
printk("\t R28 = %" PRIxPTR "\n", excPtr->GPR28);
printk("\t R29 = %" PRIxPTR "\n", excPtr->GPR29);
printk("\t R30 = %" PRIxPTR "\n", excPtr->GPR30);
printk("\t R31 = %" PRIxPTR "\n", excPtr->GPR31);
/* UISA special-purpose registers saved by the veneer. */
printk("\t CR = %" PRIx32 "\n", excPtr->EXC_CR);
printk("\t CTR = %" PRIxPTR "\n", excPtr->EXC_CTR);
printk("\t XER = %" PRIx32 "\n", excPtr->EXC_XER);
printk("\t LR = %" PRIxPTR "\n", excPtr->EXC_LR);
/* Decrementer interrupts are always survivable. */
if (excPtr->_EXC_number == ASM_DEC_VECTOR)
recoverable = 1;
/*
 * System calls are only treated as recoverable when the raw-exception
 * test code is built in; both #ifdef branches are the body of this if.
 */
if (excPtr->_EXC_number == ASM_SYS_VECTOR)
#ifdef TEST_RAW_EXCEPTION_CODE
recoverable = 1;
#else
recoverable = 0;
#endif
if (!recoverable) {
/* No way back to the interrupted code — halt here. */
printk("unrecoverable exception!!! Push reset button\n");
while(1);
}
}
/*
 * Do-nothing on/off hook installed for every raw-exception table entry:
 * MPC5xx exception vectors require no per-vector (de)activation.
 */
static void nop_except_enable(const rtems_raw_except_connect_data* ptr)
{
(void) ptr; /* intentionally unused */
}
/*
 * isOn() hook for the raw-exception table: on this BSP every vector is
 * considered permanently enabled, so the answer is always "yes".
 */
static int except_always_enabled(const rtems_raw_except_connect_data* ptr)
{
(void) ptr; /* intentionally unused */
return 1;
}
/*
 * Populate the raw-exception connect table so that every vector is
 * routed to default_exception_handler, then hand the table to the
 * mpc5xx exceptions module, which installs the handler pointers into
 * exception_handler_table.  Locks the system with printk output if the
 * module rejects the configuration.
 */
void initialize_exceptions(void)
{
int i;
/*
 * Initialize all entries of the exception table with a description of the
 * default exception handler.
 */
exception_config.exceptSize = NUM_EXCEPTIONS;
exception_config.rawExceptHdlTbl = &exception_table[0];
exception_config.defaultRawEntry.exceptIndex = 0;
exception_config.defaultRawEntry.hdl.vector = 0;
exception_config.defaultRawEntry.hdl.raw_hdl = default_exception_handler;
for (i = 0; i < exception_config.exceptSize; i++) {
printk("installing exception number %d\n", i);
exception_table[i].exceptIndex = i;
/* Copy the default handler descriptor, then patch in this vector. */
exception_table[i].hdl = exception_config.defaultRawEntry.hdl;
exception_table[i].hdl.vector = i;
/* No per-vector enable/disable needed; report "always on". */
exception_table[i].on = nop_except_enable;
exception_table[i].off = nop_except_enable;
exception_table[i].isOn = except_always_enabled;
}
/*
 * Now pass the initialized exception table to the exceptions module which
 * will install the handler pointers in the exception handler table.
 */
if (!mpc5xx_init_exceptions(&exception_config)) {
/*
 * At this stage we may not call CPU_Panic because it uses exceptions!!!
 */
printk("Exception handling initialization failed\n");
printk("System locked\n"); while(1);
}
else {
printk("Exception handling initialization done\n");
}
}

View File

@@ -1,431 +0,0 @@
BSP support middleware for 'new-exception' style PPC.
T. Straumann, 12/2007
EXPLANATION OF SOME TERMS
=========================
In this README we refer to exceptions and sometimes
to 'interrupts'. Interrupts simply are asynchronous
exceptions such as 'external' exceptions or 'decrementer'
/'timer' exceptions.
Traditionally (in the libbsp/powerpc/shared implementation),
synchronous exceptions are handled entirely in the context
of the interrupted task, i.e., the exception handlers use
the task's stack and leave thread-dispatching enabled,
i.e., scheduling is allowed to happen 'in the middle'
of an exception handler.
Asynchronous exceptions/interrupts, OTOH, use a dedicated
interrupt stack and defer scheduling until after the last
nested ISR has finished.
RATIONALE
=========
The 'new-exception' processing API works at a rather
low level. It provides functions for
installing low-level code (which must be written in
assembly code) directly into the PPC vector area.
It is entirely left to the BSP to implement low-level
exception handlers and to implement an API for
C-level exception handlers and to implement the
RTEMS interrupt API defined in cpukit/include/rtems/irq.h.
The result has been a Darwinian evolution of variants
of this code which is very hard to maintain. Mostly,
the four files
libbsp/powerpc/shared/vectors/vectors.S
(low-level handlers for 'normal' or 'synchronous'
exceptions. This code saves all registers on
the interrupted task's stack and calls a
'global' C (high-level) exception handler.
libbsp/powerpc/shared/vectors/vectors_init.c
(default implementation of the 'global' C
exception handler and initialization of the
vector table with trampoline code that ends up
calling the 'global' handler.
libbsp/powerpc/shared/irq/irq_asm.S
(low-level handlers for 'IRQ'-type or 'asynchronous'
exceptions. This code is very similar to vectors.S
but does slightly more: after saving (only
the minimal set of) registers on the interrupted
task's stack it disables thread-dispatching, switches
to a dedicated ISR stack (if not already there which is
possible for nested interrupts) and then executes the high
level (C) interrupt dispatcher 'C_dispatch_irq_handler()'.
After 'C_dispatch_irq_handler()' returns the stack
is switched back (if not a nested IRQ), thread-dispatching
is re-enabled, signals are delivered and a context
switch is initiated if necessary.
libbsp/powerpc/shared/irq/irq.c
implementation of the RTEMS ('new') IRQ API defined
in cpukit/include/rtems/irq.h.
have been copied and modified by a myriad of BSPs leading
to many slightly different variants.
THE BSP-SUPPORT MIDDLEWARE
==========================
The code in this directory is an attempt to provide the
functionality implemented by the aforementioned files
in a more generic way so that it can be shared by more
BSPs rather than being copied and modified.
Another important goal was eliminating all conditional
compilation which tested for specific CPU models by means
of C-preprocessor symbols (#ifdef ppcXYZ).
Instead, appropriate run-time checks for features defined
in cpuIdent.h are used.
The assembly code has been (almost completely) rewritten
and it tries to address a few problems while deliberately
trying to live with the existing APIs and semantics
(how these could be improved is beyond the scope but
that they could is beyond doubt...):
- some PPCs don't fit into the classic scheme where
the exception vector addresses all were multiples of
0x100 (some vectors are spaced as closely as 0x10).
The API should not expose vector offsets but only
vector numbers which can be considered an abstract
entity. The mapping from vector numbers to actual
address offsets is performed inside 'raw_exception.c'
- having to provide assembly prologue code in order to
hook an exception is cumbersome. The middleware
tries to free users and BSP writers from this issue
by dealing with assembly prologues entirely inside
the middleware. The user can hook ordinary C routines.
- the advent of BookE CPUs brought interrupts with
multiple priorities: non-critical and critical
interrupts. Unfortunately, these are not entirely
trivial to deal with (unless critical interrupts
are permanently disabled [which is still the case:
ATM rtems_interrupt_enable()/rtems_interrupt_disable()
only deal with EE]). See separate section titled
'race condition...' below for a detailed explanation.
STRUCTURE
=========
The middleware uses exception 'categories' or
'flavors' as defined in raw_exception.h.
The middleware consists of the following parts:
1 small 'prologue' snippets that encode the
vector information and jump to appropriate
'flavored-wrapper' code for further handling.
Some PPC exceptions are spaced only
16-bytes apart, so the generic
prologue snippets are only 16-bytes long.
Prologues for synchronous and asynchronous
exceptions differ.
2 flavored-wrappers which sets up a stack frame
and do things that are specific for
different 'flavors' of exceptions which
currently are
- classic PPC exception
- ppc405 critical exception
- bookE critical exception
- e500 machine check exception
Assembler macros are provided and they can be
expanded to generate prologue templates and
flavored-wrappers for different flavors
of exceptions. Currently, there are two prologues
for all aforementioned flavors. One for synchronous
exceptions, the other for interrupts.
3 generic assembly-level code that does the bulk
of saving register context and calling C-code.
4 C-code (ppc_exc_hdl.c) for dispatching BSP/user
handlers.
5 Initialization code (vectors_init.c). All valid
exceptions for the detected CPU are determined
and a fitting prologue snippet for the exception
category (classic, critical, synchronous or IRQ, ...)
is generated from a template and the vector number
and then installed in the vector area.
The user/BSP only has to deal with installing
high-level handlers but by default, the standard
'C_dispatch_irq_handler' routine is hooked to
the external and 'decrementer' exceptions.
6 RTEMS IRQ API is implemented by 'irq.c'. It
relies on a few routines to be provided by
the BSP.
USAGE
=====
BSP writers must provide the following routines
(declared in irq_supp.h):
Interrupt controller (PIC) support:
BSP_setup_the_pic() - initialize PIC hardware
BSP_enable_irq_at_pic() - enable/disable given irq at PIC; IGNORE if
BSP_disable_irq_at_pic() irq number out of range!
C_dispatch_irq_handler() - handle irqs and dispatch user handlers
this routine SHOULD use the inline
fragment
bsp_irq_dispatch_list()
provided by irq_supp.h
for calling user handlers.
BSP initialization; call
rtems_status_code sc = ppc_exc_initialize(
PPC_INTERRUPT_DISABLE_MASK_DEFAULT,
interrupt_stack_begin,
interrupt_stack_size
);
if (sc != RTEMS_SUCCESSFUL) {
BSP_panic("cannot initialize exceptions");
}
BSP_rtems_irq_mngt_set();
Note that BSP_rtems_irq_mngt_set() hooks the C_dispatch_irq_handler()
to the external and decrementer (PIT exception for bookE; a decrementer
emulation is activated) exceptions for backwards compatibility reasons.
C_dispatch_irq_handler() must therefore be able to support these two
exceptions.
However, the BSP implementor is free to either disconnect
C_dispatch_irq_handler() from either of these exceptions, to connect
other handlers (e.g., for SYSMGMT exceptions) or to hook
C_dispatch_irq_handler() to yet more exceptions etc. *after*
BSP_rtems_irq_mngt_set() executed.
Hooking exceptions:
The API defined in vectors.h declares routines for connecting
a C-handler to any exception. Note that the execution environment
of the C-handler depends on the exception being synchronous or
asynchronous:
- synchronous exceptions use the task stack and do not
disable thread dispatching scheduling.
- asynchronous exceptions use a dedicated stack and do
defer thread dispatching until handling has (almost) finished.
By inspecting the vector number stored in the exception frame
the nature of the exception can be determined: asynchronous
exceptions have the most significant bit(s) set.
Any exception for which no dedicated handler is registered
ends up being handled by the routine addressed by the
(traditional) 'globalExcHdl' function pointer.
Makefile.am:
- make sure the Makefile.am does NOT use any of the files
vectors.S, vectors.h, vectors_init.c, irq_asm.S, irq.c
from 'libbsp/powerpc/shared' NOR must the BSP implement
any functionality that is provided by those files (and
now the middleware).
- (probably) remove 'vectors.rel' and anything related
- add
../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/vectors.h
../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/irq_supp.h
to 'include_bsp_HEADERS'
- add
../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
../../../libcpu/@RTEMS_CPU@/@exceptions@/irq_bspsupport.rel
to 'libbsp_a_LIBADD'
(irq.c is in a separate '.rel' so that you can get support
for exceptions only).
CAVEATS
=======
On classic PPCs, early (and late) parts of the low-level
exception handling code run with the MMU disabled which means
that the default caching attributes (write-back) are in effect
(thanks to Thomas Doerfler for bringing this up).
The code currently assumes that the MMU translations
for the task and interrupt stacks as well as some
variables in the data-area MATCH THE DEFAULT CACHING
ATTRIBUTES (this assumption also holds for the old code
in libbsp/powerpc/shared/vectors ../irq).
During initialization of exception handling, a crude test
is performed to check if memory seems to have the write-back
attribute. The 'dcbz' instruction should - on most PPCs - cause
an alignment exception if the tested cache-line does not
have this attribute.
BSPs which entirely disable caching (e.g., by physically
disabling the cache(s)) should set the variable
ppc_exc_cache_wb_check = 0
prior to calling initialize_exceptions().
Note that this check does not catch all possible
misconfigurations (e.g., on the 860, the default attribute
is AFAIK [libcpu/powerpc/mpc8xx/mmu/mmu_init.c] set to
'caching-disabled' which is potentially harmful but
this situation is not detected).
RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
====================================================
The problematic race condition is as follows:
Usually, ISRs are allowed to use certain OS
primitives such as e.g., releasing a semaphore.
In order to prevent a context switch from happening
immediately (this would result in the ISR being
suspended), thread-dispatching must be disabled
around execution of the ISR. However, on the
PPC architecture it is neither possible to
atomically disable ALL interrupts nor is it
possible to atomically increment a variable
(the thread-dispatch-disable level).
Hence, the following sequence of events could
occur:
1) low-priority interrupt (LPI) is taken
2) before the LPI can increase the
thread-dispatch-disable level or disable
high-priority interrupts, a high-priority
interrupt (HPI) happens
3) HPI increases dispatch-disable level
4) HPI executes high-priority ISR which e.g.,
posts a semaphore
5) HPI decreases dispatch-disable level and
realizes that a context switch is necessary
6) context switch is performed since LPI had
not gotten to the point where it could
increase the dispatch-disable level.
At this point, the LPI has been effectively
suspended which means that the low-priority
ISR will not be executed until the task
interrupted in 1) is scheduled again!
The solution to this problem is letting the
first machine instruction of the low-priority
exception handler write a non-zero value to
a variable in memory:
ee_vector_offset:
stw r1, ee_lock@sdarel(r13)
.. save some registers etc..
.. increase thread-dispatch-disable-level
.. clear 'ee_lock' variable
After the HPI decrements the dispatch-disable level
it checks 'ee_lock' and refrains from performing
a context switch if 'ee_lock' is nonzero. Since
the LPI will complete execution subsequently it
will eventually do the context switch.
For the single-instruction write operation we must
a) write a register that is guaranteed to be
non-zero (e.g., R1 (stack pointer) or R13
(SVR4 short-data area)).
b) use an addressing mode that doesn't require
loading any registers. The short-data area
pointer R13 is appropriate.
CAVEAT: unfortunately, this method by itself
is *NOT* enough because raising a low-priority
exception and executing the first instruction
of the handler is *NOT* atomic. Hence, the following
could occur:
1) LPI is taken
2) PC is saved in SRR0, PC is loaded with
address of 'locking instruction'
stw r1, ee_lock@sdarel(r13)
3) ==> critical interrupt happens
4) PC (containing address of locking instruction)
is saved in CSRR0
5) HPI is dispatched
For the HPI to correctly handle this situation
it does the following:
a) increase thread-dispatch disable level
b) do interrupt work
c) decrease thread-dispatch disable level
d) if ( dispatch-disable level == 0 )
d1) check ee_lock
d2) check instruction at *CSRR0
d3) do a context switch if necessary ONLY IF
ee_lock is NOT set AND *CSRR0 is NOT the
'locking instruction'
this works because the address of 'ee_lock'
is embedded in the locking instruction
'stw r1, ee_lock@sdarel(r13)' and because the
registers r1/r13 have a special purpose
(stack-pointer, SDA-pointer). Hence it is safe
to assume that the particular instruction
'stw r1,ee_lock@sdarel(r13)' never occurs
anywhere else.
Another note: this algorithm also makes sure
that ONLY nested ASYNCHRONOUS interrupts which
enable/disable thread-dispatching and check if
thread-dispatching is required before returning
control engage in this locking protocol. It is
important that when a critical, asynchronous
interrupt interrupts a 'synchronous' exception
(which does not disable thread-dispatching)
the thread-dispatching operation upon return of
the HPI is NOT deferred (because the synchronous
handler would not, eventually, check for a
dispatch requirement).
And one more note: We never want to disable
machine-check exceptions to avoid a checkstop.
This means that we cannot use enabling/disabling
this type of exception for protection of critical
OS data structures.
Therefore, calling OS primitives from an asynchronous
machine-check handler is ILLEGAL and not supported.
Since machine-checks can happen anytime it is not
legal to test if a deferred context switch should
be performed when the asynchronous machine-check
handler returns (since _Context_Switch_is_necessary
could have been set by an IRQ-protected section of
code that was hit by the machine-check).
Note that synchronous machine-checks can legally
use OS primitives and currently there are no
asynchronous machine-checks defined.
Epilogue:
You have to disable all asynchronous exceptions which may cause a context
switch before the restoring of the SRRs and the RFI. Reason:
Suppose we are in the epilogue code of an EE between the move to SRRs and
the RFI. Here EE is disabled but CE is enabled. Now a CE happens. The
handler decides that a thread dispatch is necessary. The CE checks if
this is possible:
o The thread dispatch disable level is 0, because the EE has already
decremented it.
o The EE lock variable is cleared.
o The EE is not executing its first instruction.
Hence a thread dispatch is allowed. The CE issues a context switch to a
task with EE enabled (for example a task waiting for a semaphore). Now an
EE happens and the current content of the SRRs is lost.

View File

@@ -1,39 +0,0 @@
/**
* @file
*
* @ingroup powerpc_shared
*
* @brief Code copy implementation.
*/
/*
* Copyright (c) 2009
* embedded brains GmbH
* Obere Lagerstr. 30
* D-82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <string.h>
#include <rtems.h>
#include <libcpu/powerpc-utility.h>
/**
 * @brief Copies code from @a src to @a dest and keeps the caches coherent.
 *
 * If the destination already contains the code, nothing is done.
 * Otherwise the bytes are copied and the data cache is flushed to memory
 * before the corresponding instruction cache lines are invalidated, so
 * that subsequent execution fetches the new code.
 *
 * @param dest destination of the code area (will be executed)
 * @param src source of the code
 * @param n size of the code area in bytes
 */
void ppc_code_copy(void *dest, const void *src, size_t n)
{
  if (memcmp(dest, src, n) == 0) {
    /* Destination is already up to date */
    return;
  }

  memcpy(dest, src, n);

  /* Push the copied bytes out of the data cache into memory */
  rtems_cache_flush_multiple_data_lines(dest, n);
  ppc_synchronize_data();

  /* Discard possibly stale instructions for the copied area */
  rtems_cache_invalidate_multiple_instruction_lines(dest, n);
  ppc_synchronize_instructions();
}

View File

@@ -1,177 +0,0 @@
/*
* (c) 1999, Eric Valette valette@crf.canon.fr
*
* Modified and partially rewritten by Till Straumann, 2007
*
* Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
*
* Low-level assembly code for PPC exceptions.
*
* This file was written with the goal to eliminate
* ALL #ifdef <cpu_flavor> conditionals -- please do not
* reintroduce such statements.
*/
/* Load macro definitions */
#include <rtems/asm.h>
#include <rtems/system.h>
#include <rtems/score/percpu.h>
#include "ppc_exc_asm_macros.h"
/******************************************************/
/* PROLOGUES */
/******************************************************/
/*
* Expand prologue snippets for classic, ppc405-critical, bookE-critical
* and E500 machine-check, synchronous and asynchronous exceptions
*/
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
/* Each minimal prologue template occupies 4 * 4 = 16 bytes (four words) */
.global ppc_exc_min_prolog_size
ppc_exc_min_prolog_size = 4 * 4
/* Special prologue for 603e-style CPUs.
 *
 * 603e shadows GPR0..GPR3 for certain exceptions. We must switch
 * that off before we can use the stack pointer. Note that this is
 * ONLY safe if the shadowing is actually active -- otherwise, r1
 * is destroyed. We deliberately use r1 so problems become obvious
 * if this is misused!
 */
.global ppc_exc_tgpr_clr_prolog
ppc_exc_tgpr_clr_prolog:
mfmsr r1
/* Rotate by 0 with wrap-around mask 15..13: clears only MSR bit 14
 * (TGPR), switching the GPR0..GPR3 shadowing off. */
rlwinm r1,r1,0,15,13
mtmsr r1
isync
/* FALL THRU TO 'auto' PROLOG */
/* Determine vector dynamically/automatically
 *
 * BUT: - only standard exceptions (no critical ones)
 * - vector offset must be on 256 Byte boundary.
 */
.global ppc_exc_min_prolog_auto
ppc_exc_min_prolog_auto:
stwu r1, -EXCEPTION_FRAME_END(r1)
stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
/* The generated branch below links, so LR holds the vector address
 * afterwards; preserve the original LR in VECTOR_REGISTER. */
mflr VECTOR_REGISTER
/*
 * We store the absolute branch target address here. It will be used
 * to generate the branch operation in ppc_exc_make_prologue().
 *
 * We add one to request the link in the generated branch instruction.
 */
.int ppc_exc_wrap_auto + 1
.global ppc_exc_tgpr_clr_prolog_size
ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
/*
 * Automatic vector, asynchronous exception; however,
 * automatic vector calculation is less efficient than
 * using an explicit vector in a minimal prolog snippet.
 * The latter method is preferable since there usually
 * are few asynchronous exceptions.
 *
 * For generic exceptions (which are the bulk) using
 * the 'auto' prologue is OK since performance is not
 * really an issue.
 */
.global ppc_exc_min_prolog_auto_async
ppc_exc_min_prolog_auto_async:
/* First instruction writes the (non-zero) stack pointer to the lock
 * variable so a nesting critical interrupt can detect that this
 * prologue is in progress (see the race-condition notes in README). */
stw r1, ppc_exc_lock_std@sdarel(r13)
stw VECTOR_REGISTER, ppc_exc_vector_register_std@sdarel(r13)
mflr VECTOR_REGISTER
/*
 * We store the absolute branch target address here. It will be used
 * to generate the branch operation in ppc_exc_make_prologue().
 *
 * We add one to request the link in the generated branch instruction.
 */
.int ppc_exc_wrap_auto_async + 1
/******************************************************/
/* WRAPPERS */
/******************************************************/
/* Tag start and end of the wrappers.
* If exceptions are installed farther removed
* from the text area than 32M then the wrappers
* must be moved to an area that is reachable
* from where the prologues reside. Branches into
* C-code are far.
*/
.global __ppc_exc_wrappers_start
__ppc_exc_wrappers_start = .
/* Expand wrappers for different exception flavors */
/* Standard/classic powerpc */
WRAP _FLVR=std _PRI=std _SRR0=srr0 _SRR1=srr1 _RFI=rfi
/* ppc405 has a critical exception using srr2/srr3 */
WRAP _FLVR=p405_crit _PRI=crit _SRR0=srr2 _SRR1=srr3 _RFI=rfci
/* bookE has critical exception using csrr0 cssr1 */
WRAP _FLVR=bookE_crit _PRI=crit _SRR0=csrr0 _SRR1=csrr1 _RFI=rfci
/* e500 has machine-check exception using mcsrr0 mcssr1 */
WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
/* Wrapper target of the synchronous 'auto' prologue.
 * LR holds vector, VECTOR_REGISTER holds orig. LR */
.global ppc_exc_wrap_auto
ppc_exc_wrap_auto:
stw FRAME_REGISTER, FRAME_OFFSET(r1)
/* Find address where we jumped from */
mflr FRAME_REGISTER
/* Restore LR */
mtlr VECTOR_REGISTER
/* Compute vector into R3: rotate left by 24 (= shift right by 8, i.e.
 * divide by the 256 byte vector granularity) and keep bits 26..31. */
rlwinm VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
/*
 * We're now in almost the same state as if called by
 * min_prolog_std but we must skip saving FRAME_REGISTER
 * since that's done already
 */
b wrap_no_save_frame_register_std
/* Wrapper target of the asynchronous 'auto' prologue; the frame has
 * not been allocated yet in this variant. */
.global ppc_exc_wrap_auto_async
ppc_exc_wrap_auto_async:
stwu r1, -EXCEPTION_FRAME_END(r1)
stw FRAME_REGISTER, FRAME_OFFSET(r1)
/* find address where we jumped from */
mflr FRAME_REGISTER
/* restore LR */
mtlr VECTOR_REGISTER
/* set upper bits to indicate that non-volatile
 * registers should not be saved/restored.
 */
li VECTOR_REGISTER, 0xffff8000
/* compute vector into R3 (insert into the marker loaded above) */
rlwimi VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
/* we're now in almost the same state as if called by
 * min_prolog_std but we must skip saving FRAME_REGISTER
 * since that's done already
 */
b wrap_no_save_frame_register_std
.global __ppc_exc_wrappers_end
__ppc_exc_wrappers_end = .

View File

@@ -1,100 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* Copyright (C) 2009 embedded brains GmbH.
*
* Enhanced by Jay Kulpinski <jskulpin@eng01.gdds.com>
* to support 603, 603e, 604, 604e exceptions
*
* Moved to "libcpu/powerpc/new-exceptions" and consolidated
* by Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
* to be common for all PPCs with new exceptions.
*
* Derived from file "libcpu/powerpc/new-exceptions/raw_exception.c".
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems.h>
#include <bsp/vectors.h>
/*
* XXX: These values are choosen to directly generate the vector offsets for an
* e200z1 which has hard wired IVORs (IVOR0=0x00, IVOR1=0x10, IVOR2=0x20, ...).
*/
/*
 * Index: exception vector number, value: IVOR number.  The IVOR number is
 * shifted left by four bits to form the vector offset, see
 * ppc_exc_vector_address().
 */
static const uint8_t ivor_values [] = {
[ASM_BOOKE_CRIT_VECTOR] = 0,
[ASM_MACH_VECTOR] = 1,
[ASM_PROT_VECTOR] = 2,
[ASM_ISI_VECTOR] = 3,
[ASM_EXT_VECTOR] = 4,
[ASM_ALIGN_VECTOR] = 5,
[ASM_PROG_VECTOR] = 6,
[ASM_FLOAT_VECTOR] = 7,
[ASM_SYS_VECTOR] = 8,
[ASM_BOOKE_APU_VECTOR] = 9,
[ASM_BOOKE_DEC_VECTOR] = 10,
[ASM_BOOKE_FIT_VECTOR] = 11,
[ASM_BOOKE_WDOG_VECTOR] = 12,
[ASM_BOOKE_DTLBMISS_VECTOR] = 13,
[ASM_BOOKE_ITLBMISS_VECTOR] = 14,
[ASM_BOOKE_DEBUG_VECTOR] = 15,
[ASM_E500_SPE_UNAVAILABLE_VECTOR] = 16,
[ASM_E500_EMB_FP_DATA_VECTOR] = 17,
[ASM_E500_EMB_FP_ROUND_VECTOR] = 18,
[ASM_E500_PERFMON_VECTOR] = 19
};
/**
 * @brief Returns the address of exception vector @a vector relative to
 *   @a vector_base.
 *
 * The default is the classic layout with one 256 byte slot per vector.
 * AltiVec-capable CPUs, the PPC405 and standard Book E/E500 CPUs use
 * special offsets (for Book E/E500 the IVOR-style offsets from the
 * ivor_values table are used; unknown vectors map to offset zero).
 *
 * @param vector exception vector number
 * @param vector_base base address of the vector area
 *
 * @return Address of the vector within the vector area.
 */
void *ppc_exc_vector_address(unsigned vector, void *vector_base)
{
  /* Classic layout: one 256 byte slot per vector */
  uintptr_t vector_offset = vector << 8;

  if (ppc_cpu_has_altivec()) {
    if (vector == ASM_60X_VEC_VECTOR) {
      vector_offset = ASM_60X_VEC_VECTOR_OFFSET;
    }
  }

  if (ppc_cpu_is(PPC_405)) {
    switch (vector) {
      case ASM_BOOKE_FIT_VECTOR:
        vector_offset = ASM_PPC405_FIT_VECTOR_OFFSET;
        break;
      case ASM_BOOKE_WDOG_VECTOR:
        vector_offset = ASM_PPC405_WDOG_VECTOR_OFFSET;
        break;
      case ASM_TRACE_VECTOR:
        vector_offset = ASM_PPC405_TRACE_VECTOR_OFFSET;
        break;
      case ASM_PPC405_APU_UNAVAIL_VECTOR:
        vector_offset = ASM_60X_VEC_VECTOR_OFFSET;
        /* Explicit break instead of the previous implicit fall through
         * into the (empty) default case. */
        break;
      default:
        break;
    }
  }

  if (
    ppc_cpu_is_bookE() == PPC_BOOKE_STD
      || ppc_cpu_is_bookE() == PPC_BOOKE_E500
  ) {
    if (vector < sizeof(ivor_values) / sizeof(ivor_values [0])) {
      /* IVOR-style offset: IVOR number times 16 */
      vector_offset = ((uintptr_t) ivor_values [vector]) << 4;
    } else {
      vector_offset = 0;
    }
  }

  return (void *) ((uintptr_t) vector_base + vector_offset);
}

View File

@@ -1,43 +0,0 @@
/*
* Copyright (c) 2011 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Obere Lagerstr. 30
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems.h>
#include <bsp/vectors.h>
/**
 * @brief Default alignment exception handler which emulates the dcbz
 *   instruction in software.
 *
 * A dcbz raises an alignment exception on memory which cannot be
 * established in the data cache (e.g. cache-inhibited).  This handler
 * performs the architectural effect by hand: it zeroes the complete
 * cache line containing the effective address and resumes execution
 * after the faulting instruction.
 *
 * @param frame exception frame with the saved processor state
 * @param excNum exception vector number (unused)
 *
 * @retval 0 the instruction was a dcbz and has been emulated
 * @retval -1 the exception was caused by some other instruction
 */
int ppc_exc_alignment_handler(BSP_Exception_frame *frame, unsigned excNum)
{
  unsigned opcode = *(unsigned *) frame->EXC_SRR0;

  /* Do we have a dcbz instruction? */
  if ((opcode & 0xffe007ff) == 0x7c0007ec) {
    unsigned clsz = rtems_cache_get_data_line_size();

    /* Base register field rA (0 means a literal zero) and index field rB */
    unsigned a = (opcode >> 16) & 0x1f;
    unsigned b = (opcode >> 11) & 0x1f;
    PPC_GPR_TYPE *regs = &frame->GPR0;

    /*
     * Effective address aligned DOWN to the start of the containing
     * cache line: dcbz affects the whole line.  The previous mask
     * (clsz - 1) produced only the offset within the line, zeroing the
     * wrong memory; ~(clsz - 1) is the correct alignment mask.
     */
    unsigned *current = (unsigned *)
      (((a == 0 ? 0 : (unsigned) regs[a]) + (unsigned) regs[b]) & ~(clsz - 1));
    unsigned *end = current + clsz / sizeof(*current);

    /* Zero the complete cache line */
    while (current != end) {
      *current = 0;
      ++current;
    }

    /* Resume after the emulated instruction */
    frame->EXC_SRR0 += 4;
    return 0;
  } else {
    return -1;
  }
}

View File

@@ -1,471 +0,0 @@
/*
* Copyright (c) 2011, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bspopts.h>
#include <rtems/score/percpu.h>
#include <bsp/vectors.h>
#ifdef PPC_EXC_CONFIG_USE_FIXED_HANDLER
/*
 * Mapping of the scratch registers and the non-volatile frame register
 * to the offsets of their save slots in the interrupt frame.
 */
#define SCRATCH_0_REGISTER r0
#define SCRATCH_1_REGISTER r3
#define SCRATCH_2_REGISTER r4
#define SCRATCH_3_REGISTER r5
#define SCRATCH_4_REGISTER r6
#define SCRATCH_5_REGISTER r7
#define SCRATCH_6_REGISTER r8
#define SCRATCH_7_REGISTER r9
#define SCRATCH_8_REGISTER r10
#define SCRATCH_9_REGISTER r11
#define SCRATCH_10_REGISTER r12
#define FRAME_REGISTER r14
#define SCRATCH_0_OFFSET GPR0_OFFSET
#define SCRATCH_1_OFFSET GPR3_OFFSET
#define SCRATCH_2_OFFSET GPR4_OFFSET
#define SCRATCH_3_OFFSET GPR5_OFFSET
#define SCRATCH_4_OFFSET GPR6_OFFSET
#define SCRATCH_5_OFFSET GPR7_OFFSET
#define SCRATCH_6_OFFSET GPR8_OFFSET
#define SCRATCH_7_OFFSET GPR9_OFFSET
#define SCRATCH_8_OFFSET GPR10_OFFSET
#define SCRATCH_9_OFFSET GPR11_OFFSET
#define SCRATCH_10_OFFSET GPR12_OFFSET
#define FRAME_OFFSET PPC_EXC_INTERRUPT_FRAME_OFFSET
#ifdef RTEMS_PROFILING
/* Read the time base into \REG; the source SPR depends on the CPU model */
.macro GET_TIME_BASE REG
#if defined(__PPC_CPU_E6500__)
mfspr \REG, FSL_EIS_ATBL
#elif defined(ppc8540)
mfspr \REG, TBRL
#else /* ppc8540 */
mftb \REG
#endif /* ppc8540 */
.endm
#endif /* RTEMS_PROFILING */
.global ppc_exc_min_prolog_async_tmpl_normal
.global ppc_exc_interrupt
/*
 * Minimal asynchronous prologue template for the fixed high level
 * handler: allocate the interrupt frame, save one scratch register and
 * load the marker value before branching to ppc_exc_interrupt.
 */
ppc_exc_min_prolog_async_tmpl_normal:
stwu r1, -PPC_EXC_INTERRUPT_FRAME_SIZE(r1)
PPC_REG_STORE SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
/* Marker with upper bits set: non-volatile registers are not
 * saved/restored for this frame. */
li SCRATCH_1_REGISTER, 0xffff8000
/*
 * We store the absolute branch target address here. It will be used
 * to generate the branch operation in ppc_exc_make_prologue().
 */
.int ppc_exc_interrupt
ppc_exc_interrupt:
/* Save non-volatile FRAME_REGISTER */
PPC_REG_STORE FRAME_REGISTER, FRAME_OFFSET(r1)
#ifdef RTEMS_PROFILING
/* Get entry instant */
GET_TIME_BASE FRAME_REGISTER
stw FRAME_REGISTER, PPC_EXC_INTERRUPT_ENTRY_INSTANT_OFFSET(r1)
#endif /* RTEMS_PROFILING */
#ifdef __SPE__
/* Enable SPE */
mfmsr FRAME_REGISTER
oris FRAME_REGISTER, FRAME_REGISTER, MSR_SPE >> 16
mtmsr FRAME_REGISTER
isync
/*
 * Save high order part of SCRATCH_1_REGISTER here. The low order part
 * was saved in the minimal prologue.
 */
evmergehi SCRATCH_1_REGISTER, SCRATCH_1_REGISTER, FRAME_REGISTER
PPC_REG_STORE FRAME_REGISTER, GPR3_OFFSET(r1)
#endif
#if defined(PPC_MULTILIB_FPU) || defined(PPC_MULTILIB_ALTIVEC)
/* Enable FPU and/or AltiVec */
mfmsr FRAME_REGISTER
#ifdef PPC_MULTILIB_FPU
ori FRAME_REGISTER, FRAME_REGISTER, MSR_FP
#endif
#ifdef PPC_MULTILIB_ALTIVEC
oris FRAME_REGISTER, FRAME_REGISTER, MSR_VE >> 16
#endif
mtmsr FRAME_REGISTER
isync
#endif
/* Move frame pointer to non-volatile FRAME_REGISTER */
mr FRAME_REGISTER, r1
/*
 * Save volatile registers. The SCRATCH_1_REGISTER has been saved in
 * minimum prologue.
 */
PPC_GPR_STORE SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
#ifdef __powerpc64__
PPC_GPR_STORE r2, GPR2_OFFSET(r1)
LA32 r2, .TOC.
#endif
PPC_GPR_STORE SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
GET_SELF_CPU_CONTROL SCRATCH_2_REGISTER
PPC_GPR_STORE SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
PPC_GPR_STORE SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
PPC_GPR_STORE SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
PPC_GPR_STORE SCRATCH_6_REGISTER, SCRATCH_6_OFFSET(r1)
PPC_GPR_STORE SCRATCH_7_REGISTER, SCRATCH_7_OFFSET(r1)
PPC_GPR_STORE SCRATCH_8_REGISTER, SCRATCH_8_OFFSET(r1)
PPC_GPR_STORE SCRATCH_9_REGISTER, SCRATCH_9_OFFSET(r1)
PPC_GPR_STORE SCRATCH_10_REGISTER, SCRATCH_10_OFFSET(r1)
/* Load ISR nest level and thread dispatch disable level */
lwz SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_2_REGISTER)
lwz SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_2_REGISTER)
/* Save SRR0, SRR1, CR, XER, CTR, and LR */
mfsrr0 SCRATCH_0_REGISTER
mfsrr1 SCRATCH_5_REGISTER
mfcr SCRATCH_6_REGISTER
mfxer SCRATCH_7_REGISTER
mfctr SCRATCH_8_REGISTER
mflr SCRATCH_9_REGISTER
PPC_REG_STORE SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
PPC_REG_STORE SCRATCH_5_REGISTER, SRR1_FRAME_OFFSET(r1)
stw SCRATCH_6_REGISTER, EXC_CR_OFFSET(r1)
stw SCRATCH_7_REGISTER, EXC_XER_OFFSET(r1)
PPC_REG_STORE SCRATCH_8_REGISTER, EXC_CTR_OFFSET(r1)
PPC_REG_STORE SCRATCH_9_REGISTER, EXC_LR_OFFSET(r1)
#ifdef __SPE__
/* Save SPEFSCR and ACC */
mfspr SCRATCH_0_REGISTER, FSL_EIS_SPEFSCR
evxor SCRATCH_5_REGISTER, SCRATCH_5_REGISTER, SCRATCH_5_REGISTER
evmwumiaa SCRATCH_5_REGISTER, SCRATCH_5_REGISTER, SCRATCH_5_REGISTER
stw SCRATCH_0_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
evstdd SCRATCH_5_REGISTER, PPC_EXC_ACC_OFFSET(r1)
#endif
#ifdef PPC_MULTILIB_ALTIVEC
/* Save volatile AltiVec context */
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(0)
stvx v0, r1, SCRATCH_0_REGISTER
mfvscr v0
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(1)
stvx v1, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(2)
stvx v2, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(3)
stvx v3, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(4)
stvx v4, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(5)
stvx v5, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(6)
stvx v6, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(7)
stvx v7, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(8)
stvx v8, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(9)
stvx v9, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(10)
stvx v10, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(11)
stvx v11, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(12)
stvx v12, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(13)
stvx v13, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(14)
stvx v14, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(15)
stvx v15, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(16)
stvx v16, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(17)
stvx v17, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(18)
stvx v18, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(19)
stvx v19, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VSCR_OFFSET
stvewx v0, r1, SCRATCH_0_REGISTER
#endif
#ifdef PPC_MULTILIB_FPU
/* Save volatile FPU context */
stfd f0, PPC_EXC_MIN_FR_OFFSET(0)(r1)
mffs f0
stfd f1, PPC_EXC_MIN_FR_OFFSET(1)(r1)
stfd f2, PPC_EXC_MIN_FR_OFFSET(2)(r1)
stfd f3, PPC_EXC_MIN_FR_OFFSET(3)(r1)
stfd f4, PPC_EXC_MIN_FR_OFFSET(4)(r1)
stfd f5, PPC_EXC_MIN_FR_OFFSET(5)(r1)
stfd f6, PPC_EXC_MIN_FR_OFFSET(6)(r1)
stfd f7, PPC_EXC_MIN_FR_OFFSET(7)(r1)
stfd f8, PPC_EXC_MIN_FR_OFFSET(8)(r1)
stfd f9, PPC_EXC_MIN_FR_OFFSET(9)(r1)
stfd f10, PPC_EXC_MIN_FR_OFFSET(10)(r1)
stfd f11, PPC_EXC_MIN_FR_OFFSET(11)(r1)
stfd f12, PPC_EXC_MIN_FR_OFFSET(12)(r1)
stfd f13, PPC_EXC_MIN_FR_OFFSET(13)(r1)
stfd f0, PPC_EXC_MIN_FPSCR_OFFSET(r1)
#endif
/* Increment ISR nest level and thread dispatch disable level */
cmpwi SCRATCH_3_REGISTER, 0
#ifdef RTEMS_PROFILING
/* Keep "ISR nest level != 0" in cr2 for the profiling check below */
cmpwi cr2, SCRATCH_3_REGISTER, 0
#endif
addi SCRATCH_3_REGISTER, SCRATCH_3_REGISTER, 1
addi SCRATCH_4_REGISTER, SCRATCH_4_REGISTER, 1
stw SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_2_REGISTER)
stw SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_2_REGISTER)
/* Switch stack if necessary: cr0 still reflects "ISR nest level != 0";
 * keep r1 if already nested, otherwise use the stack from SPRG1 */
mfspr SCRATCH_0_REGISTER, SPRG1
iselgt r1, r1, SCRATCH_0_REGISTER
/* Call fixed high level handler */
bl bsp_interrupt_dispatch
PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
#ifdef RTEMS_PROFILING
/* Update profiling data if necessary (outermost interrupt only) */
bne cr2, .Lprofiling_done
GET_SELF_CPU_CONTROL r3
lwz r4, PPC_EXC_INTERRUPT_ENTRY_INSTANT_OFFSET(FRAME_REGISTER)
GET_TIME_BASE r5
bl _Profiling_Outer_most_interrupt_entry_and_exit
PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
.Lprofiling_done:
#endif /* RTEMS_PROFILING */
/* Load some per-CPU variables */
GET_SELF_CPU_CONTROL SCRATCH_1_REGISTER
lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SCRATCH_1_REGISTER)
lwz SCRATCH_5_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
lwz SCRATCH_6_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
lwz SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_1_REGISTER)
/*
 * Switch back to original stack (FRAME_REGISTER == r1 if we are still
 * on the IRQ stack) and restore FRAME_REGISTER.
 */
mr r1, FRAME_REGISTER
PPC_REG_LOAD FRAME_REGISTER, FRAME_OFFSET(r1)
/* Decrement levels and determine thread dispatch state:
 * SCRATCH_0 = !dispatch_needed | isr_dispatch_disable | (disable_level - 1);
 * a zero result means a thread dispatch must be performed. */
xori SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, 1
or SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, SCRATCH_5_REGISTER
subi SCRATCH_4_REGISTER, SCRATCH_6_REGISTER, 1
or. SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, SCRATCH_4_REGISTER
subi SCRATCH_3_REGISTER, SCRATCH_3_REGISTER, 1
/* Store thread dispatch disable and ISR nest levels */
stw SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
stw SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_1_REGISTER)
/*
 * Check thread dispatch necessary, ISR dispatch disable and thread
 * dispatch disable level.
 */
bne .Lthread_dispatch_done
/* Thread dispatch */
.Ldo_thread_dispatch:
/* Set ISR dispatch disable and thread dispatch disable level to one */
li SCRATCH_0_REGISTER, 1
stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
stw SCRATCH_0_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
/*
 * Call _Thread_Do_dispatch(), this function will enable interrupts.
 * The r3 is SCRATCH_1_REGISTER.
 */
/* Second argument in r4: MSR value with external exceptions enabled */
mfmsr r4
ori r4, r4, MSR_EE
bl _Thread_Do_dispatch
PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
/* Disable interrupts */
wrteei 0
/* SCRATCH_1_REGISTER is volatile, we must set it again */
GET_SELF_CPU_CONTROL SCRATCH_1_REGISTER
/* Check if we have to do the thread dispatch again */
lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SCRATCH_1_REGISTER)
cmpwi SCRATCH_0_REGISTER, 0
bne .Ldo_thread_dispatch
/* We are done with thread dispatching */
li SCRATCH_0_REGISTER, 0
stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
.Lthread_dispatch_done:
#ifdef PPC_MULTILIB_ALTIVEC
/* Restore volatile AltiVec context */
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VSCR_OFFSET
lvewx v0, r1, SCRATCH_0_REGISTER
mtvscr v0
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(0)
lvx v0, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(1)
lvx v1, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(2)
lvx v2, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(3)
lvx v3, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(4)
lvx v4, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(5)
lvx v5, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(6)
lvx v6, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(7)
lvx v7, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(8)
lvx v8, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(9)
lvx v9, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(10)
lvx v10, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(11)
lvx v11, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(12)
lvx v12, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(13)
lvx v13, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(14)
lvx v14, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(15)
lvx v15, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(16)
lvx v16, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(17)
lvx v17, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(18)
lvx v18, r1, SCRATCH_0_REGISTER
li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(19)
lvx v19, r1, SCRATCH_0_REGISTER
#endif
#ifdef PPC_MULTILIB_FPU
/* Restore volatile FPU context */
lfd f0, PPC_EXC_MIN_FPSCR_OFFSET(r1)
mtfsf 0xff, f0
lfd f0, PPC_EXC_MIN_FR_OFFSET(0)(r1)
lfd f1, PPC_EXC_MIN_FR_OFFSET(1)(r1)
lfd f2, PPC_EXC_MIN_FR_OFFSET(2)(r1)
lfd f3, PPC_EXC_MIN_FR_OFFSET(3)(r1)
lfd f4, PPC_EXC_MIN_FR_OFFSET(4)(r1)
lfd f5, PPC_EXC_MIN_FR_OFFSET(5)(r1)
lfd f6, PPC_EXC_MIN_FR_OFFSET(6)(r1)
lfd f7, PPC_EXC_MIN_FR_OFFSET(7)(r1)
lfd f8, PPC_EXC_MIN_FR_OFFSET(8)(r1)
lfd f9, PPC_EXC_MIN_FR_OFFSET(9)(r1)
lfd f10, PPC_EXC_MIN_FR_OFFSET(10)(r1)
lfd f11, PPC_EXC_MIN_FR_OFFSET(11)(r1)
lfd f12, PPC_EXC_MIN_FR_OFFSET(12)(r1)
lfd f13, PPC_EXC_MIN_FR_OFFSET(13)(r1)
#endif
#ifdef __SPE__
/* Load SPEFSCR and ACC */
lwz SCRATCH_3_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
evldd SCRATCH_4_REGISTER, PPC_EXC_ACC_OFFSET(r1)
#endif
/*
 * We must clear reservations here, since otherwise compare-and-swap
 * atomic operations with interrupts enabled may yield wrong results.
 * A compare-and-swap atomic operation is generated by the compiler
 * like this:
 *
 *   .L1:
 *     lwarx r9, r0, r3
 *     cmpw r9, r4
 *     bne- .L2
 *     stwcx. r5, r0, r3
 *     bne- .L1
 *   .L2:
 *
 * Consider the following scenario. A thread is interrupted right
 * before the stwcx. The interrupt updates the value using a
 * compare-and-swap sequence. Everything is fine up to this point.
 * The interrupt performs now a compare-and-swap sequence which fails
 * with a branch to .L2. The current processor has now a reservation.
 * The interrupt returns without further stwcx. The thread updates the
 * value using the unrelated reservation of the interrupt.
 */
/* The store below targets the already saved FRAME_REGISTER slot, so it
 * is harmless whether or not the conditional store succeeds. */
li SCRATCH_0_REGISTER, FRAME_OFFSET
stwcx. SCRATCH_0_REGISTER, r1, SCRATCH_0_REGISTER
/* Load SRR0, SRR1, CR, XER, CTR, and LR */
PPC_REG_LOAD SCRATCH_5_REGISTER, SRR0_FRAME_OFFSET(r1)
PPC_REG_LOAD SCRATCH_6_REGISTER, SRR1_FRAME_OFFSET(r1)
lwz SCRATCH_7_REGISTER, EXC_CR_OFFSET(r1)
lwz SCRATCH_8_REGISTER, EXC_XER_OFFSET(r1)
PPC_REG_LOAD SCRATCH_9_REGISTER, EXC_CTR_OFFSET(r1)
PPC_REG_LOAD SCRATCH_10_REGISTER, EXC_LR_OFFSET(r1)
/* Restore volatile registers */
PPC_GPR_LOAD SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
#ifdef __powerpc64__
PPC_GPR_LOAD r2, GPR2_OFFSET(r1)
#endif
PPC_GPR_LOAD SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
PPC_GPR_LOAD SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
#ifdef __SPE__
/* Restore SPEFSCR and ACC */
mtspr FSL_EIS_SPEFSCR, SCRATCH_3_REGISTER
evmra SCRATCH_4_REGISTER, SCRATCH_4_REGISTER
#endif
/* Restore volatile registers */
PPC_GPR_LOAD SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
PPC_GPR_LOAD SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
/* Restore SRR0, SRR1, CR, CTR, XER, and LR plus volatile registers */
mtsrr0 SCRATCH_5_REGISTER
PPC_GPR_LOAD SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
mtsrr1 SCRATCH_6_REGISTER
PPC_GPR_LOAD SCRATCH_6_REGISTER, SCRATCH_6_OFFSET(r1)
mtcr SCRATCH_7_REGISTER
PPC_GPR_LOAD SCRATCH_7_REGISTER, SCRATCH_7_OFFSET(r1)
mtxer SCRATCH_8_REGISTER
PPC_GPR_LOAD SCRATCH_8_REGISTER, SCRATCH_8_OFFSET(r1)
mtctr SCRATCH_9_REGISTER
PPC_GPR_LOAD SCRATCH_9_REGISTER, SCRATCH_9_OFFSET(r1)
mtlr SCRATCH_10_REGISTER
PPC_GPR_LOAD SCRATCH_10_REGISTER, SCRATCH_10_OFFSET(r1)
/* Pop stack */
addi r1, r1, PPC_EXC_INTERRUPT_FRAME_SIZE
/* Return */
rfi
/* Symbol provided for debugging and tracing */
ppc_exc_interrupt_end:
#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */

View File

@@ -1,325 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* Copyright (C) 2009-2011 embedded brains GmbH.
*
* Enhanced by Jay Kulpinski <jskulpin@eng01.gdds.com>
* to support 603, 603e, 604, 604e exceptions
*
* Moved to "libcpu/powerpc/new-exceptions" and consolidated
* by Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
* to be common for all PPCs with new exceptions.
*
* Derived from file "libcpu/powerpc/new-exceptions/raw_exception.c".
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bsp/vectors.h>
/*
 * Common exception category assignments shared by the classic (60x-style)
 * CPU tables below.  All listed vectors are PPC_EXC_CLASSIC; the external
 * interrupt and the decrementer are additionally flagged PPC_EXC_ASYNC.
 * The _WO_SYS variant omits the system call vector (used by the PSIM table
 * which assigns it separately or not at all).
 */
#define PPC_BASIC_VECS_WO_SYS \
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC

/* Basic vectors plus the system call vector. */
#define PPC_BASIC_VECS \
  PPC_BASIC_VECS_WO_SYS, \
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC
/*
 * Vector categories for the PPC 405 family (selected for PPC_405, PPC_405GP
 * and PPC_405EX in ppc_exc_categories_for_cpu()).  Machine check, watchdog
 * and trace use the 405-specific critical category.
 */
static const ppc_exc_categories ppc_405_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_405_CRITICAL,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PPC405_APU_UNAVAIL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_405_CRITICAL,
};
/*
 * Vector categories for standard Book E cores (selected for PPC_440 in
 * ppc_exc_categories_for_cpu()).  Critical input, watchdog and debug use
 * the Book E critical category; machine check uses the e500 variant.
 */
static const ppc_exc_categories ppc_booke_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
};
/* Vector categories for MPC 5xx cores (selected for PPC_5XX). */
static const ppc_exc_categories mpc_5xx_category_table = {
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_IPROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_DPROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_MEBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_NMEBREAK_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * Vector categories for 603-class cores (selected for PPC_603, PPC_603e,
 * PPC_603le, PPC_603ev, PPC_8260 and PPC_8245): basic vectors plus the
 * software TLB miss handlers.
 */
static const ppc_exc_categories mpc_603_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * Vector categories for 604-class cores (selected for PPC_604, PPC_604e,
 * PPC_604r, and for PPC_7455/PPC_7457 when AltiVec is not available).
 */
static const ppc_exc_categories mpc_604_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * 604-class table extended with the AltiVec unavailable/assist vectors
 * (selected for PPC_7455/PPC_7457 when AltiVec is available).
 */
static const ppc_exc_categories mpc_604_altivec_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * Vector categories for 750-class cores (selected for PPC_750, PPC_750_IBM,
 * and for PPC_7400 when AltiVec is not available).
 */
static const ppc_exc_categories mpc_750_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * 750-class table extended with the AltiVec unavailable/assist vectors
 * (selected for PPC_7400 when AltiVec is available).  Note: unlike the
 * plain 750 table this one has no performance monitor entry.
 */
static const ppc_exc_categories mpc_750_altivec_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};
/* Vector categories for MPC 8xx cores (selected for PPC_860). */
static const ppc_exc_categories mpc_860_category_table = {
  PPC_BASIC_VECS,
  [ASM_8XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_ITLBERROR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DTLBERROR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_PERIFBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DEVPORT_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * Vector categories for e300 cores (selected for PPC_e300c1/c2/c3).  The
 * floating point vector is PPC_EXC_NAKED here, i.e. handled by a minimal
 * prologue instead of the classic wrapper.
 */
static const ppc_exc_categories e300_category_table = {
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_NAKED,
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_E300_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
};
/*
 * Vector categories for e200 cores (selected for PPC_e200z0 through
 * PPC_e200z7).  Machine check is routed through the Book E critical path.
 */
static const ppc_exc_categories e200_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  /* FIXME: Depending on HDI0 [DAPUEN] this is a critical or debug exception */
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC
};
/*
 * Vector categories for e500-class cores (selected for PPC_8540,
 * PPC_e500v2 and PPC_e6500).  Machine check uses the dedicated e500
 * machine check category; critical input, watchdog and debug use the
 * Book E critical category.
 */
static const ppc_exc_categories e500_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_PERFMON_VECTOR] = PPC_EXC_CLASSIC
};
/*
 * Vector categories for the PSIM simulator (selected for PPC_PSIM).  Uses
 * the basic vectors without the system call vector plus the 60x TLB miss
 * and AltiVec vectors.
 */
static const ppc_exc_categories psim_category_table = {
  PPC_BASIC_VECS_WO_SYS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};
/*
 * Select the exception category table for the given CPU model.
 *
 * CPUs with an available AltiVec unit get a table that additionally covers
 * the AltiVec unavailable/assist vectors; all other models fall through to
 * the plain per-family tables.  Returns NULL for unknown CPU models.
 */
const ppc_exc_categories *ppc_exc_categories_for_cpu(ppc_cpu_id_t cpu)
{
  if (ppc_cpu_has_altivec()) {
    /* AltiVec-capable models get tables with the vector unit exceptions */
    if (cpu == PPC_7400) {
      return &mpc_750_altivec_category_table;
    }

    if (cpu == PPC_7455 || cpu == PPC_7457) {
      return &mpc_604_altivec_category_table;
    }
  }

  switch (cpu) {
    case PPC_7400:
    case PPC_750:
    case PPC_750_IBM:
      return &mpc_750_category_table;
    case PPC_7455:
    case PPC_7457:
    case PPC_604:
    case PPC_604e:
    case PPC_604r:
      return &mpc_604_category_table;
    case PPC_603:
    case PPC_603e:
    case PPC_603le:
    case PPC_603ev:
    /* case PPC_8240: same value as 8260 */
    case PPC_8260:
    case PPC_8245:
      return &mpc_603_category_table;
    case PPC_e300c1:
    case PPC_e300c2:
    case PPC_e300c3:
      return &e300_category_table;
    case PPC_PSIM:
      return &psim_category_table;
    case PPC_8540:
    case PPC_e500v2:
    case PPC_e6500:
      return &e500_category_table;
    case PPC_e200z0:
    case PPC_e200z1:
    case PPC_e200z4:
    case PPC_e200z6:
    case PPC_e200z7:
      return &e200_category_table;
    case PPC_5XX:
      return &mpc_5xx_category_table;
    case PPC_860:
      return &mpc_860_category_table;
    case PPC_405:
    case PPC_405GP:
    case PPC_405EX:
      return &ppc_405_category_table;
    case PPC_440:
      return &ppc_booke_category_table;
    default:
      /* Unknown CPU model */
      return NULL;
  }
}
/*
 * Look up the category of a vector in the given table.  Vector numbers
 * beyond LAST_VALID_EXC yield PPC_EXC_INVALID.
 */
ppc_exc_category ppc_exc_category_for_vector(const ppc_exc_categories *categories, unsigned vector)
{
  if (vector > LAST_VALID_EXC) {
    return PPC_EXC_INVALID;
  }

  return (*categories) [vector];
}

View File

@@ -1,229 +0,0 @@
/*
* Copyright (c) 2011, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bspopts.h>
#include <rtems/score/percpu.h>
#include <bsp/vectors.h>
#define SCRATCH_REGISTER_0 r3
#define SCRATCH_REGISTER_1 r4
.global ppc_exc_fatal_normal
.global ppc_exc_fatal_critical
.global ppc_exc_fatal_machine_check
.global ppc_exc_fatal_debug
/*
 * Per-interrupt-class fatal entry points.  Each stub saves the second
 * scratch register into the frame's GPR4 slot, then copies the save/restore
 * register pair of its interrupt class (CSRR0/1, MCSRR0/1, DSRR0/1 or
 * SRR0/1) into the common SRR0/SRR1 frame slots, so that .Lppc_exc_fatal
 * can proceed identically for all classes.
 */

/* Critical interrupt class: source PC/MSR are in CSRR0/CSRR1 */
ppc_exc_fatal_critical:

	PPC_REG_STORE	SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
	mfcsrr0	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
	mfcsrr1	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
	b	.Lppc_exc_fatal

/* Machine check class: source PC/MSR are in MCSRR0/MCSRR1 */
ppc_exc_fatal_machine_check:

	PPC_REG_STORE	SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
	mfmcsrr0	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
	mfmcsrr1	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
	b	.Lppc_exc_fatal

/* Debug class: source PC/MSR are in DSRR0/DSRR1 */
ppc_exc_fatal_debug:

	PPC_REG_STORE	SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
	mfspr	SCRATCH_REGISTER_1, BOOKE_DSRR0
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
	mfspr	SCRATCH_REGISTER_1, BOOKE_DSRR1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
	b	.Lppc_exc_fatal

/* Normal class: source PC/MSR are in SRR0/SRR1; falls through below */
ppc_exc_fatal_normal:

	PPC_REG_STORE	SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
	mfsrr0	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
	mfsrr1	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
/*
 * Common fatal path: capture the complete register state into the
 * exception frame on the stack and terminate the system via _Terminate().
 * On entry r3 still holds the value stored below into the exception number
 * slot (presumably the vector number from the prologue — TODO confirm) and
 * GPR4 plus SRR0/SRR1 were already saved by the entry stubs above.
 */
.Lppc_exc_fatal:

	stw	r3, EXCEPTION_NUMBER_OFFSET(r1)

	/* Save CR, XER, CTR and LR */
	mfcr	SCRATCH_REGISTER_1
	stw	SCRATCH_REGISTER_1, EXC_CR_OFFSET(r1)
	mfxer	SCRATCH_REGISTER_1
	stw	SCRATCH_REGISTER_1, EXC_XER_OFFSET(r1)
	mfctr	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
	mflr	SCRATCH_REGISTER_1
	PPC_REG_STORE	SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)

	/* Save the general purpose registers (r3/r4 were saved earlier) */
	PPC_REG_STORE	r0, GPR0_OFFSET(r1)
	PPC_REG_STORE	r1, GPR1_OFFSET(r1)
	PPC_REG_STORE	r2, GPR2_OFFSET(r1)
	PPC_REG_STORE	r5, GPR5_OFFSET(r1)
	PPC_REG_STORE	r6, GPR6_OFFSET(r1)
	PPC_REG_STORE	r7, GPR7_OFFSET(r1)
	PPC_REG_STORE	r8, GPR8_OFFSET(r1)
	PPC_REG_STORE	r9, GPR9_OFFSET(r1)
	PPC_REG_STORE	r10, GPR10_OFFSET(r1)
	PPC_REG_STORE	r11, GPR11_OFFSET(r1)
	PPC_REG_STORE	r12, GPR12_OFFSET(r1)
	PPC_REG_STORE	r13, GPR13_OFFSET(r1)
	PPC_REG_STORE	r14, GPR14_OFFSET(r1)
	PPC_REG_STORE	r15, GPR15_OFFSET(r1)
	PPC_REG_STORE	r16, GPR16_OFFSET(r1)
	PPC_REG_STORE	r17, GPR17_OFFSET(r1)
	PPC_REG_STORE	r18, GPR18_OFFSET(r1)
	PPC_REG_STORE	r19, GPR19_OFFSET(r1)
	PPC_REG_STORE	r20, GPR20_OFFSET(r1)
	PPC_REG_STORE	r21, GPR21_OFFSET(r1)
	PPC_REG_STORE	r22, GPR22_OFFSET(r1)
	PPC_REG_STORE	r23, GPR23_OFFSET(r1)
	PPC_REG_STORE	r24, GPR24_OFFSET(r1)
	PPC_REG_STORE	r25, GPR25_OFFSET(r1)
	PPC_REG_STORE	r26, GPR26_OFFSET(r1)
	PPC_REG_STORE	r27, GPR27_OFFSET(r1)
	PPC_REG_STORE	r28, GPR28_OFFSET(r1)
	PPC_REG_STORE	r29, GPR29_OFFSET(r1)
	PPC_REG_STORE	r30, GPR30_OFFSET(r1)
	PPC_REG_STORE	r31, GPR31_OFFSET(r1)

	/* Enable FPU and/or AltiVec */
#if defined(PPC_MULTILIB_FPU) || defined(PPC_MULTILIB_ALTIVEC)
	mfmsr	SCRATCH_REGISTER_1
#ifdef PPC_MULTILIB_FPU
	ori	SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, MSR_FP
#endif
#ifdef PPC_MULTILIB_ALTIVEC
	oris	SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, MSR_VE >> 16
#endif
	mtmsr	SCRATCH_REGISTER_1
	isync
#endif

#ifdef PPC_MULTILIB_ALTIVEC
	/*
	 * Save all vector registers and the VSCR.  v0 is saved first so it
	 * can be reused to read the VSCR via mfvscr/stvewx.
	 */
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(0)
	stvx	v0, r1, SCRATCH_REGISTER_1
	mfvscr	v0
	li	SCRATCH_REGISTER_1, PPC_EXC_VSCR_OFFSET
	stvewx	v0, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(1)
	stvx	v1, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(2)
	stvx	v2, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(3)
	stvx	v3, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(4)
	stvx	v4, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(5)
	stvx	v5, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(6)
	stvx	v6, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(7)
	stvx	v7, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(8)
	stvx	v8, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(9)
	stvx	v9, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(10)
	stvx	v10, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(11)
	stvx	v11, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(12)
	stvx	v12, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(13)
	stvx	v13, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(14)
	stvx	v14, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(15)
	stvx	v15, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(16)
	stvx	v16, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(17)
	stvx	v17, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(18)
	stvx	v18, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(19)
	stvx	v19, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(20)
	stvx	v20, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(21)
	stvx	v21, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(22)
	stvx	v22, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(23)
	stvx	v23, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(24)
	stvx	v24, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(25)
	stvx	v25, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(26)
	stvx	v26, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(27)
	stvx	v27, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(28)
	stvx	v28, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(29)
	stvx	v29, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(30)
	stvx	v30, r1, SCRATCH_REGISTER_1
	li	SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(31)
	stvx	v31, r1, SCRATCH_REGISTER_1
	mfvrsave	SCRATCH_REGISTER_1
	stw	SCRATCH_REGISTER_1, PPC_EXC_VRSAVE_OFFSET(r1)
#endif

#ifdef PPC_MULTILIB_FPU
	/*
	 * Save all floating point registers and the FPSCR.  f0 is saved
	 * first so it can be reused to read the FPSCR via mffs.
	 */
	stfd	f0, PPC_EXC_FR_OFFSET(0)(r1)
	mffs	f0
	stfd	f0, PPC_EXC_FPSCR_OFFSET(r1)
	stfd	f1, PPC_EXC_FR_OFFSET(1)(r1)
	stfd	f2, PPC_EXC_FR_OFFSET(2)(r1)
	stfd	f3, PPC_EXC_FR_OFFSET(3)(r1)
	stfd	f4, PPC_EXC_FR_OFFSET(4)(r1)
	stfd	f5, PPC_EXC_FR_OFFSET(5)(r1)
	stfd	f6, PPC_EXC_FR_OFFSET(6)(r1)
	stfd	f7, PPC_EXC_FR_OFFSET(7)(r1)
	stfd	f8, PPC_EXC_FR_OFFSET(8)(r1)
	stfd	f9, PPC_EXC_FR_OFFSET(9)(r1)
	stfd	f10, PPC_EXC_FR_OFFSET(10)(r1)
	stfd	f11, PPC_EXC_FR_OFFSET(11)(r1)
	stfd	f12, PPC_EXC_FR_OFFSET(12)(r1)
	stfd	f13, PPC_EXC_FR_OFFSET(13)(r1)
	stfd	f14, PPC_EXC_FR_OFFSET(14)(r1)
	stfd	f15, PPC_EXC_FR_OFFSET(15)(r1)
	stfd	f16, PPC_EXC_FR_OFFSET(16)(r1)
	stfd	f17, PPC_EXC_FR_OFFSET(17)(r1)
	stfd	f18, PPC_EXC_FR_OFFSET(18)(r1)
	stfd	f19, PPC_EXC_FR_OFFSET(19)(r1)
	stfd	f20, PPC_EXC_FR_OFFSET(20)(r1)
	stfd	f21, PPC_EXC_FR_OFFSET(21)(r1)
	stfd	f22, PPC_EXC_FR_OFFSET(22)(r1)
	stfd	f23, PPC_EXC_FR_OFFSET(23)(r1)
	stfd	f24, PPC_EXC_FR_OFFSET(24)(r1)
	stfd	f25, PPC_EXC_FR_OFFSET(25)(r1)
	stfd	f26, PPC_EXC_FR_OFFSET(26)(r1)
	stfd	f27, PPC_EXC_FR_OFFSET(27)(r1)
	stfd	f28, PPC_EXC_FR_OFFSET(28)(r1)
	stfd	f29, PPC_EXC_FR_OFFSET(29)(r1)
	stfd	f30, PPC_EXC_FR_OFFSET(30)(r1)
	stfd	f31, PPC_EXC_FR_OFFSET(31)(r1)
#endif

	/*
	 * _Terminate(source, code): first argument 9 (presumably
	 * RTEMS_FATAL_SOURCE_EXCEPTION — TODO confirm against rtems.h),
	 * second argument the exception frame.
	 */
	li	r3, 9
	addi	r4, r1, FRAME_LINK_SPACE
	b	_Terminate
	PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE

View File

@@ -1,28 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bsp/vectors.h>
/*
 * Default global exception handler: terminate the system with the
 * exception frame pointer as the fatal code so the fatal error extension
 * can inspect the faulting context.
 */
void C_exception_handler(BSP_Exception_frame *excPtr)
{
  rtems_fatal(
    RTEMS_FATAL_SOURCE_EXCEPTION,
    (rtems_fatal_code) excPtr
  );
}

View File

@@ -1,116 +0,0 @@
/* PowerPC exception handling middleware; consult README for more
* information.
*
* Author: Till Straumann <strauman@slac.stanford.edu>, 2007
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bsp/vectors.h>
#include <rtems/score/threaddispatch.h>
/* Provide temp. storage space for a few registers.
* This is used by the assembly code prior to setting up
* the stack.
* One set is needed for each exception type with its
* own SRR0/SRR1 pair since such exceptions may nest.
*
* NOTE: The assembly code needs these variables to
* be in the .sdata section and accesses them
* via R13.
*/
/* Per-exception-class lock words used by the low-level prologue code */
uint32_t ppc_exc_lock_std = 0;
uint32_t ppc_exc_lock_crit = 0;
uint32_t ppc_exc_lock_mchk = 0;

/* Per-exception-class temporary storage for the vector register */
uint32_t ppc_exc_vector_register_std = 0;
uint32_t ppc_exc_vector_register_crit = 0;
uint32_t ppc_exc_vector_register_mchk = 0;

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

/* MSR bits to enable once critical status info is saved and the stack
 * is switched; must be set depending on CPU type
 *
 * Default is set here for classic PPC CPUs with a MMU
 * but is overridden from vectors_init.c
 */
uint32_t ppc_exc_msr_bits = MSR_IR | MSR_DR | MSR_RI;

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
/*
 * Placeholder installed in every ppc_exc_handler_table slot that has no
 * user handler (ppc_exc_get_handler() compares against it to detect empty
 * slots).  Returns -1 to indicate that the exception was not handled.
 */
int ppc_exc_handler_default(BSP_Exception_frame *f, unsigned int vector)
{
  return -1;
}
#ifndef PPC_EXC_CONFIG_USE_FIXED_HANDLER

/* Global exception handler invoked by the low-level wrapper code */
exception_handler_t globalExceptHdl = C_exception_handler;

/* Table of C-handlers, one slot per vector, all defaulted to the
 * "not handled" placeholder */
ppc_exc_handler_t ppc_exc_handler_table [LAST_VALID_EXC + 1] = {
  [0 ... LAST_VALID_EXC] = ppc_exc_handler_default
};

#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
/*
 * Return the user handler installed for the given vector, or NULL if the
 * vector is out of range or still holds the default placeholder.
 */
ppc_exc_handler_t ppc_exc_get_handler(unsigned vector)
{
  ppc_exc_handler_t handler;

  if (vector > LAST_VALID_EXC) {
    return NULL;
  }

  handler = ppc_exc_handler_table [vector];

  if (handler == ppc_exc_handler_default) {
    /* Slot is empty */
    return NULL;
  }

  return handler;
}
/*
 * Install a user handler for the given vector.  A NULL handler restores
 * the default placeholder.  Returns RTEMS_INVALID_ID for an out-of-range
 * vector, RTEMS_RESOURCE_IN_USE when the handler table is fixed at compile
 * time and the requested handler differs from the installed one, and
 * RTEMS_SUCCESSFUL otherwise.
 */
rtems_status_code ppc_exc_set_handler(unsigned vector, ppc_exc_handler_t handler)
{
  if (vector > LAST_VALID_EXC) {
    return RTEMS_INVALID_ID;
  }

  if (handler == NULL) {
    handler = ppc_exc_handler_default;
  }

  if (ppc_exc_handler_table [vector] == handler) {
    /* Already installed, nothing to do */
    return RTEMS_SUCCESSFUL;
  }

#ifndef PPC_EXC_CONFIG_USE_FIXED_HANDLER
  ppc_exc_handler_table [vector] = handler;
  return RTEMS_SUCCESSFUL;
#else /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
  /* The table is immutable in this configuration */
  return RTEMS_RESOURCE_IN_USE;
#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
}
/*
 * Perform deferred thread dispatching on return from an exception.
 *
 * Bails out immediately if ISR dispatching is disabled for the current
 * processor.  Otherwise, while a dispatch is necessary, performs it with
 * dispatching disabled and interrupts re-disabled afterwards; the per-CPU
 * control is re-fetched after each dispatch since the executing thread may
 * have migrated.  Finally re-enables ISR dispatching.
 */
void ppc_exc_wrapup(BSP_Exception_frame *frame)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Per_CPU_Get();

  if (cpu_self->isr_dispatch_disable) {
    /* Dispatching is deferred to an outer exception level */
    return;
  }

  while (cpu_self->dispatch_necessary) {
    rtems_interrupt_level level;

    cpu_self->isr_dispatch_disable = 1;
    cpu_self->thread_dispatch_disable_level = 1;
    /* frame->EXC_SRR1 supplies the MSR context for the dispatch */
    _Thread_Do_dispatch(cpu_self, frame->EXC_SRR1);
    rtems_interrupt_local_disable(level);
    (void) level;
    /* The executing thread may have moved to another processor */
    cpu_self = _Per_CPU_Get();
  }

  cpu_self->isr_dispatch_disable = 0;
}

View File

@@ -1,187 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
*
* Copyright (C) 2009-2012 embedded brains GmbH.
*
* Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
* Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems.h>
#include <bsp/vectors.h>
#include <bsp/fatal.h>
uint32_t ppc_exc_cache_wb_check = 1;
#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))
/*
 * Program the Book E interrupt vector prefix (IVPR) and offset (IVOR)
 * registers so that all interrupt classes dispatch relative to
 * vector_base.  e200z0/z1 cores have hard-wired IVORs and only get the
 * prefix; the e200/e500-specific IVOR32-35 are set only where present.
 */
static void ppc_exc_initialize_booke(void *vector_base)
{
  /* Interupt vector prefix register */
  MTIVPR((uint32_t) vector_base);

  if (
    ppc_cpu_is_specific_e200(PPC_e200z0)
      || ppc_cpu_is_specific_e200(PPC_e200z1)
  ) {
    /*
     * These cores have hard wired IVOR registers.  An access will case a
     * program exception.
     */
    return;
  }

  /* Interupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));

  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    /* SPE and embedded floating point vectors */
    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
  }

  if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
    /* Performance monitor vector */
    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
  }
}
/* Abort system initialization with a BSP-specific fatal error code. */
static void ppc_exc_fatal_error(void)
{
  bsp_fatal(PPC_FATAL_EXCEPTION_INITIALIZATION);
}
/*
 * Initialize the PowerPC exception handling support.
 *
 * Validates the environment (known CPU category table, r13 == _SDA_BASE_),
 * sets up the interrupt stack, derives the MSR bits to enable in C-level
 * exception handlers from the current machine state, programs the Book E
 * IVPR/IVOR registers where applicable, installs a generated prologue at
 * each valid vector address, and finally verifies that the stack is mapped
 * write-back cacheable (required by the low-level code; see README/CAVEATS).
 * Any failure terminates via ppc_exc_fatal_error().
 *
 * @param interrupt_stack_begin  Begin of the interrupt stack area.
 * @param interrupt_stack_size   Size of the interrupt stack area in bytes.
 * @param vector_base            Base address of the vector table.
 */
void ppc_exc_initialize_with_vector_base(
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size,
  void *vector_base
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    /* Unknown CPU model */
    ppc_exc_fatal_error();
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * early init code put it there.
   */
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    ppc_exc_fatal_error();
  }

  ppc_exc_initialize_interrupt_stack(interrupt_stack_begin, interrupt_stack_size);

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke(vector_base);
  }

  /* Generate and install a prologue at every vector with a valid category */
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector, vector_base);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(
        vector,
        vector_base,
        category,
        prologue,
        &prologue_size
      );
      if (sc != RTEMS_SUCCESSFUL) {
        ppc_exc_fatal_error();
      }

      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS)
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely w/o the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-thru or caching-disabled.
     * The low-level code is not capable of dealing with this
     * ATM.
     */
    p = (p + 31U) & ~31U;
    __asm__ volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it thru here then things seem to be OK */
  }
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
}

View File

@@ -1,207 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (c) 2009
* embedded brains GmbH
* Obere Lagerstr. 30
* D-82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include "ppc_exc_asm_macros.h"
.global ppc_exc_min_prolog_tmpl_naked
ppc_exc_min_prolog_tmpl_naked:
stwu r1, -EXCEPTION_FRAME_END(r1)
stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
li VECTOR_REGISTER, 0
/*
* We store the absolute branch target address here. It will be used
* to generate the branch operation in ppc_exc_make_prologue().
*/
.int ppc_exc_wrap_naked
.global ppc_exc_wrap_naked
ppc_exc_wrap_naked:
/* Save scratch registers */
stw SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
stw SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
stw SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
/* Save volatile registers */
stw r0, GPR0_OFFSET(r1)
stw r3, GPR3_OFFSET(r1)
stw r8, GPR8_OFFSET(r1)
stw r9, GPR9_OFFSET(r1)
stw r10, GPR10_OFFSET(r1)
stw r11, GPR11_OFFSET(r1)
stw r12, GPR12_OFFSET(r1)
/* Save CR */
mfcr SCRATCH_REGISTER_0
stw SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
/* Save SRR0 */
mfspr SCRATCH_REGISTER_0, srr0
stw SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
/* Save SRR1 */
mfspr SCRATCH_REGISTER_0, srr1
stw SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(r1)
/* Save CTR */
mfctr SCRATCH_REGISTER_0
stw SCRATCH_REGISTER_0, EXC_CTR_OFFSET(r1)
/* Save XER */
mfxer SCRATCH_REGISTER_0
stw SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
/* Save LR */
mflr SCRATCH_REGISTER_0
stw SCRATCH_REGISTER_0, EXC_LR_OFFSET(r1)
#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
/* Load MSR bit mask */
lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
/*
* Change the MSR if necessary (MMU, RI), remember decision in
* non-volatile CR_MSR.
*/
cmpwi CR_MSR, SCRATCH_REGISTER_0, 0
bne CR_MSR, wrap_change_msr_naked
wrap_change_msr_done_naked:
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
/*
* Call high level exception handler
*/
/*
* Get the handler table index from the vector number. We have to
* discard the exception type. Take only the least significant five
* bits (= LAST_VALID_EXC + 1) from the vector register. Multiply by
* four (= size of function pointer).
*/
rlwinm SCRATCH_REGISTER_1, VECTOR_REGISTER, 2, 25, 29
/* Load handler table address */
LA SCRATCH_REGISTER_0, ppc_exc_handler_table
/* Load handler address */
lwzx SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
/*
* First parameter = exception frame pointer + FRAME_LINK_SPACE
*
* We add FRAME_LINK_SPACE to the frame pointer because the high level
* handler expects a BSP_Exception_frame structure.
*/
addi r3, r1, FRAME_LINK_SPACE
/*
* Second parameter = vector number (r4 is the VECTOR_REGISTER)
*
* Discard the exception type and store the vector number
* in the vector register. Take only the least significant
* five bits (= LAST_VALID_EXC + 1).
*/
rlwinm VECTOR_REGISTER, VECTOR_REGISTER, 0, 27, 31
/* Call handler */
mtctr SCRATCH_REGISTER_0
bctrl
#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
/* Restore MSR? */
bne CR_MSR, wrap_restore_msr_naked
wrap_restore_msr_done_naked:
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
/* Restore XER and CTR */
lwz SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
lwz SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
mtxer SCRATCH_REGISTER_0
mtctr SCRATCH_REGISTER_1
/* Restore CR and LR */
lwz SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
lwz SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)
mtcr SCRATCH_REGISTER_0
mtlr SCRATCH_REGISTER_1
/* Restore volatile registers */
lwz r0, GPR0_OFFSET(r1)
lwz r3, GPR3_OFFSET(r1)
lwz r8, GPR8_OFFSET(r1)
lwz r9, GPR9_OFFSET(r1)
lwz r10, GPR10_OFFSET(r1)
lwz r11, GPR11_OFFSET(r1)
lwz r12, GPR12_OFFSET(r1)
/* Restore vector register */
lwz VECTOR_REGISTER, VECTOR_OFFSET(r1)
/* Restore scratch registers and SRRs */
lwz SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
lwz SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
lwz SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
mtspr srr0, SCRATCH_REGISTER_0
lwz SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
mtspr srr1, SCRATCH_REGISTER_1
lwz SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
/*
* We restore r1 from the frame rather than just popping (adding to
* current r1) since the exception handler might have done strange
* things (e.g. a debugger moving and relocating the stack).
*/
lwz r1, 0(r1)
/* Return */
rfi
#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
wrap_change_msr_naked:
mfmsr SCRATCH_REGISTER_1
or SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
mtmsr SCRATCH_REGISTER_1
sync
isync
b wrap_change_msr_done_naked
wrap_restore_msr_naked:
lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
mfmsr SCRATCH_REGISTER_1
andc SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
mtmsr SCRATCH_REGISTER_1
sync
isync
b wrap_restore_msr_done_naked
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

View File

@@ -1,236 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <bsp/vectors.h>
#include <rtems/score/threaddispatch.h>
#include <inttypes.h>
/*
 * On SPE targets the exception frame stores each GPR as a 64-bit value with
 * the 32-bit register image in the upper half; GET_GPR extracts the usual
 * 32-bit view for printing.  On non-SPE targets it is the identity.
 */
#ifndef __SPE__
#define GET_GPR(gpr) (gpr)
#else
#define GET_GPR(gpr) ((uintptr_t) ((gpr) >> 32))
#endif
/* T. Straumann: provide a stack trace
 * <strauman@slac.stanford.edu>, 6/26/2001
 */
/*
 * Minimal view of a PowerPC stack frame for walking the back chain:
 * the back link to the caller's frame and the saved link register slot.
 */
typedef struct LRFrameRec_ {
struct LRFrameRec_ *frameLink;
unsigned long *lr;
} LRFrameRec, *LRFrame;
/* Upper bound on frames walked, in case we have a corrupted stack bottom */
#define STACK_CLAMP 50 /* in case we have a corrupted bottom */
/*
 * Default reader for the data-access fault address.
 *
 * The register holding the fault address differs between CPU families:
 * classic 60x CPUs use DAR, standard Book E and e500 use DEAR, and the
 * PPC405 has its own DEAR encoding.  Returns the marker value 0xdeadbeef
 * when the running CPU matches none of these families.
 */
static uint32_t ppc_exc_get_DAR_dflt(void)
{
  uint32_t fault_address = 0xdeadbeef;

  if (ppc_cpu_is_60x()) {
    fault_address = PPC_SPECIAL_PURPOSE_REGISTER(PPC_DAR);
  } else {
    switch (ppc_cpu_is_bookE()) {
      case PPC_BOOKE_STD:
      case PPC_BOOKE_E500:
        fault_address = PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_DEAR);
        break;
      case PPC_BOOKE_405:
        fault_address = PPC_SPECIAL_PURPOSE_REGISTER(PPC405_DEAR);
        break;
      default:
        break;
    }
  }

  return fault_address;
}

/* Hook so a BSP can override how the fault address is obtained */
uint32_t (*ppc_exc_get_DAR)(void) = ppc_exc_get_DAR_dflt;
void BSP_printStackTrace(const BSP_Exception_frame *excPtr)
{
LRFrame f;
int i;
LRFrame sp;
void *lr;
printk("Stack Trace: \n ");
if (excPtr) {
printk("IP: 0x%08" PRIxPTR ", ", excPtr->EXC_SRR0);
sp = (LRFrame) GET_GPR(excPtr->GPR1);
lr = (void *) excPtr->EXC_LR;
} else {
/* there's no macro for this */
__asm__ __volatile__("mr %0, 1":"=r"(sp));
lr = (LRFrame) ppc_link_register();
}
printk("LR: 0x%08" PRIxPTR "\n", (uintptr_t) lr);
for (f = (LRFrame) sp, i = 0; f->frameLink && i < STACK_CLAMP; f = f->frameLink) {
printk("--^ 0x%08" PRIxPTR "", (uintptr_t) (f->frameLink->lr));
if (!(++i % 5))
printk("\n");
}
if (i >= STACK_CLAMP) {
printk("Too many stack frames (stack possibly corrupted), giving up...\n");
} else {
if (i % 5)
printk("\n");
}
}
/*
 * Print a human readable dump of a CPU exception frame via printk().
 *
 * Synchronous exceptions (non-negative _EXC_number) carry a complete
 * register image, so R1/R2 and the non-volatile registers R13..R31 are only
 * printed in that case.  CPU family specific fault registers (DAR or DEAR,
 * ESR/MCSR on Book E) and, when configured, AltiVec and FPU state are
 * printed as well.  Concludes with a stack trace of the faulting context.
 *
 * Fixes relative to the previous revision: the FPU registers were printed
 * with PRIu64 (decimal) behind a "0x" prefix — now PRIx64; the unsigned
 * vector number was printed with %d — now %u; the "thread pointer is NULL"
 * message lacked a terminating newline.
 */
void _CPU_Exception_frame_print(const CPU_Exception_frame *excPtr)
{
  const Thread_Control *executing = _Thread_Executing;
  bool synch = (int) excPtr->_EXC_number >= 0;
  unsigned n = excPtr->_EXC_number & 0x7fff;

  /* n is unsigned, so use an unsigned conversion specifier */
  printk("exception vector %u (0x%x)\n", n, n);
  printk(" next PC or address of fault = 0x%08" PRIxPTR "\n", excPtr->EXC_SRR0);
  printk(" saved MSR = 0x%08" PRIxPTR "\n", excPtr->EXC_SRR1);

  /* Try to find out more about the context where this happened */
  printk(
    " context = %s, ISR nest level = %" PRIu32 "\n",
    _ISR_Nest_level == 0 ? "task" : "interrupt",
    _ISR_Nest_level
  );
  printk(
    " thread dispatch disable level = %" PRIu32 "\n",
    _Thread_Dispatch_disable_level
  );

  /* Dump registers */
  printk(" R0 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR0));
  if (synch) {
    printk(" R1 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR1));
    printk(" R2 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR2));
  } else {
    /* R1/R2 are not saved for asynchronous exceptions; pad the columns */
    printk(" ");
    printk(" ");
  }
  printk(" R3 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR3));
  printk(" R4 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR4));
  printk(" R5 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR5));
  printk(" R6 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR6));
  printk(" R7 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR7));
  printk(" R8 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR8));
  printk(" R9 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR9));
  printk(" R10 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR10));
  printk(" R11 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR11));
  printk(" R12 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR12));
  if (synch) {
    /* Non-volatile registers are only valid for synchronous exceptions */
    printk(" R13 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR13));
    printk(" R14 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR14));
    printk(" R15 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR15));
    printk(" R16 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR16));
    printk(" R17 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR17));
    printk(" R18 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR18));
    printk(" R19 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR19));
    printk(" R20 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR20));
    printk(" R21 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR21));
    printk(" R22 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR22));
    printk(" R23 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR23));
    printk(" R24 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR24));
    printk(" R25 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR25));
    printk(" R26 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR26));
    printk(" R27 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR27));
    printk(" R28 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR28));
    printk(" R29 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR29));
    printk(" R30 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR30));
    printk(" R31 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR31));
  } else {
    printk("\n");
  }
  printk(" CR = 0x%08" PRIx32 "\n", excPtr->EXC_CR);
  printk(" CTR = 0x%08" PRIxPTR "\n", excPtr->EXC_CTR);
  printk(" XER = 0x%08" PRIx32 "\n", excPtr->EXC_XER);
  printk(" LR = 0x%08" PRIxPTR "\n", excPtr->EXC_LR);

  /* Would be great to print DAR but unfortunately,
   * that is not portable across different CPUs.
   * AFAIK on classic PPC DAR is SPR 19, on the
   * 405 we have DEAR = SPR 0x3d5 and bookE says
   * DEAR = SPR 61 :-(
   */
  if (ppc_exc_get_DAR != NULL) {
    char* reg = ppc_cpu_is_60x() ? " DAR" : "DEAR";
    printk(" %s = 0x%08" PRIx32 "\n", reg, ppc_exc_get_DAR());
  }

  if (ppc_cpu_is_bookE()) {
    unsigned esr, mcsr;
    /* The PPC405 uses its own SPR numbers for ESR/MCSR */
    if (ppc_cpu_is_bookE() == PPC_BOOKE_405) {
      esr = PPC_SPECIAL_PURPOSE_REGISTER(PPC405_ESR);
      mcsr = PPC_SPECIAL_PURPOSE_REGISTER(PPC405_MCSR);
    } else {
      esr = PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_ESR);
      mcsr = PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_MCSR);
    }
    printk(" ESR = 0x%08x\n", esr);
    printk(" MCSR = 0x%08x\n", mcsr);
  }

#ifdef PPC_MULTILIB_ALTIVEC
  {
    unsigned char *v = (unsigned char *) &excPtr->V0;
    int i;
    int j;

    printk(" VSCR = 0x%08" PRIx32 "\n", excPtr->VSCR);
    printk("VRSAVE = 0x%08" PRIx32 "\n", excPtr->VRSAVE);

    /* Dump each 128-bit vector register byte-wise */
    for (i = 0; i < 32; ++i) {
      printk(" V%02i = 0x", i);
      for (j = 0; j < 16; ++j) {
        printk("%02x", v[j]);
      }
      printk("\n");
      v += 16;
    }
  }
#endif

#ifdef PPC_MULTILIB_FPU
  {
    uint64_t *f = (uint64_t *) &excPtr->F0;
    int i;

    /* PRIx64, not PRIu64: the values are printed behind a "0x" prefix */
    printk("FPSCR = 0x%08" PRIx64 "\n", excPtr->FPSCR);
    for (i = 0; i < 32; ++i) {
      printk(" F%02i = 0x%016" PRIx64 "\n", i, f[i]);
    }
  }
#endif

  if (executing != NULL) {
    const char *name = (const char *) &executing->Object.name;

    printk(
      " executing thread ID = 0x%08" PRIx32 ", name = %c%c%c%c\n",
      executing->Object.id,
      name [0],
      name [1],
      name [2],
      name [3]
    );
  } else {
    /* Terminate the line so the following stack trace starts cleanly */
    printk(" executing thread pointer is NULL\n");
  }

  BSP_printStackTrace(excPtr);
}

View File

@@ -1,181 +0,0 @@
/**
* @file
*
* @ingroup ppc_exc
*
* @brief PowerPC Exceptions implementation.
*/
/*
* Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
*
* Copyright (C) 2009-2012 embedded brains GmbH.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <string.h>
#include <bsp/vectors.h>
/* Word offset into a minimal prologue where the vector number is hardcoded */
#define PPC_EXC_PROLOG_VEC_OFFSET 2
/* Symbols are defined by the linker */
extern const char ppc_exc_min_prolog_size [];
extern const char ppc_exc_tgpr_clr_prolog_size [];
/* Special prologue for handling register shadowing on 603-style CPUs */
extern const uint32_t ppc_exc_tgpr_clr_prolog [];
/*
 * Classic prologue which determines the vector dynamically from the offset
 * address. This must only be used for classic, synchronous exceptions with a
 * vector offset aligned on a 256-byte boundary.
 */
extern const uint32_t ppc_exc_min_prolog_auto [];
/* Minimal prologue templates (sync/async variants per exception class) */
extern const uint32_t ppc_exc_min_prolog_async_tmpl_std [];
extern const uint32_t ppc_exc_min_prolog_sync_tmpl_std [];
extern const uint32_t ppc_exc_min_prolog_async_tmpl_p405_crit [];
extern const uint32_t ppc_exc_min_prolog_sync_tmpl_p405_crit [];
extern const uint32_t ppc_exc_min_prolog_async_tmpl_bookE_crit [];
extern const uint32_t ppc_exc_min_prolog_sync_tmpl_bookE_crit [];
extern const uint32_t ppc_exc_min_prolog_sync_tmpl_e500_mchk [];
extern const uint32_t ppc_exc_min_prolog_async_tmpl_e500_mchk [];
extern const uint32_t ppc_exc_min_prolog_tmpl_naked [];
extern const uint32_t ppc_exc_min_prolog_async_tmpl_normal [];
/* Maps each exception category to the prologue template it uses by default */
static const uint32_t *const ppc_exc_prologue_templates [] = {
[PPC_EXC_CLASSIC] = ppc_exc_min_prolog_sync_tmpl_std,
[PPC_EXC_CLASSIC_ASYNC] = ppc_exc_min_prolog_async_tmpl_std,
[PPC_EXC_405_CRITICAL] = ppc_exc_min_prolog_sync_tmpl_p405_crit,
[PPC_EXC_405_CRITICAL_ASYNC] = ppc_exc_min_prolog_async_tmpl_p405_crit,
[PPC_EXC_BOOKE_CRITICAL] = ppc_exc_min_prolog_sync_tmpl_bookE_crit,
[PPC_EXC_BOOKE_CRITICAL_ASYNC] = ppc_exc_min_prolog_async_tmpl_bookE_crit,
[PPC_EXC_E500_MACHCHK] = ppc_exc_min_prolog_sync_tmpl_e500_mchk,
[PPC_EXC_E500_MACHCHK_ASYNC] = ppc_exc_min_prolog_async_tmpl_e500_mchk,
[PPC_EXC_NAKED] = ppc_exc_min_prolog_tmpl_naked
};
/*
 * Patch the final word of a prologue into a branch to the handler.
 *
 * The last word of the copied prologue initially holds the handler address
 * (possibly with the link bit set).  This routine replaces it with a
 * PowerPC I-form branch instruction (primary opcode 18) that reaches that
 * address from the prologue's installed location at the vector.
 *
 * Returns false when the target can be reached neither with a relative nor
 * with an absolute branch encoding.
 */
static bool ppc_exc_create_branch_op(
unsigned vector,
void *vector_base,
uint32_t *prologue,
size_t prologue_size
)
{
/* Primary opcode 18 (branch) in the six most significant bits */
static const uintptr_t BRANCH_OP_CODE = 18 << 26;
/* static const uintptr_t BRANCH_OP_LINK = 0x1; */
static const uintptr_t BRANCH_OP_ABS = 0x2;
static const uintptr_t BRANCH_OP_MSK = 0x3ffffff;
/* The branch is the last word of the prologue */
size_t branch_op_index = prologue_size / 4 - 1;
uintptr_t vector_address =
(uintptr_t) ppc_exc_vector_address(vector, vector_base);
uintptr_t branch_op_address = vector_address + 4 * branch_op_index;
/* This value may have BRANCH_OP_LINK set */
uintptr_t target_address = prologue [branch_op_index];
uintptr_t branch_target_address = target_address - branch_op_address;
/*
 * We prefer to use a relative branch. This has the benefit that custom
 * minimal prologues in a read-only area are relocatable.
 */
if ((branch_target_address & ~BRANCH_OP_MSK) != 0) {
/*
 * NOTE(review): this test also rejects in-range backward (negative)
 * displacements, which then depend on the absolute fallback below —
 * confirm this is the intended conservatism.
 */
/* Target too far for relative branch (PC ± 32M) */
if (target_address >= 0xfe000001 || target_address < 0x01fffffd) {
/* Can use an absolute branch: the 26-bit field sign-extends, so it
 * covers the bottom and top 32M of the address space */
branch_target_address = (target_address | BRANCH_OP_ABS) & BRANCH_OP_MSK;
} else {
return false;
}
}
prologue [branch_op_index] = BRANCH_OP_CODE | branch_target_address;
return true;
}
/*
 * Generate an exception prologue for the given vector and category into the
 * caller supplied buffer.
 *
 * On entry *prologue_size is the buffer capacity in bytes; on success it is
 * updated to the actual prologue size.  The prologue is a copy of a template
 * selected from the CPU features and the category; its final word is patched
 * into a branch to the common handler and, for most templates, the vector
 * number is patched in at word PPC_EXC_PROLOG_VEC_OFFSET.
 *
 * Returns RTEMS_SUCCESSFUL on success, RTEMS_INVALID_NUMBER for an invalid
 * category, RTEMS_INVALID_ADDRESS if no branch encoding reaches the handler,
 * RTEMS_INVALID_ID for a vector number that does not fit in 15 bits, and
 * RTEMS_INVALID_SIZE if the buffer is too small.
 */
rtems_status_code ppc_exc_make_prologue(
unsigned vector,
void *vector_base,
ppc_exc_category category,
uint32_t *prologue,
size_t *prologue_size
)
{
const uint32_t *prologue_template = NULL;
size_t prologue_template_size = 0;
bool fixup_vector = false;
if (!ppc_exc_is_valid_category(category)) {
return RTEMS_INVALID_NUMBER;
}
/* TLB miss vectors use the special prologue on CPUs with shadowed GPRs */
if (
ppc_cpu_has_shadowed_gprs()
&& (vector == ASM_60X_IMISS_VECTOR
|| vector == ASM_60X_DLMISS_VECTOR
|| vector == ASM_60X_DSMISS_VECTOR)
) {
prologue_template = ppc_exc_tgpr_clr_prolog;
prologue_template_size = (size_t) ppc_exc_tgpr_clr_prolog_size;
} else if (
category == PPC_EXC_CLASSIC
&& ppc_cpu_is_bookE() != PPC_BOOKE_STD
&& ppc_cpu_is_bookE() != PPC_BOOKE_E500
) {
/* Classic synchronous exceptions can derive the vector automatically */
prologue_template = ppc_exc_min_prolog_auto;
prologue_template_size = (size_t) ppc_exc_min_prolog_size;
#ifdef PPC_EXC_CONFIG_USE_FIXED_HANDLER
} else if (
category == PPC_EXC_CLASSIC_ASYNC
&& ppc_cpu_is_bookE() == PPC_BOOKE_E500
&& (ppc_interrupt_get_disable_mask() & MSR_CE) == 0
) {
/* NOTE(review): size hardcoded to 16 bytes (four instructions) for this
 * template — confirm against the assembly source */
prologue_template = ppc_exc_min_prolog_async_tmpl_normal;
prologue_template_size = 16;
fixup_vector = true;
#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
} else {
prologue_template = ppc_exc_prologue_templates [category];
prologue_template_size = (size_t) ppc_exc_min_prolog_size;
fixup_vector = true;
}
if (prologue_template_size <= *prologue_size) {
*prologue_size = prologue_template_size;
memcpy(prologue, prologue_template, prologue_template_size);
/* Patch the last word into a branch to the common handler */
if (
!ppc_exc_create_branch_op(
vector,
vector_base,
prologue,
prologue_template_size
)
) {
return RTEMS_INVALID_ADDRESS;
}
if (fixup_vector) {
/* Insert the vector number into the low 15 bits of the instruction at
 * the fixed word offset; vectors beyond 15 bits cannot be encoded */
if (vector <= 0x7fffU) {
prologue [PPC_EXC_PROLOG_VEC_OFFSET] =
(prologue [PPC_EXC_PROLOG_VEC_OFFSET] & 0xffff8000U)
| (vector & 0x7fffU);
} else {
return RTEMS_INVALID_ID;
}
}
} else {
return RTEMS_INVALID_SIZE;
}
return RTEMS_SUCCESSFUL;
}