adapted powerpc exception code

Thomas Doerfler
2008-07-11 10:02:12 +00:00
parent a86f3aac96
commit 25a92bc1ed
15 changed files with 1001 additions and 820 deletions

View File

@@ -1,3 +1,57 @@
+2008-07-11	Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+	* Makefile.am: Install powerpc-utility.h.
+	* shared/include/cpuIdent.h, shared/include/cpuIdent.c: Added e200 and
+	e300 features.
+	* new-exceptions/cpu.c: Removed PR288 bugfix check.
+	* new-exceptions/e500_raw_exc_init.c: Added initialization for e200.
+	Set IVPR register for e200 and e500 to ppc_exc_vector_base.
+	* new-exceptions/raw_exception.c, new-exceptions/raw_exception.h: Added
+	vector categories for e200 and e300 cores. Added global variable
+	ppc_exc_vector_base for CPUs with IVPR register.
+	* new-exceptions/bspsupport/ppc_exc.S,
+	new-exceptions/bspsupport/ppc_exc_asm_macros.h,
+	new-exceptions/bspsupport/ppc_exc_bspsupp.h,
+	new-exceptions/bspsupport/ppc_exc_hdl.c,
+	new-exceptions/bspsupport/vectors.h,
+	new-exceptions/bspsupport/vectors_init.c: Consistent code layout in
+	most assembler code sections and usage of defines for registers.
+	Usage of standard header files to avoid multiple definitions.
+	Optimized exception code: removed many branches and exploited branch
+	prediction for asynchronous exceptions, moved common wrap code into
+	the WRAP macro to eliminate a branch, and statically initialized the
+	handler table with a default handler to eliminate the test whether a
+	handler is present. Register CR6 is no longer cleared because the
+	exception handler functions are not variadic.
+	New method to switch to the interrupt stack: it is tested whether the
+	exception stack pointer already lies inside the interrupt stack area,
+	so it is no longer necessary to disable interrupts. SPRG1 and SPRG2
+	are used to store the initial interrupt stack pointer and the start
+	of the interrupt stack memory area (a C sketch follows this log
+	entry).
+	Removed the variable ppc_exc_msr_irq_mask; the general interrupt
+	disable mask from SPRG0 is used instead.
+	New initialization routine ppc_exc_initialize() for bsp_start(). It
+	takes the interrupt disable mask, interrupt stack start and size as
+	parameters.
+	Added packed prologues for CPUs with IVPR and IVOR registers to save
+	memory space.
+	Reverted the ppc_exc_crit_always_enabled change from yesterday.
+	WARNING: Tests with critical interrupt exceptions crash the system at
+	least on MPC8313ERDB and MPC8349EAMDS. There may be a serious bug
+	somewhere in the new code.
+
 2008-07-10	Till Straumann <strauman@slac.stanford.edu>
 
 	* mpc6xx/mmu/pte121.c: use general "memory" clobber
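
The stack-switch test described in the log entry above can be modeled in C. This is only an illustrative sketch: the real test runs in assembly (ppc_exc_asm_macros.h), and the _read_SPRG1()/_read_SPRG2() accessors are assumed helpers, not functions from this commit.

#include <stdint.h>

extern uint32_t _read_SPRG1(void); /* assumed accessor: initial interrupt stack pointer */
extern uint32_t _read_SPRG2(void); /* assumed accessor: interrupt stack area start */

static uint32_t switch_to_interrupt_stack(uint32_t exception_sp)
{
  uint32_t stack_start = _read_SPRG2();
  uint32_t stack_init = _read_SPRG1();

  /* Already inside the interrupt stack area (nested interrupt):
   * keep the current stack pointer; no interrupt disabling needed.
   */
  if (exception_sp >= stack_start && exception_sp <= stack_init) {
    return exception_sp;
  }

  /* First interrupt on this stack: start at the initial pointer */
  return stack_init;
}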

View File

@@ -13,7 +13,7 @@ include_rtems_powerpc_HEADERS = rtems/powerpc/cache.h \
 include_rtems_scoredir = $(includedir)/rtems/score
 include_libcpudir = $(includedir)/libcpu
-include_libcpu_HEADERS =
+include_libcpu_HEADERS = shared/include/powerpc-utility.h
 EXTRA_DIST =

View File

@@ -404,3 +404,22 @@ RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
 Note that synchronous machine-checks can legally
 use OS primitives and currently there are no
 asynchronous machine-checks defined.
+
+Epilogue:
+
+You have to disable all asynchronous exceptions which may cause a context
+switch before restoring the SRRs and issuing the RFI. Reason:
+Suppose we are in the epilogue code of an EE between the move to the SRRs
+and the RFI. Here EE is disabled but CE is enabled. Now a CE happens. The
+handler decides that a thread dispatch is necessary. The CE checks whether
+this is possible (sketched in C below):
+
+ o The thread dispatch disable level is 0, because the EE has already
+   decremented it.
+ o The EE lock variable is cleared.
+ o The EE is not executing its first instruction.
+
+Hence a thread dispatch is allowed. The CE issues a context switch to a
+task with EE enabled (for example a task waiting for a semaphore). Now an
+EE happens and the current content of the SRRs is lost.
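
The check list above can be rendered as a C predicate to make the race concrete. A sketch only: _Thread_Dispatch_disable_level and ppc_exc_lock_std follow this commit, while the first-instruction helper is hypothetical.

#include <stdbool.h>
#include <stdint.h>

extern volatile uint32_t _Thread_Dispatch_disable_level; /* RTEMS score */
extern uint32_t ppc_exc_lock_std; /* EE lock variable */
extern bool ppc_exc_in_first_instruction(void); /* hypothetical helper */

/* All three conditions hold in the scenario above, so the CE handler
 * wrongly concludes that a thread dispatch is safe although the
 * interrupted EE epilogue sits between the SRR move and the RFI.
 */
static bool ce_may_dispatch(void)
{
  return _Thread_Dispatch_disable_level == 0
    && ppc_exc_lock_std == 0
    && !ppc_exc_in_first_instruction();
}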

View File

@@ -3,6 +3,8 @@
  *
  * Modified and partially rewritten by Till Straumann, 2007
  *
+ * Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
+ *
  * Low-level assembly code for PPC exceptions.
  *
  * This file was written with the goal to eliminate
@@ -34,7 +36,7 @@
 	.global	ppc_exc_min_prolog_size
 ppc_exc_min_prolog_size = 4 * 4
 
 /* Special prologue for 603e-style CPUs.
  *
  * 603e shadows GPR0..GPR3 for certain exceptions. We must switch
  * that off before we can use the stack pointer. Note that this is
@@ -58,10 +60,22 @@ ppc_exc_tgpr_clr_prolog:
 	.global	ppc_exc_min_prolog_auto
 ppc_exc_min_prolog_auto:
 	stwu	r1, -EXCEPTION_FRAME_END(r1)
-	stw	r3, GPR3_OFFSET(r1)
-	mflr	r3
+	stw	VECTOR_REGISTER, VECTOR_OFFSET(r1)
+	mflr	VECTOR_REGISTER
 	bla	wrap_auto
 
+/**
+ * @brief Use vector offsets with 16-byte boundaries.
+ *
+ * @see ppc_exc_min_prolog_auto();
+ */
+	.global	ppc_exc_min_prolog_auto_packed
+ppc_exc_min_prolog_auto_packed:
+	stwu	r1, -EXCEPTION_FRAME_END(r1)
+	stw	VECTOR_REGISTER, VECTOR_OFFSET(r1)
+	mflr	VECTOR_REGISTER
+	bla	wrap_auto_packed
+
 	.global ppc_exc_tgpr_clr_prolog_size
 ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
@@ -79,8 +93,8 @@ ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
 	.global	ppc_exc_min_prolog_auto_async
 ppc_exc_min_prolog_auto_async:
 	stw	r1, ppc_exc_lock_std@sdarel(r13)
-	stw	r3, ppc_exc_gpr3_std@sdarel(r13)
-	mflr	r3
+	stw	VECTOR_REGISTER, ppc_exc_vector_register_std@sdarel(r13)
+	mflr	VECTOR_REGISTER
 	bla	wrap_auto_async
 
 /******************************************************/
@@ -112,283 +126,52 @@ __ppc_exc_wrappers_start = .
 /* e500 has machine-check exception using mcsrr0 mcssr1 */
 WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
 
-/* LR holds vector, r3 holds orig. LR */
+	/* LR holds vector, VECTOR_REGISTER holds orig. LR */
 wrap_auto:
-	stw	r14, GPR14_OFFSET(r1)
-	/* find address where we jumped from */
-	mflr	r14
-	/* restore LR */
-	mtlr	r3
-	/* compute vector into R3 */
-	rlwinm	r3, r14, 24, 26, 31
-	/* we're now in almost the same state as if called by
-	 * min_prolog_std but we must skip saving r14
+	stw	FRAME_REGISTER, FRAME_OFFSET(r1)
+
+	/* Find address where we jumped from */
+	mflr	FRAME_REGISTER
+
+	/* Restore LR */
+	mtlr	VECTOR_REGISTER
+
+	/* Compute vector into R3 */
+	rlwinm	VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
+
+	/*
+	 * We're now in almost the same state as if called by
+	 * min_prolog_std but we must skip saving FRAME_REGISTER
 	 * since that's done already
 	 */
-	b	wrap_no_save_r14_std
+	b	wrap_no_save_frame_register_std
+
+	/* See: wrap_auto */
+wrap_auto_packed:
+	stw	FRAME_REGISTER, FRAME_OFFSET(r1)
+	mflr	FRAME_REGISTER
+	mtlr	VECTOR_REGISTER
+	rlwinm	VECTOR_REGISTER, FRAME_REGISTER, 28, 26, 31
+	b	wrap_no_save_frame_register_std
 
 wrap_auto_async:
 	stwu	r1, -EXCEPTION_FRAME_END(r1)
-	stw	r14, GPR14_OFFSET(r1)
+	stw	FRAME_REGISTER, FRAME_OFFSET(r1)
 	/* find address where we jumped from */
-	mflr	r14
+	mflr	FRAME_REGISTER
 	/* restore LR */
-	mtlr	r3
+	mtlr	VECTOR_REGISTER
 	/* set upper bits to indicate that non-volatile
 	 * registers should not be saved/restored.
 	 */
-	li	r3, 0xffff8000
+	li	VECTOR_REGISTER, 0xffff8000
 	/* compute vector into R3 */
-	rlwimi	r3, r14, 24, 26, 31
+	rlwimi	VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
 	/* we're now in almost the same state as if called by
-	 * min_prolog_std but we must skip saving r14
+	 * min_prolog_std but we must skip saving FRAME_REGISTER
 	 * since that's done already
 	 */
-	b	wrap_no_save_r14_std
+	b	wrap_no_save_frame_register_std
-/*
- * Common code for all flavors of exception and whether
- * they are synchronous or asynchronous.
- *
- * Call with
- *  r3 : vector
- *  r4 : srr0
- *  r5 : srr1
- *  r14: exception frame
- *  cr4: OR of lower-priority locks
- *  cr2: exception type (asyn/isr [<0] or synchronous [>=0])
- *  lr : is updated by 'bl'
- *  all others: original state
- *
- * If this is an asynchronous exception ( cr2 < 0 ):
- *  - save volatile registers only,
- *  - disable thread dispatching,
- *  - switch to interrupt stack (if necessary),
- *  - call the C-dispatcher,
- *  - switch back the stack,
- *  - decrement the dispatch-disable level
- *  - check if it is safe to dispatch (disable-level must be 0
- *    AND no lower-priority asynchronous exception must be under
- *    way (as indicated by the lock variables).
- *  - If it would be OK to dispatch, call the C-wrapup code.
- *  - restore volatile registers
- *
- * Otherwise, i.e., if we are dealing with a synchronous exception
- * then:
- *  - save all registers
- *  - call the C-dispatcher
- *  - restore registers
- */
-wrap_common:
-	stw	r4, SRR0_FRAME_OFFSET(r14)
-	stw	r5, SRR1_FRAME_OFFSET(r14)
-	/* prepare for calling C code; */
-	/* use non-volatile r15 for remembering lr */
-	stw	r15, GPR15_OFFSET(r14)
-	/* save vector; negative if only scratch regs. are valid */
-	stw	r3, EXCEPTION_NUMBER_OFFSET(r14)
-	/* save scratch registers */
-	/* r2 should be unused or fixed anyways (eabi sdata2) */
-	stw	r0, GPR0_OFFSET(r14)
-	stw	r2, GPR2_OFFSET(r14)
-	stw	r6, GPR6_OFFSET(r14)
-	stw	r7, GPR7_OFFSET(r14)
-	stw	r8, GPR8_OFFSET(r14)
-	stw	r9, GPR9_OFFSET(r14)
-	stw	r10, GPR10_OFFSET(r14)
-	stw	r11, GPR11_OFFSET(r14)
-	stw	r12, GPR12_OFFSET(r14)
-	/* r13 must be fixed anyways (sysv sdata) */
-	/* save LR */
-	mflr	r15
-	mfctr	r4
-	mfxer	r5
-	stw	r4, EXC_CTR_OFFSET(r14)
-	stw	r5, EXC_XER_OFFSET(r14)
-	/*
-	 * Switch MMU / RI on if necessary;
-	 * remember decision in cr3
-	 */
-	lwz	r4, ppc_exc_msr_bits@sdarel(r13)
-	cmpwi	cr3, r4, 0
-	beq	cr3, 1f
-	mfmsr	r5
-	or	r5, r5, r4
-	mtmsr	r5
-	sync
-	isync
-1:
-	/* If this is a asynchronous exception we skip ahead */
-	blt	cr2, skip_save_nonvolatile_regs
-	/* YES; they want everything ('normal exception') */
-	/* save original stack pointer */
-	lwz	r5, EXC_MIN_GPR1(r14)
-	stw	r5, GPR1_OFFSET(r14)
-	stw	r13, GPR13_OFFSET(r14)
-	/* store r16..r31 into the exception frame */
-	stmw	r16, GPR16_OFFSET(r14)
-skip_save_nonvolatile_regs:
-	/* store address of exception frame in r4; vector is in r3 */
-	addi	r4, r14, FRAME_LINK_SPACE
-	/* load hi-halfword of C wrapper address */
-	lis	r5, ppc_exc_C_wrapper@h
-	/* clear CR[6] to make sure no vararg callee assumes that
-	 * there are any valid FP regs
-	 */
-	crxor	6,6,6
-	/* merge lo-halfword of C wrapper address */
-	ori	r5, r5, ppc_exc_C_wrapper@l
-	/* Far branch to ppc_C_wrapper */
-	mtlr	r5
-	blrl
-	/* do not clobber r3 since we pass the return value
-	 * of ppc_exc_C_wrapper on to ppc_exc_wrapup
-	 */
-	/* skip decrementing the thread-dispatch disable level
-	 * and calling ppc_exc_wrapup if this is a synchronous
-	 * exception.
-	 */
-	bge	cr2, restore_nonvolatile_regs
-	/* decrement ISR nest level;
-	 * disable all interrupts.
-	 * (Disabling IRQs here is not necessary if we
-	 * use the stack-switching strategy which tests
-	 * if we are alreay on the ISR-stack as opposed
-	 * to test the nesting level; see ppc_exc_asm_macros.h)
-	 */
-	lwz	r4, ppc_exc_msr_irq_mask@sdarel(r13)
-	mfmsr	r5
-	andc	r4, r5, r4
-	mtmsr	r4
-	lwz	r4, _ISR_Nest_level@sdarel(r13)
-	addi	r4, r4, -1
-	stw	r4, _ISR_Nest_level@sdarel(r13)
-	/*
-	 * switch back to original stack (r14 == r1 if we are
-	 * still on the IRQ stack).
-	 */
-	mr	r1, r14
-	/* restore interrupt mask */
-	mtmsr	r5
-	/* decrement thread_dispatch level and check
-	 * if we have to run the dispatcher.
-	 */
-	lwz	r5, _Thread_Dispatch_disable_level@sdarel(r13)
-	addic.	r5, r5, -1
-	stw	r5, _Thread_Dispatch_disable_level@sdarel(r13)
-	/* test _Thread_Dispatch_disable nesting level AND
-	 * lower priority locks (in cr4); ONLY if
-	 * _Thread_Dispatch_disable_level == 0 AND no lock is set
-	 * then call ppc_exc_wrapup which may do a context switch.
-	 */
-	crand	EQ(cr0), EQ(cr0), EQ(cr4)
-	bne	2f
-	crxor	6,6,6
-	/* Far branch to ppc_exc_wrapup */
-	lis	r5, ppc_exc_wrapup@h
-	addi	r4, r14, FRAME_LINK_SPACE
-	ori	r5, r5, ppc_exc_wrapup@l
-	mtlr	r5
-	blrl
-2:
-	lwz	r14, GPR14_OFFSET(r1)
-	/* we can skip restoring r16..r31 */
-	b	skip_restore_nonvolatile_regs
-restore_nonvolatile_regs:
-	/* synchronous exc: restore everything from the exception frame */
-	lwz	r14, GPR14_OFFSET(r1)
-	/* restore stack pointer */
-	lwz	r5, GPR1_OFFSET(r1)
-	stw	r5, EXC_MIN_GPR1(r1)
-	/* restore non-volatile regs */
-	lwz	r13, GPR13_OFFSET(r1)
-	lmw	r16, GPR16_OFFSET(r1)
-skip_restore_nonvolatile_regs:
-	lwz	r3, EXC_XER_OFFSET(r1)
-	lwz	r4, EXC_CTR_OFFSET(r1)
-	mtxer	r3
-	mtctr	r4
-	/* restore lr, r15 */
-	mtlr	r15
-	lwz	r15, GPR15_OFFSET(r1)
-	/* restore scratch regs */
-	lwz	r12, GPR12_OFFSET(r1)
-	lwz	r11, GPR11_OFFSET(r1)
-	lwz	r10, GPR10_OFFSET(r1)
-	lwz	r9, GPR9_OFFSET(r1)
-	lwz	r8, GPR8_OFFSET(r1)
-	lwz	r7, GPR7_OFFSET(r1)
-	lwz	r6, GPR6_OFFSET(r1)
-	/* r4, r5 are eventually restored by caller */
-	lwz	r3, GPR3_OFFSET(r1)
-	lwz	r2, GPR2_OFFSET(r1)
-	/* r1, is eventually restored by caller */
-	lwz	r0, GPR0_OFFSET(r1)
-	beq	cr3, 2f
-	/* restore MSR settings */
-	lwz	r5, ppc_exc_msr_bits@sdarel(r13)
-	mfmsr	r4
-	andc	r4, r4, r5
-	mtmsr	r4
-	sync
-	isync
-2:
-	lwz	r4, EXC_CR_OFFSET(r1)
-	mtcr	r4
-	/* Must disable interrupts prior to restoring SSRs.
-	 * Here's a scenario discovered by Sebastian Huber:
-	 *   1) CE happens between writing to SRR and RFI
-	 *   2) CE handler does something which requires a task switch
-	 *   3) CE wrapper returns and determines that task switch
-	 *      is OK since EE lock is not held, dispatch-disable level
-	 *      is zero etc.
-	 *   4) switch to other task enables EE
-	 *   5) eventually, switch back to task interrupted by 1)
-	 *   6) RFI happens but SRR contents have been clobbered.
-	 */
-	lwz	r4, ppc_exc_msr_irq_mask@sdarel(r13)
-	mfmsr	r5
-	andc	r4, r5, r4
-	mtmsr	r4
-	/* restore SRR and stack */
-	lwz	r4, SRR0_FRAME_OFFSET(r1)
-	lwz	r5, SRR1_FRAME_OFFSET(r1)
-	blr
 
 	.global	__ppc_exc_wrappers_end
 __ppc_exc_wrappers_end = .
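
The rlwinm instructions in wrap_auto and wrap_auto_packed recover the vector number from the prologue's return address; in C terms (a sketch of the arithmetic, not project code):

#include <stdint.h>

/* rlwinm VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31: rotate left by 24
 * acts as a right shift by 8 for the classic 256-byte vector spacing.
 */
static unsigned vector_from_lr_classic(uint32_t lr)
{
  return (lr >> 8) & 0x3f;
}

/* rlwinm VECTOR_REGISTER, FRAME_REGISTER, 28, 26, 31: rotate left by 28
 * acts as a right shift by 4 for the packed 16-byte vector spacing.
 */
static unsigned vector_from_lr_packed(uint32_t lr)
{
  return (lr >> 4) & 0x3f;
}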

View File

@@ -13,6 +13,10 @@
 #ifndef PPC_EXC_SHARED_H
 #define PPC_EXC_SHARED_H
 
+#include <stdint.h>
+
+#include "vectors.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -33,7 +37,6 @@ extern "C" {
  *
  * Other return values are reserved.
  */
-
 typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
 
 /*
@@ -46,52 +49,6 @@ typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
  */
 extern uint32_t ppc_exc_msr_bits;
 
-/*
- * Set of MSR bits required to disable all
- * asynchronous exceptions (depends on CPU type;
- * must be set during initialization).
- * Interrupt are disabled by writing the
- * one's complement of this mask to msr:
- * msr &= ~ppc_exc_msr_irq_mask;
- */
-extern uint32_t ppc_exc_msr_irq_mask;
-
-/*
- * Cache size of the interrupt stack in a SDA variable
- */
-extern uint32_t ppc_exc_intr_stack_size;
-
-/*
- * This variable defines the semantics of asynchronous
- * critical exceptions ("critical interrupts")
- * on BookE-style CPUs.
- *
- * There are the following ways of using these interrupts
- *
- *  1) permanently disabled; no support
- *  2) permanently enabled; handlers for critical interrupts
- *     MUST NOT use any RTEMS primitives at all. They cannot
- *     directly e.g., release a semaphore.
- *  3) enabled/disabled by the OS during critical sections.
- *     In this scenario critical interrupts are not much
- *     different from 'normal' interrupts but handlers may
- *     safely use RTEMS primitives (i.e., the subset which
- *     is OK to use from ISR context).
- *
- * The BSP (or application) may initialize this
- * variable PRIOR to calling 'initialize_exceptions'
- * to any of the following values:
- *
- * NOTE: so far, OS_SUPPORT is not supported by the cpukit
- *       yet since the IRQ/enabling-disabling primitives
- *       do not mask MSR_CE yet.
- */
-#define PPC_EXC_CRIT_NO_OS_SUPPORT    1
-#define PPC_EXC_CRIT_OS_SUPPORT       0
-#define PPC_EXC_CRIT_DISABLED       (-1)
-extern int32_t ppc_exc_crit_always_enabled;
-
 /* (See README under CAVEATS). During initialization
  * a check is performed to assert that write-back
  * caching is enabled for memory accesses. If a BSP
@@ -169,6 +126,8 @@ extern void ppc_exc_tgpr_clr_prolog();
  */
 extern void ppc_exc_min_prolog_auto();
 
+extern void ppc_exc_min_prolog_auto_packed();
+
 /* CPU support may store the address of a function here
  * that can be used by the default exception handler to

View File

@@ -39,11 +39,9 @@ uint32_t ppc_exc_lock_std = 0;
 uint32_t ppc_exc_lock_crit = 0;
 uint32_t ppc_exc_lock_mchk = 0;
 
-uint32_t ppc_exc_gpr3_std  = 0;
-uint32_t ppc_exc_gpr3_crit = 0;
-uint32_t ppc_exc_gpr3_mchk = 0;
-
-uint32_t ppc_exc_msr_irq_mask = MSR_EE;
+uint32_t ppc_exc_vector_register_std  = 0;
+uint32_t ppc_exc_vector_register_crit = 0;
+uint32_t ppc_exc_vector_register_mchk = 0;
 
 /* MSR bits to enable once critical status info is saved and the stack
  * is switched; must be set depending on CPU type
@@ -53,74 +51,44 @@ uint32_t ppc_exc_msr_irq_mask = MSR_EE;
  */
 uint32_t ppc_exc_msr_bits = MSR_IR | MSR_DR | MSR_RI;
 
-uint32_t ppc_exc_intr_stack_size = 0;
-
-int32_t ppc_exc_crit_always_enabled = PPC_EXC_CRIT_NO_OS_SUPPORT;
+int ppc_exc_handler_default( BSP_Exception_frame *f, unsigned int vector)
+{
+	return 1;
+}
 
 /* Table of C-handlers */
-static ppc_exc_handler_t ppc_exc_handlers[LAST_VALID_EXC + 1] = {0, };
+ppc_exc_handler_t ppc_exc_handler_table [LAST_VALID_EXC + 1] = {
+	[0 ... LAST_VALID_EXC] = ppc_exc_handler_default
+};
 
-ppc_exc_handler_t
-ppc_exc_get_handler(unsigned vector)
+ppc_exc_handler_t ppc_exc_get_handler( unsigned vector)
 {
-	if ( vector > LAST_VALID_EXC )
-		return 0;
-	return ppc_exc_handlers[vector];
+	ppc_exc_handler_t handler = NULL;
+
+	if (vector > LAST_VALID_EXC) {
+		return 0;
+	}
+
+	if (ppc_exc_handler_table [vector] != ppc_exc_handler_default) {
+		handler = ppc_exc_handler_table [vector];
+	}
+
+	return handler;
 }
 
-int
-ppc_exc_set_handler(unsigned vector, ppc_exc_handler_t hdl)
+int ppc_exc_set_handler( unsigned vector, ppc_exc_handler_t handler)
 {
-	if ( vector > LAST_VALID_EXC )
-		return -1;
-	ppc_exc_handlers[vector] = hdl;
+	if (vector > LAST_VALID_EXC) {
+		return -1;
+	}
+
+	if (handler == NULL) {
+		ppc_exc_handler_table [vector] = ppc_exc_handler_default;
+	} else {
+		ppc_exc_handler_table [vector] = handler;
+	}
+
 	return 0;
 }
 
-/* This routine executes on the interrupt stack (if vect < 0) */
-int
-ppc_exc_C_wrapper(int vect, BSP_Exception_frame *f)
-{
-	unsigned int i = vect & 0x3f;
-	int rval = 1;
-
-	if ( i <= LAST_VALID_EXC && ppc_exc_handlers[i] ) {
-		rval = ppc_exc_handlers[i](f, i);
-	}
-
-	if ( rval ) {
-		/* not handled, so far ... */
-		if ( globalExceptHdl ) {
-			/*
-			 * global handler must be prepared to
-			 * deal with asynchronous exceptions!
-			 */
-			globalExceptHdl(f);
-		}
-		rval = 0;
-	}
-
-	if ( (ppc_exc_msr_bits ^ f->EXC_SRR1) & MSR_RI ) {
-		printk("unrecoverable exception (RI was clear), spinning to death.\n");
-		while (1)
-			;
-	}
-
-	return rval;
-}
-
-void
-ppc_exc_wrapup(int ll_rval, BSP_Exception_frame *f)
+void ppc_exc_wrapup( BSP_Exception_frame *f)
 {
-	/* Check if we need to run the global handler now */
-	if ( ll_rval ) {
-		/* We get here if ppc_exc_C_wrapper() returned nonzero.
-		 * This could be useful if we need to do something
-		 * with thread-dispatching enabled (at this point it is)
-		 * after handling an asynchronous exception.
-		 */
-	}
-
 	/* dispatch_disable level is decremented from assembly code. */
 	if ( _Context_Switch_necessary ) {
 		/* FIXME: I believe it should be OK to re-enable

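A hedged usage sketch for the revised handler-table API shown above (the handler body and its installation site are illustrative; returning 0 means handled, nonzero falls through to the global handler):

#include <stddef.h>

#include "ppc_exc_bspsupp.h" /* declares ppc_exc_set_handler() in this commit */

static int my_prog_handler(BSP_Exception_frame *f, unsigned int vector)
{
  /* inspect f->EXC_SRR0 and friends here */
  return 0; /* handled */
}

static void install_handlers(void)
{
  /* install a handler for program exceptions (vector 0x07) */
  ppc_exc_set_handler(0x07, my_prog_handler);

  /* passing NULL restores ppc_exc_handler_default */
  ppc_exc_set_handler(0x07, NULL);
}

Note that ppc_exc_get_handler() now returns NULL both for out-of-range vectors and when only the default handler is installed.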
View File

@@ -14,6 +14,7 @@
  */
 #ifndef LIBCPU_POWERPC_BSPSUPP_VECTORS_H
 #define LIBCPU_POWERPC_BSPSUPP_VECTORS_H
+
 #include <libcpu/raw_exception.h>
 
 /*
@@ -61,6 +62,10 @@
 #define EXC_CTR_OFFSET 152
 #define EXC_XER_OFFSET 156
 #define EXC_LR_OFFSET 160
+
+/* Exception stack frame -> BSP_Exception_frame */
+#define FRAME_LINK_SPACE 8
+
 /*
  * maintain the EABI requested 8 bytes alignment
  * As SVR4 ABI requires 16, make it 16 (as some
@@ -70,6 +75,8 @@
 #ifndef ASM
 
+#include <stdint.h>
+
 /* codemove is like memmove, but it also gets the cache line size
  * as 4th parameter to synchronize them. If this last parameter is
  * zero, it performs more or less like memmove. No copy is performed if
@@ -80,7 +87,12 @@
 extern void * codemove(void *, const void *, unsigned int, unsigned long);
 extern void exception_nop_enable(const rtems_raw_except_connect_data* ptr);
 extern int exception_always_enabled(const rtems_raw_except_connect_data* ptr);
-extern void initialize_exceptions();
+
+void ppc_exc_initialize(
+  uint32_t interrupt_disable_mask,
+  uint32_t interrupt_stack_start,
+  uint32_t interrupt_stack_size
+);
 
 typedef struct _BSP_Exception_frame {
 	unsigned EXC_SRR0;

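How a BSP might call the new entry point declared above in place of the old initialize_exceptions() (a sketch: the MSR_EE mask choice and the stack symbols are placeholders a real BSP takes from its CPU setup and linker script):

#include <stdint.h>

/* Placeholder linker-script symbols for the interrupt stack area */
extern char IntrStackStart[];
extern char IntrStackEnd[];

void bsp_exceptions_init(void) /* hypothetical BSP hook called from bsp_start() */
{
  ppc_exc_initialize(
    MSR_EE, /* interrupt disable mask; BookE BSPs would add MSR_CE | MSR_DE */
    (uint32_t) IntrStackStart,
    (uint32_t) (IntrStackEnd - IntrStackStart)
  );
}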
View File

@@ -47,6 +47,8 @@ typedef struct LRFrameRec_ {
 #define STACK_CLAMP 50	/* in case we have a corrupted bottom */
 
+SPR_RW(SPRG1)
+SPR_RW(SPRG2)
 SPR_RO(LR)
 SPR_RO(DAR)
 #define DEAR_BOOKE 61
@@ -250,11 +252,11 @@ ppc_exc_min_prolog_template_t tmpl;
 	return (rtems_raw_except_func)prologues[n_prolog++];
 }
 
-void ppc_exc_init(
+void ppc_exc_table_init(
 	rtems_raw_except_connect_data *exception_table,
 	int nEntries)
 {
-int i,v;
+unsigned i,v;
 ppc_raw_exception_category cat;
 uintptr_t vaddr;
@@ -270,11 +272,17 @@ uintptr_t vaddr;
 	exception_config.rawExceptHdlTbl = exception_table;
 	exception_config.defaultRawEntry.exceptIndex = 0;
 	exception_config.defaultRawEntry.hdl.vector = 0;
+	if (ppc_cpu_has_ivpr_and_ivor()) {
+		/* Use packed version with 16-byte boundaries for CPUs with IVPR and IVOR registers */
+		exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto_packed;
+	} else {
 	/* Note that the 'auto' handler cannot be used for everything; in particular,
 	 * it assumes classic exceptions with a vector offset aligned on a 256-byte
 	 * boundary.
 	 */
 	exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto;
+	}
 
 	/*
 	 * Note that the cast of an array address to an unsigned
@@ -306,7 +314,7 @@ uintptr_t vaddr;
 	 * default prolog can handle classic, synchronous exceptions
 	 * with a vector offset aligned on a 256-byte boundary.
 	 */
-	if ( PPC_EXC_CLASSIC == cat && 0 == ( vaddr & 0xff ) ) {
+	if (cat == PPC_EXC_CLASSIC && ((vaddr & 0xff) == 0 || (ppc_cpu_has_ivpr_and_ivor() && (vaddr & 0xf) == 0))) {
 		exception_table[i].hdl.raw_hdl_size = exception_config.defaultRawEntry.hdl.raw_hdl_size;
 		exception_table[i].hdl.raw_hdl = exception_config.defaultRawEntry.hdl.raw_hdl;
 	} else {
@@ -329,44 +337,45 @@ uintptr_t vaddr;
 #endif
 }
 
-void initialize_exceptions()
+void ppc_exc_initialize(
+	uint32_t interrupt_disable_mask,
+	uint32_t interrupt_stack_start,
+	uint32_t interrupt_stack_size
+)
 {
 int i;
 int n = sizeof(exception_table)/sizeof(exception_table[0]);
+uint32_t interrupt_stack_end = 0;
+uint32_t interrupt_stack_pointer = 0;
+uint32_t *p = NULL;
+
+	/* Ensure proper interrupt stack alignment */
+	interrupt_stack_start &= ~(CPU_STACK_ALIGNMENT - 1);
+	interrupt_stack_size &= ~(CPU_STACK_ALIGNMENT - 1);
+
+	/* Interrupt stack end and pointer */
+	interrupt_stack_end = interrupt_stack_start + interrupt_stack_size;
+	interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
+
+	/* Tag interrupt stack bottom */
+	p = (uint32_t *) interrupt_stack_pointer;
+	*p = 0;
+
+	/* Move interrupt stack values to special purpose registers */
+	_write_SPRG1( interrupt_stack_pointer);
+	_write_SPRG2( interrupt_stack_start);
+
+	/* Interrupt disable mask */
+	ppc_interrupt_set_disable_mask( interrupt_disable_mask);
 
 	/* Use current MMU / RI settings when running C exception handlers */
 	ppc_exc_msr_bits = _read_MSR() & ( MSR_DR | MSR_IR | MSR_RI );
 
-	/* Cache size of the interrupt stack in a SDA variable */
-	ppc_exc_intr_stack_size = rtems_configuration_get_interrupt_stack_size();
-
-	/* Copy into a SDA variable that is easy to access from
-	 * assembly code
-	 */
-	if ( ppc_cpu_is_bookE() ) {
-		ppc_exc_msr_irq_mask = MSR_EE | MSR_CE | MSR_DE ;
-		switch (ppc_exc_crit_always_enabled) {
-			case PPC_EXC_CRIT_NO_OS_SUPPORT:
-				_write_MSR(_read_MSR() | (MSR_CE | MSR_DE));
-			break;
-			case PPC_EXC_CRIT_OS_SUPPORT:
-				printk("ppc_exc: PPC_EXC_CRIT_OS_SUPPORT not yet implemented\n");
-			/* fall thru */
-			case PPC_EXC_CRIT_DISABLED:
-			default:
-				ppc_exc_crit_always_enabled = PPC_EXC_CRIT_DISABLED;
-				_write_MSR(_read_MSR() & ~(MSR_CE | MSR_DE));
-			break;
-		}
-	} else {
-		ppc_exc_msr_irq_mask = MSR_EE ;
-	}
-
 	for ( i=0; i<n; i++ )
 		exception_table[i].hdl.vector = i;
-	ppc_exc_init(exception_table, n);
+	ppc_exc_table_init(exception_table, n);
 
 	/* If we are on a classic PPC with MSR_DR enabled then
 	 * assert that the mapping for at least this task's

View File

@@ -47,15 +47,7 @@ void _CPU_Initialize(
 	void      (*thread_dispatch)      /* ignored on this CPU */
 )
 {
-	{
-		unsigned hasFixed = 0;
-		/* assert that our BSP has fixed PR288 */
-		__asm__ __volatile__ ("mfspr %0, %2":
-			"=r"(hasFixed):"0"(hasFixed),"i"(SPRG0));
-		if ( PPC_BSP_HAS_FIXED_PR288 != hasFixed ) {
-			BSP_panic("This BSP needs to fix PR#288");
-		}
-	}
+	/* Do nothing */
 }
 
 /*PAGE

View File

@@ -1,16 +1,20 @@
 #include <libcpu/cpuIdent.h>
 #include <libcpu/raw_exception.h>
 
+#define MTIVPR(prefix) asm volatile("mtivpr %0"::"r"(prefix));
 #define MTIVOR(x, vec) asm volatile("mtivor"#x" %0"::"r"(vec));
 
 /* Use during early init for initializing the e500 IVOR/IVPR registers */
 void
 e500_setup_raw_exceptions()
 {
 unsigned c;
 	if ( ! (c = ppc_cpu_is_bookE()) || PPC_BOOKE_405 == c )
 		return;
-	asm volatile("mtivpr %0"::"r"(0));
+
+	/* Set interrupt vector prefix register */
+	MTIVPR( ppc_exc_vector_base);
+
 	/* setup vectors to be compatible with classic PPC */
 	MTIVOR(0, ppc_get_vector_addr(ASM_BOOKE_CRIT_VECTOR)); /* Critical input not (yet) supported; use reset vector */
 	MTIVOR(1, ppc_get_vector_addr(ASM_MACH_VECTOR));
@@ -33,3 +37,34 @@ unsigned c;
 	MTIVOR(34, ppc_get_vector_addr(0x15));
 	MTIVOR(35, ppc_get_vector_addr(ASM_60X_PERFMON_VECTOR));
 }
+
+void e200_setup_raw_exceptions()
+{
+	if (current_ppc_cpu != PPC_e200z6) {
+		return;
+	}
+
+	/* Interrupt vector prefix register */
+	MTIVPR( ppc_exc_vector_base);
+
+	/* Interrupt vector offset registers */
+	MTIVOR( 0, 0); /* Critical input */
+	MTIVOR( 1, ppc_get_vector_addr( ASM_MACH_VECTOR));
+	MTIVOR( 2, ppc_get_vector_addr( ASM_PROT_VECTOR));
+	MTIVOR( 3, ppc_get_vector_addr( ASM_ISI_VECTOR));
+	MTIVOR( 4, ppc_get_vector_addr( ASM_EXT_VECTOR));
+	MTIVOR( 5, ppc_get_vector_addr( ASM_ALIGN_VECTOR));
+	MTIVOR( 6, ppc_get_vector_addr( ASM_PROG_VECTOR));
+	MTIVOR( 7, ppc_get_vector_addr( ASM_FLOAT_VECTOR));
+	MTIVOR( 8, ppc_get_vector_addr( ASM_SYS_VECTOR));
+	MTIVOR( 9, 0); /* APU unavailable */
+	MTIVOR( 10, ppc_get_vector_addr( ASM_BOOKE_DEC_VECTOR));
+	MTIVOR( 11, ppc_get_vector_addr( ASM_BOOKE_FIT_VECTOR));
+	MTIVOR( 12, ppc_get_vector_addr( ASM_BOOKE_WDOG_VECTOR));
+	MTIVOR( 13, ppc_get_vector_addr( ASM_BOOKE_ITLBMISS_VECTOR));
+	MTIVOR( 14, ppc_get_vector_addr( ASM_BOOKE_DTLBMISS_VECTOR));
+	MTIVOR( 15, ppc_get_vector_addr( ASM_TRACE_VECTOR));
+	MTIVOR( 32, ppc_get_vector_addr( ASM_E200_SPE_UNAVAILABLE_VECTOR));
+	MTIVOR( 33, ppc_get_vector_addr( ASM_E200_SPE_DATA_VECTOR));
+	MTIVOR( 34, ppc_get_vector_addr( ASM_E200_SPE_ROUND_VECTOR));
+}
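
The MTIVOR macro above builds each mnemonic by stringification: #x turns the vector-register number into a string literal that is concatenated into the instruction. A stand-alone illustration of the same preprocessor trick (demo name invented):

#define DEMO_MTIVOR(x, vec) asm volatile("mtivor" #x " %0" : : "r" (vec))

/* DEMO_MTIVOR(4, addr) expands to
 *   asm volatile("mtivor4 %0" : : "r" (addr));
 * i.e. a move to the IVOR4 (external input) vector offset register.
 */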

View File

@@ -56,6 +56,8 @@ boolean bsp_exceptions_in_RAM = TRUE;
  */
 uint32_t bsp_raw_vector_is_405_critical = 0;
 
+uint32_t ppc_exc_vector_base = 0;
+
 void* ppc_get_vector_addr(rtems_vector vector)
 {
 unsigned vaddr;
@@ -94,13 +96,18 @@ void* ppc_get_vector_addr(rtems_vector vector)
 		default:
 			break;
 	}
-	if ( bsp_exceptions_in_RAM )
-		return ((void*) vaddr);
+
+	if (bsp_exceptions_in_RAM) {
+		if (ppc_cpu_has_ivpr_and_ivor()) {
+			return ((void*) ((vaddr >> 4) + ppc_exc_vector_base));
+		} else {
+			return ((void*) (vaddr + ppc_exc_vector_base));
+		}
+	}
+
 	return ((void*) (vaddr + 0xfff00000));
 }
 
-static cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_PROT_VECTOR ] = PPC_EXC_CLASSIC,
@@ -128,7 +135,7 @@ static cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
 };
 
-static cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
@@ -153,7 +160,7 @@ static cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_5XX_NMEBREAK_VECTOR ] = PPC_EXC_CLASSIC,
 };
 
-static cat_ini_t ppc_405_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t ppc_405_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_EXT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
 [ ASM_BOOKE_DEC_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
@@ -197,14 +204,14 @@ static ppc_raw_exception_category altivec_vector_is_valid(rtems_vector vector)
 	return PPC_EXC_INVALID;
 }
 
-static cat_ini_t mpc_750_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_750_vector_categories[LAST_VALID_EXC + 1] = {
 PPC_BASIC_VECS,
 [ ASM_60X_SYSMGMT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
 [ ASM_60X_ADDR_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_60X_ITM_VECTOR ] = PPC_EXC_CLASSIC,
 };
 
-static cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_PROT_VECTOR ] = PPC_EXC_CLASSIC,
@@ -225,7 +232,7 @@ static cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
 };
 
-static cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
 PPC_BASIC_VECS,
 [ ASM_60X_PERFMON_VECTOR ] = PPC_EXC_INVALID,
 [ ASM_60X_SYSMGMT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
@@ -236,7 +243,7 @@ static cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
 };
 
-static cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
 PPC_BASIC_VECS,
 [ ASM_60X_PERFMON_VECTOR ] = PPC_EXC_CLASSIC,
 [ ASM_60X_IMISS_VECTOR ] = PPC_EXC_INVALID,
@@ -247,7 +254,41 @@ static cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
 };
 
+static const cat_ini_t e200_vector_categories [LAST_VALID_EXC + 1] = {
+	[ASM_MACH_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+	[ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+	[ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+	[ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+	[ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+	[ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+	/* FIXME: Depending on HDI0[DAPUEN] this is a critical or debug exception */
+	[ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_BOOKE_CRITICAL,
+	[ASM_E200_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E200_SPE_DATA_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E200_SPE_ROUND_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const cat_ini_t e300_vector_categories [LAST_VALID_EXC + 1] = {
+	PPC_BASIC_VECS,
+	[ASM_E300_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+	[ASM_E300_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E300_IMISS_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E300_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E300_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E300_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+	[ASM_E300_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+};
+
-static cat_ini_t e500_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t e500_vector_categories[LAST_VALID_EXC + 1] = {
 [ ASM_MACH_VECTOR ] = PPC_EXC_E500_MACHCHK,
 [ ASM_BOOKE_CRIT_VECTOR ] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
@@ -307,10 +348,12 @@ ppc_raw_exception_category rval = PPC_EXC_INVALID;
 		case PPC_8260:
 		/* case PPC_8240: -- same value as 8260 */
 		case PPC_8245:
+			rval = mpc_603_vector_categories[vector];
+			break;
 		case PPC_e300c1:
 		case PPC_e300c2:
 		case PPC_e300c3:
-			rval = mpc_603_vector_categories[vector];
+			rval = e300_vector_categories[vector];
 			break;
 		case PPC_PSIM:
 			rval = psim_vector_categories[vector];
@@ -318,6 +361,9 @@ ppc_raw_exception_category rval = PPC_EXC_INVALID;
 		case PPC_8540:
 			rval = e500_vector_categories[vector];
 			break;
+		case PPC_e200z6:
+			rval = e200_vector_categories[vector];
+			break;
 		case PPC_5XX:
 			rval = mpc_5xx_vector_categories[vector];
 			break;
@@ -454,7 +500,10 @@ int ppc_init_exceptions (rtems_raw_except_global_settings* config)
 	rtems_interrupt_disable(k);
 
-	if ( (c = ppc_cpu_is_bookE()) && PPC_BOOKE_405 != c ) {
+	/* FIXME special case selection method */
+	if (current_ppc_cpu == PPC_e200z6) {
+		e200_setup_raw_exceptions();
+	} else if ( (c = ppc_cpu_is_bookE()) && PPC_BOOKE_405 != c ) {
 		e500_setup_raw_exceptions();
 	}

View File

@@ -43,7 +43,6 @@
 #define ASM_PROG_VECTOR 0x07
 #define ASM_FLOAT_VECTOR 0x08
 #define ASM_DEC_VECTOR 0x09
-#define ASM_60X_VEC_VECTOR 0x0A
 
 #define ASM_SYS_VECTOR 0x0C
 #define ASM_TRACE_VECTOR 0x0D
@@ -80,6 +79,7 @@
 #define ASM_5XX_NMEBREAK_VECTOR 0x1F
 
+#define ASM_60X_VEC_VECTOR 0x0A
 #define ASM_60X_PERFMON_VECTOR 0x0F
 #define ASM_60X_IMISS_VECTOR 0x10
 #define ASM_60X_DLMISS_VECTOR 0x11
@@ -89,6 +89,24 @@
 #define ASM_60X_VEC_ASSIST_VECTOR 0x16
 #define ASM_60X_ITM_VECTOR 0x17
 
+/* e200 */
+#define ASM_E200_SPE_UNAVAILABLE_VECTOR 0x15
+#define ASM_E200_SPE_DATA_VECTOR 0x16
+#define ASM_E200_SPE_ROUND_VECTOR 0x17
+
+/* e300 */
+#define ASM_E300_CRIT_VECTOR 0x0A
+#define ASM_E300_PERFMON_VECTOR 0x0F
+#define ASM_E300_IMISS_VECTOR ASM_60X_IMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_DLMISS_VECTOR ASM_60X_DLMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_DSMISS_VECTOR ASM_60X_DSMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_ADDR_VECTOR 0x13
+#define ASM_E300_SYSMGMT_VECTOR 0x14
+
+/*
+ * If you change this number, make sure to adjust the wrapper code in
+ * ppc_exc.S and to verify that ppc_exc_handler_table is still initialized
+ * correctly.
+ */
 #define LAST_VALID_EXC 0x1F
 
 /* DO NOT USE -- this symbol is DEPRECATED
@@ -227,6 +245,12 @@ void e500_setup_raw_exceptions();
  */
 extern boolean bsp_exceptions_in_RAM;
 
+/**
+ * @brief Vector base address for CPUs (for example e200 and e500) with IVPR
+ * and IVOR registers.
+ */
+extern uint32_t ppc_exc_vector_base;
+
 # endif /* ASM */
 
 #endif

View File

@@ -49,6 +49,7 @@ char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu)
 		case PPC_8245: return "MPC8245";
 		case PPC_8540: return "MPC8540";
 		case PPC_PSIM: return "PSIM";
+		case PPC_e200z6: return "e200z6";
 		default:
 			printk("Unknown CPU value of 0x%x. Please add it to "
 			"<libcpu/powerpc/shared/include/cpuIdent.c>\n", cpu );
@@ -86,6 +87,10 @@ ppc_cpu_id_t get_ppc_cpu_type()
 		case PPC_8245:
 		case PPC_PSIM:
 		case PPC_8540:
+		case PPC_e200z6:
+		case PPC_e300c1:
+		case PPC_e300c2:
+		case PPC_e300c3:
 			break;
 		default:
 			printk("Unknown PVR value of 0x%x. Please add it to "
@@ -126,6 +131,7 @@ ppc_cpu_id_t get_ppc_cpu_type()
 			current_ppc_features.is_bookE = PPC_BOOKE_405;
 		break;
 		case PPC_8540:
+		case PPC_e200z6:
 			current_ppc_features.is_bookE = PPC_BOOKE_E500;
 		default:
 		break;
@@ -151,6 +157,14 @@ ppc_cpu_id_t get_ppc_cpu_type()
 		break;
 	}
 
+	switch (current_ppc_cpu) {
+		case PPC_e200z6:
+			current_ppc_features.has_ivpr_and_ivor = 1;
+			break;
+		default:
+			break;
+	}
+
 	return current_ppc_cpu;
 }

View File

@@ -16,6 +16,8 @@
 #ifndef _LIBCPU_CPUIDENT_H
 #define _LIBCPU_CPUIDENT_H
 
+#include <stdbool.h>
+
 #ifndef ASM
 typedef enum
 {
@@ -44,6 +46,7 @@ typedef enum
   PPC_e300c1 = 0x8083, /* e300c1 core, in MPC83xx */
   PPC_e300c2 = 0x8084, /* e300c2 core */
   PPC_e300c3 = 0x8085, /* e300c3 core */
+  PPC_e200z6 = 0x8115,
   PPC_PSIM = 0xfffe, /* GDB PowerPC simulator -- fake version */
   PPC_UNKNOWN = 0xffff
 } ppc_cpu_id_t;
@@ -67,6 +70,7 @@ typedef struct {
   unsigned has_8_bats : 1;
   unsigned has_epic : 1;
   unsigned has_shadowed_gprs : 1;
+  unsigned has_ivpr_and_ivor : 1;
 } ppc_feature_t;
 
 extern ppc_feature_t current_ppc_features;
@@ -81,7 +85,7 @@ extern ppc_cpu_revision_t current_ppc_revision;
 /* PUBLIC ACCESS ROUTINES */
 #define _PPC_FEAT_DECL(x) \
-	static inline ppc_cpu_##x() { \
+	static inline unsigned ppc_cpu_##x() { \
 		if ( PPC_UNKNOWN == current_ppc_cpu ) \
 			get_ppc_cpu_type(); \
 		return current_ppc_features.x; \
@@ -95,6 +99,17 @@ _PPC_FEAT_DECL(is_60x)
 _PPC_FEAT_DECL(has_8_bats)
 _PPC_FEAT_DECL(has_epic)
 _PPC_FEAT_DECL(has_shadowed_gprs)
+_PPC_FEAT_DECL(has_ivpr_and_ivor)
+
+static inline bool ppc_cpu_is_e300()
+{
+	if (current_ppc_cpu == PPC_UNKNOWN) {
+		get_ppc_cpu_type();
+	}
+	return current_ppc_cpu == PPC_e300c1
+		|| current_ppc_cpu == PPC_e300c2
+		|| current_ppc_cpu == PPC_e300c3;
+}
 
 #undef _PPC_FEAT_DECL
 
 #endif /* ASM */
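
Usage of the generated feature accessor and the new e300 predicate (a sketch; the call sites mirror vectors_init.c and raw_exception.c from this commit):

#include <libcpu/cpuIdent.h>

static void select_prologue_variant(void)
{
  /* generated by _PPC_FEAT_DECL(has_ivpr_and_ivor) above */
  if (ppc_cpu_has_ivpr_and_ivor()) {
    /* use the packed 16-byte prologues and IVPR-relative vectors */
  }

  if (ppc_cpu_is_e300()) {
    /* e300 cores select the e300 vector categories */
  }
}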