adapted powerpc exception code

Thomas Doerfler
2008-07-11 10:02:12 +00:00
parent a86f3aac96
commit 25a92bc1ed
15 changed files with 1001 additions and 820 deletions

View File

@@ -389,7 +389,7 @@ RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
dispatch requirement).
And one more note: We never want to disable
machine-check exceptions to avoid a checkstop.
This means that we cannot use enabling/disabling
this type of exception for protection of critical
OS data structures.
@@ -404,3 +404,22 @@ RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
Note that synchronous machine-checks can legally
use OS primitives and currently there are no
asynchronous machine-checks defined.
Epilogue:
You have to disable all asynchronous exceptions which may cause a context
switch before restoring the SRRs and issuing the RFI. Reason:
Suppose we are in the epilogue code of an EE, between the move to the SRRs and
the RFI. Here EE is disabled but CE is enabled. Now a CE happens. The
handler decides that a thread dispatch is necessary. The CE handler checks
whether this is possible:
o The thread dispatch disable level is 0, because the EE has already
decremented it.
o The EE lock variable is cleared.
o The EE is not executing its first instruction.
Hence a thread dispatch is allowed. The CE issues a context switch to a
task with EE enabled (for example a task waiting for a semaphore). Now an
EE happens and the current content of the SRRs is lost.
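The dispatch test performed by the CE handler can be sketched in C (a
minimal sketch; the names are illustrative, the real tests are done in
assembly on SDA variables such as _Thread_Dispatch_disable_level and
ppc_exc_lock_std):

  #include <stdbool.h>

  /* Illustrative stand-ins for the SDA variables tested in assembly */
  extern volatile unsigned thread_dispatch_disable_level;
  extern volatile unsigned ee_lock;

  /* Dispatch is only safe if no lower-priority (EE) handler is still
   * between the move to the SRRs and the RFI.
   */
  static bool ce_may_dispatch(bool ee_in_first_instruction)
  {
    return thread_dispatch_disable_level == 0
      && ee_lock == 0
      && !ee_in_first_instruction;
  }

In the scenario above all three tests pass, which is exactly why the
asynchronous exceptions themselves must be disabled before the SRRs are
loaded.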

View File

@@ -3,6 +3,8 @@
*
* Modified and partially rewritten by Till Straumann, 2007
*
* Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
*
* Low-level assembly code for PPC exceptions.
*
* This file was written with the goal to eliminate
@@ -21,10 +23,10 @@
* Expand prologue snippets for classic, ppc405-critical, bookE-critical
* and E500 machine-check, synchronous and asynchronous exceptions
*/
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
@@ -34,20 +36,20 @@
.global ppc_exc_min_prolog_size
ppc_exc_min_prolog_size = 4 * 4
/* Special prologue for 603e-style CPUs.
*
* 603e shadows GPR0..GPR3 for certain exceptions. We must switch
* that off before we can use the stack pointer. Note that this is
* ONLY safe if the shadowing is actually active -- otherwise, r1
* is destroyed. We deliberately use r1 so problems become obvious
* if this is misused!
*/
.global ppc_exc_tgpr_clr_prolog
ppc_exc_tgpr_clr_prolog:
mfmsr r1
rlwinm r1,r1,0,15,13 /* clear MSR[TGPR] (bit 14) to switch the shadow GPRs off */
mtmsr r1
isync
/* FALL THRU TO 'auto' PROLOG */
/* Determine vector dynamically/automatically
@@ -58,9 +60,21 @@ ppc_exc_tgpr_clr_prolog:
.global ppc_exc_min_prolog_auto
ppc_exc_min_prolog_auto:
stwu r1, -EXCEPTION_FRAME_END(r1)
- stw r3, GPR3_OFFSET(r1)
- mflr r3
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ mflr VECTOR_REGISTER
bla wrap_auto
/**
* @brief Use vector offsets with 16-byte boundaries.
*
* @see ppc_exc_min_prolog_auto();
*/
.global ppc_exc_min_prolog_auto_packed
ppc_exc_min_prolog_auto_packed:
stwu r1, -EXCEPTION_FRAME_END(r1)
stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
mflr VECTOR_REGISTER
bla wrap_auto_packed
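As a side note, both auto prologues recover the vector number from the
return address left by the 'bla'; a C sketch of the arithmetic (assuming
classic 256-byte vector spacing for wrap_auto and 16-byte packed spacing
for wrap_auto_packed):

  #include <stdint.h>

  /* rlwinm rV,rA,24,26,31: rotate left by 24 == shift right by 8,
   * then keep the low 6 bits, i.e. vector = (addr >> 8) & 0x3f
   */
  static uint32_t vector_from_addr(uint32_t addr)
  {
    return (addr >> 8) & 0x3f;
  }

  /* rlwinm rV,rA,28,26,31: rotate left by 28 == shift right by 4,
   * for prologues packed on 16-byte boundaries
   */
  static uint32_t vector_from_addr_packed(uint32_t addr)
  {
    return (addr >> 4) & 0x3f;
  }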
.global ppc_exc_tgpr_clr_prolog_size
ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
@@ -78,10 +92,10 @@ ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
*/
.global ppc_exc_min_prolog_auto_async
ppc_exc_min_prolog_auto_async:
stw r1, ppc_exc_lock_std@sdarel(r13)
- stw r3, ppc_exc_gpr3_std@sdarel(r13)
- mflr r3
+ stw VECTOR_REGISTER, ppc_exc_vector_register_std@sdarel(r13)
+ mflr VECTOR_REGISTER
bla wrap_auto_async
/******************************************************/
/* WRAPPERS */
@@ -101,294 +115,63 @@ __ppc_exc_wrappers_start = .
/* Expand wrappers for different exception flavors */
/* Standard/classic powerpc */
WRAP _FLVR=std _PRI=std _SRR0=srr0 _SRR1=srr1 _RFI=rfi
/* ppc405 has a critical exception using srr2/srr3 */
WRAP _FLVR=p405_crit _PRI=crit _SRR0=srr2 _SRR1=srr3 _RFI=rfci
/* bookE has critical exception using csrr0 cssr1 */
WRAP _FLVR=bookE_crit _PRI=crit _SRR0=csrr0 _SRR1=csrr1 _RFI=rfci
/* e500 has machine-check exception using mcsrr0 mcssr1 */
WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
- /* LR holds vector, r3 holds orig. LR */
+ /* LR holds vector, VECTOR_REGISTER holds orig. LR */
wrap_auto:
- stw r14, GPR14_OFFSET(r1)
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
- /* find address where we jumped from */
+ /* Find address where we jumped from */
- mflr r14
+ mflr FRAME_REGISTER
- /* restore LR */
+ /* Restore LR */
- mtlr r3
+ mtlr VECTOR_REGISTER
- /* compute vector into R3 */
+ /* Compute vector into R3 */
- rlwinm r3, r14, 24, 26, 31
+ rlwinm VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
- /* we're now in almost the same state as if called by
- * min_prolog_std but we must skip saving r14
+ /*
+ * We're now in almost the same state as if called by
+ * min_prolog_std but we must skip saving FRAME_REGISTER
* since that's done already
*/
- b wrap_no_save_r14_std
+ b wrap_no_save_frame_register_std
/* See: wrap_auto */
wrap_auto_packed:
stw FRAME_REGISTER, FRAME_OFFSET(r1)
mflr FRAME_REGISTER
mtlr VECTOR_REGISTER
rlwinm VECTOR_REGISTER, FRAME_REGISTER, 28, 26, 31
b wrap_no_save_frame_register_std
wrap_auto_async:
stwu r1, -EXCEPTION_FRAME_END(r1)
- stw r14, GPR14_OFFSET(r1)
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
/* find address where we jumped from */
- mflr r14
+ mflr FRAME_REGISTER
/* restore LR */
- mtlr r3
+ mtlr VECTOR_REGISTER
/* set upper bits to indicate that non-volatile
* registers should not be saved/restored.
*/
- li r3, 0xffff8000
+ li VECTOR_REGISTER, 0xffff8000
/* compute vector into R3 */
- rlwimi r3, r14, 24, 26, 31
+ rlwimi VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
/* we're now in almost the same state as if called by
- * min_prolog_std but we must skip saving r14
+ * min_prolog_std but we must skip saving FRAME_REGISTER
* since that's done already
*/
- b wrap_no_save_r14_std
+ b wrap_no_save_frame_register_std
/*
* Common code for all flavors of exception and whether
* they are synchronous or asynchronous.
*
* Call with
* r3 : vector
* r4 : srr0
* r5 : srr1
* r14: exception frame
* cr4: OR of lower-priority locks
* cr2: exception type (async/isr [<0] or synchronous [>=0])
* lr : is updated by 'bl'
* all others: original state
*
* If this is an asynchronous exception ( cr2 < 0 ):
* - save volatile registers only,
* - disable thread dispatching,
* - switch to interrupt stack (if necessary),
* - call the C-dispatcher,
* - switch back the stack,
* - decrement the dispatch-disable level
* - check if it is safe to dispatch (disable-level must be 0
* AND no lower-priority asynchronous exception must be under
* way (as indicated by the lock variables).
* - If it would be OK to dispatch, call the C-wrapup code.
* - restore volatile registers
*
* Otherwise, i.e., if we are dealing with a synchronous exception
* then:
* - save all registers
* - call the C-dispatcher
* - restore registers
*/
wrap_common:
stw r4, SRR0_FRAME_OFFSET(r14)
stw r5, SRR1_FRAME_OFFSET(r14)
/* prepare for calling C code; */
/* use non-volatile r15 for remembering lr */
stw r15, GPR15_OFFSET(r14)
/* save vector; negative if only scratch regs. are valid */
stw r3, EXCEPTION_NUMBER_OFFSET(r14)
/* save scratch registers */
/* r2 should be unused or fixed anyways (eabi sdata2) */
stw r0, GPR0_OFFSET(r14)
stw r2, GPR2_OFFSET(r14)
stw r6, GPR6_OFFSET(r14)
stw r7, GPR7_OFFSET(r14)
stw r8, GPR8_OFFSET(r14)
stw r9, GPR9_OFFSET(r14)
stw r10, GPR10_OFFSET(r14)
stw r11, GPR11_OFFSET(r14)
stw r12, GPR12_OFFSET(r14)
/* r13 must be fixed anyways (sysv sdata) */
/* save LR */
mflr r15
mfctr r4
mfxer r5
stw r4, EXC_CTR_OFFSET(r14)
stw r5, EXC_XER_OFFSET(r14)
/*
* Switch MMU / RI on if necessary;
* remember decision in cr3
*/
lwz r4, ppc_exc_msr_bits@sdarel(r13)
cmpwi cr3, r4, 0
beq cr3, 1f
mfmsr r5
or r5, r5, r4
mtmsr r5
sync
isync
1:
/* If this is an asynchronous exception we skip ahead */
blt cr2, skip_save_nonvolatile_regs
/* YES; they want everything ('normal exception') */
/* save original stack pointer */
lwz r5, EXC_MIN_GPR1(r14)
stw r5, GPR1_OFFSET(r14)
stw r13, GPR13_OFFSET(r14)
/* store r16..r31 into the exception frame */
stmw r16, GPR16_OFFSET(r14)
skip_save_nonvolatile_regs:
/* store address of exception frame in r4; vector is in r3 */
addi r4, r14, FRAME_LINK_SPACE
/* load hi-halfword of C wrapper address */
lis r5, ppc_exc_C_wrapper@h
/* clear CR[6] to make sure no vararg callee assumes that
* there are any valid FP regs
*/
crxor 6,6,6
/* merge lo-halfword of C wrapper address */
ori r5, r5, ppc_exc_C_wrapper@l
/* Far branch to ppc_exc_C_wrapper */
mtlr r5
blrl
/* do not clobber r3 since we pass the return value
* of ppc_exc_C_wrapper on to ppc_exc_wrapup
*/
/* skip decrementing the thread-dispatch disable level
* and calling ppc_exc_wrapup if this is a synchronous
* exception.
*/
bge cr2, restore_nonvolatile_regs
/* decrement ISR nest level;
* disable all interrupts.
* (Disabling IRQs here is not necessary if we
* use the stack-switching strategy which tests
* if we are already on the ISR-stack as opposed
* to testing the nesting level; see ppc_exc_asm_macros.h)
*/
lwz r4, ppc_exc_msr_irq_mask@sdarel(r13)
mfmsr r5
andc r4, r5, r4
mtmsr r4
lwz r4, _ISR_Nest_level@sdarel(r13)
addi r4, r4, -1
stw r4, _ISR_Nest_level@sdarel(r13)
/*
* switch back to original stack (r14 == r1 if we are
* still on the IRQ stack).
*/
mr r1, r14
/* restore interrupt mask */
mtmsr r5
/* decrement thread_dispatch level and check
* if we have to run the dispatcher.
*/
lwz r5, _Thread_Dispatch_disable_level@sdarel(r13)
addic. r5, r5, -1
stw r5, _Thread_Dispatch_disable_level@sdarel(r13)
/* test _Thread_Dispatch_disable nesting level AND
* lower priority locks (in cr4); ONLY if
* _Thread_Dispatch_disable_level == 0 AND no lock is set
* then call ppc_exc_wrapup which may do a context switch.
*/
crand EQ(cr0), EQ(cr0), EQ(cr4)
bne 2f
crxor 6,6,6
/* Far branch to ppc_exc_wrapup */
lis r5, ppc_exc_wrapup@h
addi r4, r14, FRAME_LINK_SPACE
ori r5, r5, ppc_exc_wrapup@l
mtlr r5
blrl
2:
lwz r14, GPR14_OFFSET(r1)
/* we can skip restoring r16..r31 */
b skip_restore_nonvolatile_regs
restore_nonvolatile_regs:
/* synchronous exc: restore everything from the exception frame */
lwz r14, GPR14_OFFSET(r1)
/* restore stack pointer */
lwz r5, GPR1_OFFSET(r1)
stw r5, EXC_MIN_GPR1(r1)
/* restore non-volatile regs */
lwz r13, GPR13_OFFSET(r1)
lmw r16, GPR16_OFFSET(r1)
skip_restore_nonvolatile_regs:
lwz r3, EXC_XER_OFFSET(r1)
lwz r4, EXC_CTR_OFFSET(r1)
mtxer r3
mtctr r4
/* restore lr, r15 */
mtlr r15
lwz r15, GPR15_OFFSET(r1)
/* restore scratch regs */
lwz r12, GPR12_OFFSET(r1)
lwz r11, GPR11_OFFSET(r1)
lwz r10, GPR10_OFFSET(r1)
lwz r9, GPR9_OFFSET(r1)
lwz r8, GPR8_OFFSET(r1)
lwz r7, GPR7_OFFSET(r1)
lwz r6, GPR6_OFFSET(r1)
/* r4, r5 are eventually restored by caller */
lwz r3, GPR3_OFFSET(r1)
lwz r2, GPR2_OFFSET(r1)
/* r1 is eventually restored by caller */
lwz r0, GPR0_OFFSET(r1)
beq cr3, 2f
/* restore MSR settings */
lwz r5, ppc_exc_msr_bits@sdarel(r13)
mfmsr r4
andc r4, r4, r5
mtmsr r4
sync
isync
2:
lwz r4, EXC_CR_OFFSET(r1)
mtcr r4
/* Must disable interrupts prior to restoring SRRs.
* Here's a scenario discovered by Sebastian Huber:
* 1) CE happens between writing to SRR and RFI
* 2) CE handler does something which requires a task switch
* 3) CE wrapper returns and determines that task switch
* is OK since EE lock is not held, dispatch-disable level
* is zero etc.
* 4) switch to other task enables EE
* 5) eventually, switch back to task interrupted by 1)
* 6) RFI happens but SRR contents have been clobbered.
*/
lwz r4, ppc_exc_msr_irq_mask@sdarel(r13)
mfmsr r5
andc r4, r5, r4
mtmsr r4
/* restore SRR and stack */
lwz r4, SRR0_FRAME_OFFSET(r1)
lwz r5, SRR1_FRAME_OFFSET(r1)
blr
.global __ppc_exc_wrappers_end
__ppc_exc_wrappers_end = .

View File

@@ -13,6 +13,10 @@
#ifndef PPC_EXC_SHARED_H
#define PPC_EXC_SHARED_H
#include <stdint.h>
#include "vectors.h"
#ifdef __cplusplus
extern "C" {
#endif
@@ -33,7 +37,6 @@ extern "C" {
*
* Other return values are reserved.
*/
typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
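For illustration, a handler matching this signature could look as follows
(a sketch; the SRR0 check is hypothetical):

  /* Return 0 when the exception was handled; a nonzero value makes the
   * wrapper fall through to the global exception handler.
   */
  static int my_handler(BSP_Exception_frame *f, unsigned int vector)
  {
    if (f->EXC_SRR0 == 0) {
      return 1; /* not handled: let globalExceptHdl deal with it */
    }
    /* ... inspect/fix up the saved state here ... */
    return 0; /* handled */
  }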
/*
@@ -46,52 +49,6 @@ typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
*/
extern uint32_t ppc_exc_msr_bits;
/*
* Set of MSR bits required to disable all
* asynchronous exceptions (depends on CPU type;
* must be set during initialization).
* Interrupts are disabled by writing the
* one's complement of this mask to msr:
* msr &= ~ppc_exc_msr_irq_mask;
*/
extern uint32_t ppc_exc_msr_irq_mask;
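Expressed in C, the masking works roughly like this (a sketch; the
function name is hypothetical, _read_MSR()/_write_MSR() are the accessors
used later in this commit):

  #include <stdint.h>

  void irq_disabled_section(void) /* hypothetical example function */
  {
    uint32_t msr = _read_MSR();

    /* Disable EE (and CE/DE on BookE, see below) */
    _write_MSR(msr & ~ppc_exc_msr_irq_mask);

    /* ... critical section ... */

    _write_MSR(msr); /* restore previous state */
  }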
/*
* Cache size of the interrupt stack in an SDA variable
*/
extern uint32_t ppc_exc_intr_stack_size;
/*
* This variable defines the semantics of asynchronous
* critical exceptions ("critical interrupts")
* on BookE-style CPUs.
*
* There are the following ways of using these interrupts:
*
* 1) permanently disabled; no support
* 2) permanently enabled; handlers for critical interrupts
* MUST NOT use any RTEMS primitives at all. They cannot,
* e.g., directly release a semaphore.
* 3) enabled/disabled by the OS during critical sections.
* In this scenario critical interrupts are not much
* different from 'normal' interrupts but handlers may
* safely use RTEMS primitives (i.e., the subset which
* is OK to use from ISR context).
*
* The BSP (or application) may initialize this
* variable PRIOR to calling 'initialize_exceptions'
* to any of the following values:
*
* NOTE: OS_SUPPORT is not supported by the cpukit yet,
* since the IRQ enabling/disabling primitives
* do not mask MSR_CE yet.
*/
#define PPC_EXC_CRIT_NO_OS_SUPPORT 1
#define PPC_EXC_CRIT_OS_SUPPORT 0
#define PPC_EXC_CRIT_DISABLED (-1)
extern int32_t ppc_exc_crit_always_enabled;
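A BSP that wants variant 2) could select it before exception
initialization, e.g. (a sketch; the function name is illustrative):

  void bsp_select_critical_interrupt_mode(void) /* hypothetical */
  {
    /* Handlers must then avoid all RTEMS primitives */
    ppc_exc_crit_always_enabled = PPC_EXC_CRIT_NO_OS_SUPPORT;
    initialize_exceptions();
  }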
/* (See README under CAVEATS). During initialization
* a check is performed to assert that write-back
* caching is enabled for memory accesses. If a BSP
@@ -169,6 +126,8 @@ extern void ppc_exc_tgpr_clr_prolog();
*/
extern void ppc_exc_min_prolog_auto();
extern void ppc_exc_min_prolog_auto_packed();
/* CPU support may store the address of a function here
* that can be used by the default exception handler to

View File

@@ -39,11 +39,9 @@ uint32_t ppc_exc_lock_std = 0;
uint32_t ppc_exc_lock_crit = 0;
uint32_t ppc_exc_lock_mchk = 0;
- uint32_t ppc_exc_gpr3_std = 0;
- uint32_t ppc_exc_gpr3_crit = 0;
- uint32_t ppc_exc_gpr3_mchk = 0;
- uint32_t ppc_exc_msr_irq_mask = MSR_EE;
+ uint32_t ppc_exc_vector_register_std = 0;
+ uint32_t ppc_exc_vector_register_crit = 0;
+ uint32_t ppc_exc_vector_register_mchk = 0;
/* MSR bits to enable once critical status info is saved and the stack
* is switched; must be set depending on CPU type
@@ -53,74 +51,44 @@ uint32_t ppc_exc_msr_irq_mask = MSR_EE;
*/
uint32_t ppc_exc_msr_bits = MSR_IR | MSR_DR | MSR_RI;
uint32_t ppc_exc_intr_stack_size = 0;
int32_t ppc_exc_crit_always_enabled = PPC_EXC_CRIT_NO_OS_SUPPORT;
- /* Table of C-handlers */
- static ppc_exc_handler_t ppc_exc_handlers[LAST_VALID_EXC + 1] = {0, };
- ppc_exc_handler_t
- ppc_exc_get_handler(unsigned vector)
- {
- if ( vector > LAST_VALID_EXC )
- return 0;
- return ppc_exc_handlers[vector];
- }
- int
- ppc_exc_set_handler(unsigned vector, ppc_exc_handler_t hdl)
- {
- if ( vector > LAST_VALID_EXC )
- return -1;
- ppc_exc_handlers[vector] = hdl;
- return 0;
- }
+ int ppc_exc_handler_default( BSP_Exception_frame *f, unsigned int vector)
+ {
+ return 1;
+ }
+ /* Table of C-handlers */
+ ppc_exc_handler_t ppc_exc_handler_table [LAST_VALID_EXC + 1] = {
+ [0 ... LAST_VALID_EXC] = ppc_exc_handler_default
+ };
+ ppc_exc_handler_t ppc_exc_get_handler( unsigned vector)
+ {
+ ppc_exc_handler_t handler = NULL;
+ if (vector > LAST_VALID_EXC) {
+ return 0;
+ }
+ if (ppc_exc_handler_table [vector] != ppc_exc_handler_default) {
+ handler = ppc_exc_handler_table [vector];
+ }
+ return handler;
+ }
+ int ppc_exc_set_handler( unsigned vector, ppc_exc_handler_t handler)
+ {
+ if (vector > LAST_VALID_EXC) {
+ return -1;
+ }
+ if (handler == NULL) {
+ ppc_exc_handler_table [vector] = ppc_exc_handler_default;
+ } else {
+ ppc_exc_handler_table [vector] = handler;
+ }
+ return 0;
+ }
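Given the default-handler scheme above, installing and removing a handler
is symmetric; a usage sketch (the vector number and my_handler are
illustrative):

  void install_and_remove_example(void) /* hypothetical */
  {
    /* Install; fails with -1 for an out-of-range vector */
    if (ppc_exc_set_handler(6, my_handler) != 0) {
      /* vector out of range */
    }

    /* ppc_exc_get_handler(6) now returns my_handler; it returns NULL
     * while the default handler is installed.
     */

    /* Passing NULL restores ppc_exc_handler_default */
    ppc_exc_set_handler(6, NULL);
  }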
/* This routine executes on the interrupt stack (if vect < 0) */
int
ppc_exc_C_wrapper(int vect, BSP_Exception_frame *f)
{
unsigned int i = vect & 0x3f;
int rval = 1;
if ( i <= LAST_VALID_EXC && ppc_exc_handlers[i] ) {
rval = ppc_exc_handlers[i](f, i);
}
if ( rval ) {
/* not handled, so far ... */
if ( globalExceptHdl ) {
/*
* global handler must be prepared to
* deal with asynchronous exceptions!
*/
globalExceptHdl(f);
}
rval = 0;
}
if ( (ppc_exc_msr_bits ^ f->EXC_SRR1) & MSR_RI ) {
printk("unrecoverable exception (RI was clear), spinning to death.\n");
while (1)
;
}
return rval;
}
void
- ppc_exc_wrapup(int ll_rval, BSP_Exception_frame *f)
+ ppc_exc_wrapup( BSP_Exception_frame *f)
{
- /* Check if we need to run the global handler now */
- if ( ll_rval ) {
- /* We get here if ppc_exc_C_wrapper() returned nonzero.
- * This could be useful if we need to do something
- * with thread-dispatching enabled (at this point it is)
- * after handling an asynchronous exception.
- */
- }
+ /* dispatch_disable level is decremented from assembly code. */
if ( _Context_Switch_necessary ) {
/* FIXME: I believe it should be OK to re-enable

View File

@@ -14,6 +14,7 @@
*/
#ifndef LIBCPU_POWERPC_BSPSUPP_VECTORS_H
#define LIBCPU_POWERPC_BSPSUPP_VECTORS_H
#include <libcpu/raw_exception.h>
/*
@@ -61,15 +62,21 @@
#define EXC_CTR_OFFSET 152
#define EXC_XER_OFFSET 156
#define EXC_LR_OFFSET 160
/* Exception stack frame -> BSP_Exception_frame */
#define FRAME_LINK_SPACE 8
/*
* Maintain the EABI-requested 8-byte alignment.
* As the SVR4 ABI requires 16, make it 16 (as some
* exceptions may need more registers to be processed...)
*/
#define EXCEPTION_FRAME_END 176
#ifndef ASM
#include <stdint.h>
/* codemove is like memmove, but it also gets the cache line size
* as 4th parameter to synchronize them. If this last parameter is
* zero, it performs more or less like memmove. No copy is performed if
@@ -80,7 +87,12 @@
extern void * codemove(void *, const void *, unsigned int, unsigned long);
extern void exception_nop_enable(const rtems_raw_except_connect_data* ptr);
extern int exception_always_enabled(const rtems_raw_except_connect_data* ptr);
- extern void initialize_exceptions();
+ void ppc_exc_initialize(
+ uint32_t interrupt_disable_mask,
+ uint32_t interrupt_stack_start,
+ uint32_t interrupt_stack_size
+ );
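A caller sketch (the stack symbols and the mask constant are illustrative,
not part of this commit):

  #include <stdint.h>

  extern char bsp_interrupt_stack_start[]; /* hypothetical linker symbols */
  extern char bsp_interrupt_stack_size[];

  void bsp_exceptions_init(void)
  {
    ppc_exc_initialize(
      PPC_INTERRUPT_DISABLE_MASK_DEFAULT, /* hypothetical mask constant */
      (uint32_t) bsp_interrupt_stack_start,
      (uint32_t) bsp_interrupt_stack_size
    );
  }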
typedef struct _BSP_Exception_frame {
unsigned EXC_SRR0;

View File

@@ -47,6 +47,8 @@ typedef struct LRFrameRec_ {
#define STACK_CLAMP 50 /* in case we have a corrupted bottom */
SPR_RW(SPRG1)
SPR_RW(SPRG2)
SPR_RO(LR)
SPR_RO(DAR)
#define DEAR_BOOKE 61
@@ -250,11 +252,11 @@ ppc_exc_min_prolog_template_t tmpl;
return (rtems_raw_except_func)prologues[n_prolog++];
}
- void ppc_exc_init(
+ void ppc_exc_table_init(
rtems_raw_except_connect_data *exception_table,
int nEntries)
{
- int i,v;
+ unsigned i,v;
ppc_raw_exception_category cat;
uintptr_t vaddr;
@@ -270,11 +272,17 @@ uintptr_t vaddr;
exception_config.rawExceptHdlTbl = exception_table;
exception_config.defaultRawEntry.exceptIndex = 0;
exception_config.defaultRawEntry.hdl.vector = 0;
- /* Note that the 'auto' handler cannot be used for everything; in particular,
- * it assumes classic exceptions with a vector offset aligned on a 256-byte
- * boundary.
- */
- exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto;
+ if (ppc_cpu_has_ivpr_and_ivor()) {
+ /* Use packed version with 16-byte boundaries for CPUs with IVPR and IVOR registers */
+ exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto_packed;
+ } else {
+ /* Note that the 'auto' handler cannot be used for everything; in particular,
+ * it assumes classic exceptions with a vector offset aligned on a 256-byte
+ * boundary.
+ */
+ exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto;
+ }
/*
* Note that the cast of an array address to an unsigned
@@ -306,7 +314,7 @@ uintptr_t vaddr;
* default prolog can handle classic, synchronous exceptions
* with a vector offset aligned on a 256-byte boundary.
*/
- if ( PPC_EXC_CLASSIC == cat && 0 == ( vaddr & 0xff ) ) {
+ if (cat == PPC_EXC_CLASSIC && ((vaddr & 0xff) == 0 || (ppc_cpu_has_ivpr_and_ivor() && (vaddr & 0xf) == 0))) {
exception_table[i].hdl.raw_hdl_size = exception_config.defaultRawEntry.hdl.raw_hdl_size;
exception_table[i].hdl.raw_hdl = exception_config.defaultRawEntry.hdl.raw_hdl;
} else {
@@ -329,44 +337,45 @@ uintptr_t vaddr;
#endif
}
- void initialize_exceptions()
+ void ppc_exc_initialize(
+ uint32_t interrupt_disable_mask,
+ uint32_t interrupt_stack_start,
+ uint32_t interrupt_stack_size
+ )
{
int i;
int n = sizeof(exception_table)/sizeof(exception_table[0]);
uint32_t interrupt_stack_end = 0;
uint32_t interrupt_stack_pointer = 0;
uint32_t *p = NULL;
/* Ensure proper interrupt stack alignment */
interrupt_stack_start &= ~(CPU_STACK_ALIGNMENT - 1);
interrupt_stack_size &= ~(CPU_STACK_ALIGNMENT - 1);
/* Interrupt stack end and pointer */
interrupt_stack_end = interrupt_stack_start + interrupt_stack_size;
interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
/* Tag interrupt stack bottom */
p = (uint32_t *) interrupt_stack_pointer;
*p = 0;
/* Move interrupt stack values to special purpose registers */
_write_SPRG1( interrupt_stack_pointer);
_write_SPRG2( interrupt_stack_start);
/* Interrupt disable mask */
ppc_interrupt_set_disable_mask( interrupt_disable_mask);
/* Use current MMU / RI settings when running C exception handlers */
ppc_exc_msr_bits = _read_MSR() & ( MSR_DR | MSR_IR | MSR_RI );
/* Cache size of the interrupt stack in an SDA variable */
ppc_exc_intr_stack_size = rtems_configuration_get_interrupt_stack_size();
/* Copy into an SDA variable that is easy to access from
* assembly code
*/
if ( ppc_cpu_is_bookE() ) {
ppc_exc_msr_irq_mask = MSR_EE | MSR_CE | MSR_DE ;
switch (ppc_exc_crit_always_enabled) {
case PPC_EXC_CRIT_NO_OS_SUPPORT:
_write_MSR(_read_MSR() | (MSR_CE | MSR_DE));
break;
case PPC_EXC_CRIT_OS_SUPPORT:
printk("ppc_exc: PPC_EXC_CRIT_OS_SUPPORT not yet implemented\n");
/* fall thru */
case PPC_EXC_CRIT_DISABLED:
default:
ppc_exc_crit_always_enabled = PPC_EXC_CRIT_DISABLED;
_write_MSR(_read_MSR() & ~(MSR_CE | MSR_DE));
break;
}
} else {
ppc_exc_msr_irq_mask = MSR_EE ;
}
for ( i=0; i<n; i++ )
exception_table[i].hdl.vector = i;
- ppc_exc_init(exception_table, n);
+ ppc_exc_table_init(exception_table, n);
/* If we are on a classic PPC with MSR_DR enabled then
* assert that the mapping for at least this task's