Merge of the mcp750 and mvme2307 BSPs by Eric Valette <valette@crf.canon.fr>.

As part of this effort, the mpc750 libcpu code is now shared with the
ppc6xx.
Joel Sherrill
1999-12-02 13:54:30 +00:00
parent 5d3da15b83
commit 5554279677
10 changed files with 0 additions and 3503 deletions


@@ -1,90 +0,0 @@
#
# $Id$
#
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = ../..
subdir = powerpc/other_cpu
RTEMS_ROOT = @RTEMS_ROOT@
PROJECT_ROOT = @PROJECT_ROOT@
VPATH = @srcdir@
RELS = ../$(ARCH)/rtems-cpu.rel
# C source names, if any, go here -- minus the .c
C_PIECES = cpu ppccache
C_FILES = $(C_PIECES:%=%.c)
C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
ROOT_H_PIECES =
ROOT_H_FILES = $(ROOT_H_PIECES:%=$(srcdir)/%)
RTEMS_SCORE_H_PIECES = cpu.h
RTEMS_SCORE_H_FILES = $(RTEMS_SCORE_H_PIECES:%=$(srcdir)/%)
H_PIECES = $(ROOT_H_PIECES) $(RTEMS_SCORE_H_PIECES)
H_FILES = $(H_PIECES:%=$(srcdir)/%)
I_PIECES = c_isr
I_FILES = $(I_PIECES:%=$(srcdir)/%.inl)
# Assembly source names, if any, go here -- minus the .S
S_PIECES = cpu_asm rtems # irq_stub
S_FILES = $(S_PIECES:%=%.S)
S_O_FILES = $(S_FILES:%.S=${ARCH}/%.o)
SRCS = $(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) \
$(I_FILES)
OBJS = $(C_O_FILES) $(CC_O_FILES) $(S_O_FILES)
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(RTEMS_ROOT)/make/leaf.cfg
INSTALL_CHANGE = @INSTALL_CHANGE@
mkinstalldirs = $(SHELL) $(top_srcdir)/@RTEMS_TOPdir@/mkinstalldirs
INSTALLDIRS = $(PROJECT_INCLUDE)/rtems/score $(PROJECT_INCLUDE)
$(INSTALLDIRS):
@$(mkinstalldirs) $(INSTALLDIRS)
#
# (OPTIONAL) Add local stuff here using +=
#
DEFINES +=
CPPFLAGS +=
CFLAGS += $(CFLAGS_OS_V)
LD_PATHS +=
LD_LIBS +=
LDFLAGS +=
#
# Add your list of files to delete here. The config files
# already know how to delete some stuff, so you may want
# to just run 'make clean' first to see what gets missed.
# 'make clobber' already includes 'make clean'
#
CLEAN_ADDITIONS +=
CLOBBER_ADDITIONS +=
../$(ARCH)/rtems-cpu.rel: $(OBJS)
test -d ../$(ARCH) || mkdir ../$(ARCH)
$(make-rel)
all: ${ARCH} $(SRCS) preinstall $(OBJS) $(RELS)
# Install the program(s), appending _g or _p as appropriate.
# for include files, just use $(INSTALL_CHANGE)
install: all
preinstall: ${ARCH}
@$(INSTALL_CHANGE) -m 644 $(RTEMS_SCORE_H_FILES) $(I_FILES) $(PROJECT_INCLUDE)/rtems/score
@$(INSTALL_CHANGE) -m 644 $(ROOT_H_FILES) $(PROJECT_INCLUDE)
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) \
&& CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status


@@ -1,78 +0,0 @@
#
# $Id$
#
There are various issues regarding this port:
1) Legal
This port is written by Andrew Bray <andy@i-cubed.co.uk>, and
is copyright 1995 i-cubed ltd.
This port was later updated by Joel Sherrill <joel@OARcorp.com>
to test the support for the PPC603, PPC603e, and PPC604. This
was tested on the PowerPC simulator PSIM and a VMEbus single board
computer.
2) CPU support.
This release fully supports the PPC403GA, PPC403GB, PPC603, PPC603e,
and PPC604 processors. A good faith attempt has been made to include
support for other models based upon available documentation.
This port was originally written and tested on the PPC403GA (using
software floating point). Current ports are tested on 60x CPUs
using the PowerPC simulator PSIM.
Andrew Bray received assistance during the initial porting effort
from IBM and Blue Micro and we would like to gratefully acknowledge
that help.
The support for the PPC602 processor is incomplete as only sketchy
data is currently available. Perhaps this model has been dropped.
3) Application Binary Interface
In the context of RTEMS, the ABI is of interest for the following
aspects:
a) Register usage. Which registers are used to provide static variable
linkage, stack pointer etc.
b) Function calling convention. How parameters are passed, how function
variables should be invoked, how values are returned, etc.
c) Stack frame layout.
I am aware of a number of ABIs for the PowerPC:
a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
This is the only ABI supported by versions of GCC before 2.7.0.
b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
to the PowerPC.
c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
operating system interface defined. It is promoted by SunSoft, Motorola,
and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
but its output currently falls in between the SVR4 ABI and the EABI.
This port was built and tested using the PowerOpen ABI, with the following
caveat: we used an ELF assembler and linker. So some attention may be
required on the assembler files to get them through a traditional (XCOFF)
PowerOpen assembler.
This port contains support for the other ABIs, but this may prove to be
incomplete as it is untested.
The RTEMS PowerPC port supports EABI as the primary ABI. The powerpc-rtems
GNU toolset configuration is EABI.
Andrew Bray, 4 December 1995
Joel Sherrill, 16 July 1997


@@ -1,8 +0,0 @@
#
# $Id$
#
Todo list:
Maybe decode external interrupts like the HPPA does.
See c/src/lib/libcpu/powerpc/ppc403/ictrl/* for implementation on ppc403


@@ -1,4 +0,0 @@
RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
{
return (_ISR_Nest_level != 0);
}
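A typical use of this predicate is shown in the sketch below; the call site is
illustrative only, although RTEMS_CALLED_FROM_ISR is a standard RTEMS status code:

if ( _ISR_Is_in_progress() )
  return RTEMS_CALLED_FROM_ISR;   /* refuse to block from interrupt context */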


@@ -1,853 +0,0 @@
/*
* PowerPC CPU Dependent Source
*
* Author: Andrew Bray <andy@i-cubed.co.uk>
*
* COPYRIGHT (c) 1995 by i-cubed ltd.
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of i-cubed limited not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* i-cubed limited makes no representations about the suitability
* of this software for any purpose.
*
* Derived from c/src/exec/cpu/no_cpu/cpu.c:
*
* COPYRIGHT (c) 1989-1997.
* On-Line Applications Research Corporation (OAR).
* Copyright assigned to U.S. Government, 1994.
*
* The license and distribution terms for this file may be found in
* the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/context.h>
#include <rtems/score/thread.h>
#include <rtems/score/interr.h>
/*
* These are for testing purposes.
*/
/* _CPU_Initialize
*
* This routine performs processor dependent initialization.
*
* INPUT PARAMETERS:
* cpu_table - CPU table to initialize
* thread_dispatch - address of dispatching routine
*/
static void ppc_spurious(int, CPU_Interrupt_frame *);
void _CPU_Initialize(
rtems_cpu_table *cpu_table,
void (*thread_dispatch) /* ignored on this CPU */
)
{
proc_ptr handler = (proc_ptr)ppc_spurious;
int i;
#if (PPC_ABI != PPC_ABI_POWEROPEN)
register unsigned32 r2 = 0;
#if (PPC_ABI != PPC_ABI_GCC27)
register unsigned32 r13 = 0;
asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
_CPU_IRQ_info.Default_r13 = r13;
#endif
asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
_CPU_IRQ_info.Default_r2 = r2;
#endif
_CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
_CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
_CPU_IRQ_info.Vector_table = _ISR_Vector_table;
#if (PPC_ABI == PPC_ABI_POWEROPEN)
_CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
#endif
_CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
_CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
#if (PPC_USE_SPRG)
i = (int)&_CPU_IRQ_info;
asm volatile("mtspr 0x113, %0" : "=r" (i) : "0" (i)); /* SPRG 3 */
#endif
/*
* Store Msr Value in the IRQ info structure.
*/
_CPU_MSR_Value(_CPU_IRQ_info.msr_initial);
#if (PPC_USE_SPRG)
i = _CPU_IRQ_info.msr_initial;
asm volatile("mtspr 0x112, %0" : "=r" (i) : "0" (i)); /* SPRG 2 */
#endif
if ( cpu_table->spurious_handler )
handler = (proc_ptr)cpu_table->spurious_handler;
for (i = 0; i < PPC_INTERRUPT_MAX; i++)
_ISR_Vector_table[i] = handler;
_CPU_Table = *cpu_table;
}
/*PAGE
*
* _CPU_ISR_Calculate_level
*
* The PowerPC puts its interrupt enable status in the MSR register
* which also contains things like endianness control. To be more
* awkward, the layout varies from processor to processor. This
* is why it was necessary to adopt a scheme which allowed the user
* to specify specifically which interrupt sources were enabled.
*/
unsigned32 _CPU_ISR_Calculate_level(
unsigned32 new_level
)
{
register unsigned32 new_msr = 0;
/*
* Set the critical interrupt enable bit
*/
#if (PPC_HAS_RFCI)
if ( !(new_level & PPC_INTERRUPT_LEVEL_CE) )
new_msr |= PPC_MSR_CE;
#endif
if ( !(new_level & PPC_INTERRUPT_LEVEL_ME) )
new_msr |= PPC_MSR_ME;
if ( !(new_level & PPC_INTERRUPT_LEVEL_EE) )
new_msr |= PPC_MSR_EE;
return new_msr;
}
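As a host-side illustration of the level-to-MSR mapping above (a sketch only:
the bit values below are placeholders, not the real PPC_MSR_* and
PPC_INTERRUPT_LEVEL_* layout):

#include <stdio.h>

#define LEVEL_EE 0x1          /* placeholder for PPC_INTERRUPT_LEVEL_EE */
#define LEVEL_ME 0x2          /* placeholder for PPC_INTERRUPT_LEVEL_ME */
#define MSR_EE   0x00008000   /* placeholder for PPC_MSR_EE */
#define MSR_ME   0x00001000   /* placeholder for PPC_MSR_ME */

static unsigned calculate_level( unsigned new_level )
{
  unsigned new_msr = 0;
  /* a clear level bit means the source is enabled in the MSR */
  if ( !(new_level & LEVEL_ME) ) new_msr |= MSR_ME;
  if ( !(new_level & LEVEL_EE) ) new_msr |= MSR_EE;
  return new_msr;
}

int main( void )
{
  printf( "%#x\n", calculate_level( 0 ) );        /* level 0: all enabled */
  printf( "%#x\n", calculate_level( LEVEL_EE ) ); /* external exceptions masked */
  return 0;
}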
/*PAGE
*
* _CPU_ISR_Set_level
*
* This routine sets the requested level in the MSR.
*/
void _CPU_ISR_Set_level(
unsigned32 new_level
)
{
register unsigned32 tmp = 0;
register unsigned32 new_msr;
new_msr = _CPU_ISR_Calculate_level( new_level );
asm volatile (
"mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" :
"=&r" ((tmp)) :
"r" ((PPC_MSR_DISABLE_MASK)), "r" ((new_msr)), "0" ((tmp))
);
}
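The inline assembly above is a read-modify-write of the MSR: only the bits in
PPC_MSR_DISABLE_MASK are replaced and everything else is preserved. A pure-C
sketch of that masking (the mask value is a placeholder):

#include <stdio.h>

#define DISABLE_MASK 0x00009000u   /* placeholder for PPC_MSR_DISABLE_MASK */

static unsigned apply_level( unsigned msr, unsigned new_msr )
{
  /* mirrors the mfmsr/andc/and/or/mtmsr sequence */
  return (msr & ~DISABLE_MASK) | (new_msr & DISABLE_MASK);
}

int main( void )
{
  printf( "%#x\n", apply_level( 0x0000b032u, 0 ) ); /* mask all maskable sources */
  return 0;
}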
/*PAGE
*
* _CPU_ISR_Get_level
*
* This routine gets the current interrupt level from the MSR and
* converts it to an RTEMS interrupt level.
*/
unsigned32 _CPU_ISR_Get_level( void )
{
unsigned32 level = 0;
unsigned32 msr;
asm volatile("mfmsr %0" : "=r" ((msr)));
msr &= PPC_MSR_DISABLE_MASK;
/*
* Set the critical interrupt enable bit
*/
#if (PPC_HAS_RFCI)
if ( !(msr & PPC_MSR_CE) )
level |= PPC_INTERRUPT_LEVEL_CE;
#endif
if ( !(msr & PPC_MSR_ME) )
level |= PPC_INTERRUPT_LEVEL_ME;
if ( !(msr & PPC_MSR_EE) )
level |= PPC_INTERRUPT_LEVEL_EE;
return level;
}
/*PAGE
*
* _CPU_Context_Initialize
*/
#if (PPC_ABI == PPC_ABI_POWEROPEN)
#define CPU_MINIMUM_STACK_FRAME_SIZE 56
#else /* PPC_ABI_SVR4 or PPC_ABI_EABI */
#define CPU_MINIMUM_STACK_FRAME_SIZE 8
#endif
void _CPU_Context_Initialize(
Context_Control *the_context,
unsigned32 *stack_base,
unsigned32 size,
unsigned32 new_level,
void *entry_point,
boolean is_fp
)
{
unsigned32 msr_value;
unsigned32 sp;
sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
*((unsigned32 *)sp) = 0;
the_context->gpr1 = sp;
the_context->msr = _CPU_ISR_Calculate_level( new_level );
/*
* The FP bit of the MSR should only be enabled if this is a floating
* point task. Unfortunately, the vfprintf_r routine in newlib
* ends up pushing a floating point register regardless of whether or
* not a floating point number is being printed. Serious restructuring
* of vfprintf.c will be required to avoid this behavior. At this
* time (7 July 1997), this restructuring is not being done.
*/
/*if ( is_fp ) */
the_context->msr |= PPC_MSR_FP;
/*
* Calculate the task's MSR value:
*
* + Set the exception prefix bit to point to the exception table
* + Force the RI bit
* + Use the DR and IR bits
*/
_CPU_MSR_Value( msr_value );
the_context->msr |= (msr_value & PPC_MSR_EP);
the_context->msr |= PPC_MSR_RI;
the_context->msr |= msr_value & (PPC_MSR_DR|PPC_MSR_IR);
#if (PPC_ABI == PPC_ABI_POWEROPEN)
{ unsigned32 *desc = (unsigned32 *)entry_point;
the_context->pc = desc[0];
the_context->gpr2 = desc[1];
}
#endif
#if (PPC_ABI == PPC_ABI_SVR4)
{ unsigned r13 = 0;
asm volatile ("mr %0, 13" : "=r" ((r13)));
the_context->pc = (unsigned32)entry_point;
the_context->gpr13 = r13;
}
#endif
#if (PPC_ABI == PPC_ABI_EABI)
{ unsigned32 r2 = 0;
unsigned r13 = 0;
asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
the_context->pc = (unsigned32)entry_point;
the_context->gpr2 = r2;
the_context->gpr13 = r13;
}
#endif
}
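The stack set-up at the top of this routine reserves one minimum frame at the
high end of the stack and zeroes the back chain so stack traces terminate. A
host-side sketch of that arithmetic, using the 8-byte non-PowerOpen frame size
from above:

#include <stdio.h>
#include <stdint.h>

#define MIN_FRAME 8   /* CPU_MINIMUM_STACK_FRAME_SIZE, SVR4/EABI case */

int main( void )
{
  static uint8_t stack[ 1024 ];
  uintptr_t sp = (uintptr_t) stack + sizeof( stack ) - MIN_FRAME;
  *(uintptr_t *) sp = 0;   /* terminate the back chain */
  printf( "stack top %p, initial sp %p\n",
          (void *) (stack + sizeof( stack )), (void *) sp );
  return 0;
}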
/* _CPU_ISR_install_vector
*
* This kernel routine installs the RTEMS handler for the
* specified vector.
*
* Input parameters:
* vector - interrupt vector number
* old_handler - former ISR for this vector number
* new_handler - replacement ISR for this vector number
*
* Output parameters: NONE
*
*/
void _CPU_ISR_install_vector(
unsigned32 vector,
proc_ptr new_handler,
proc_ptr *old_handler
)
{
proc_ptr ignored;
*old_handler = _ISR_Vector_table[ vector ];
/*
* If the interrupt vector table is a table of pointer to isr entry
* points, then we need to install the appropriate RTEMS interrupt
* handler for this vector number.
*/
/*
* Install the wrapper so this ISR can be invoked properly.
*/
if (_CPU_Table.exceptions_in_RAM)
_CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
* be used by the _ISR_Handler so the user gets control.
*/
_ISR_Vector_table[ vector ] = new_handler ? (ISR_Handler_entry)new_handler :
_CPU_Table.spurious_handler ?
(ISR_Handler_entry)_CPU_Table.spurious_handler :
(ISR_Handler_entry)ppc_spurious;
}
/*PAGE
*
* _CPU_Install_interrupt_stack
*/
void _CPU_Install_interrupt_stack( void )
{
#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
_CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
#else
_CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
#endif
}
/* Handle a spurious interrupt */
static void ppc_spurious(int v, CPU_Interrupt_frame *i)
{
#if 0
printf("Spurious interrupt on vector %d from %08.8x\n",
v, i->pc);
#endif
#ifdef ppc403
if (v == PPC_IRQ_EXTERNAL)
{
register int r = 0;
asm volatile("mtdcr 0x42, %0" :
"=&r" ((r)) : "0" ((r))); /* EXIER */
}
else if (v == PPC_IRQ_PIT)
{
register int r = 0x08000000;
asm volatile("mtspr 0x3d8, %0" :
"=&r" ((r)) : "0" ((r))); /* TSR */
}
else if (v == PPC_IRQ_FIT)
{
register int r = 0x04000000;
asm volatile("mtspr 0x3d8, %0" :
"=&r" ((r)) : "0" ((r))); /* TSR */
}
#endif
}
void _CPU_Fatal_error(unsigned32 _error)
{
asm volatile ("mr 3, %0" : : "r" ((_error)));
asm volatile ("tweq 5,5");
asm volatile ("li 0,0; mtmsr 0");
while (1) ;
}
#define PPC_SYNCHRONOUS_TRAP_BIT_MASK 0x100
#define PPC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
#define PPC_SYNCHRONOUS_TRAP( _trap ) ((_trap)+PPC_SYNCHRONOUS_TRAP_BIT_MASK)
#define PPC_REAL_TRAP_NUMBER( _trap ) ((_trap)%PPC_SYNCHRONOUS_TRAP_BIT_MASK)
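For example, a sketch of how this encoding composes (the vector number is
arbitrary):

#include <stdio.h>

#define SYNC_BIT 0x100   /* PPC_SYNCHRONOUS_TRAP_BIT_MASK */

int main( void )
{
  unsigned vector = 0x07;
  unsigned sync   = vector + SYNC_BIT;   /* PPC_SYNCHRONOUS_TRAP( vector ) */
  unsigned real   = sync % SYNC_BIT;     /* PPC_REAL_TRAP_NUMBER( sync )   */
  printf( "sync=%#x real=%#x\n", sync, real );   /* sync=0x107 real=0x7 */
  return 0;
}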
const CPU_Trap_table_entry _CPU_Trap_slot_template = {
#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
#error " Vector install not tested."
#if (PPC_HAS_FPU)
#error " Vector install not tested."
0x9421feb0, /* stwu r1, -(20*4 + 18*8 + IP_END)(r1) */
#else
#error " Vector install not tested."
0x9421ff40, /* stwu r1, -(20*4 + IP_END)(r1) */
#endif
#else
0x9421ff90, /* stwu r1, -(IP_END)(r1) */
#endif
0x90010008, /* stw %r0, IP_0(%r1) */
0x38000000, /* li %r0, PPC_IRQ */
0x48000002 /* ba PROC (_ISR_Handler) */
};
#if defined(mpc860) || defined(mpc821)
const CPU_Trap_table_entry _CPU_Trap_slot_template_m860 = {
0x7c0803ac, /* mtlr %r0 */
0x81210028, /* lwz %r9, IP_9(%r1) */
0x38000000, /* li %r0, PPC_IRQ */
0x48000002 /* b PROC (_ISR_Handler) */
};
#endif /* mpc860 */
unsigned32 ppc_exception_vector_addr(
unsigned32 vector
);
/*PAGE
*
* _CPU_ISR_install_raw_handler
*
* This routine installs the specified handler as a "raw" non-executive
* supported trap handler (a.k.a. interrupt service routine).
*
* Input Parameters:
* vector - trap table entry number plus synchronous
* vs. asynchronous information
* new_handler - address of the handler to be installed
* old_handler - pointer to an address of the handler previously installed
*
* Output Parameters: NONE
* *new_handler - address of the handler previously installed
*
* NOTE:
*
* This routine is based on the SPARC routine _CPU_ISR_install_raw_handler.
* Install a software trap handler as an executive interrupt handler
* (which is desirable since RTEMS takes care of window and register issues),
* then the executive needs to know that the return address is to the trap
* rather than the instruction following the trap.
*
*/
void _CPU_ISR_install_raw_handler(
unsigned32 vector,
proc_ptr new_handler,
proc_ptr *old_handler
)
{
unsigned32 real_vector;
CPU_Trap_table_entry *slot;
unsigned32 u32_handler=0;
/*
* Get the "real" trap number for this vector ignoring the synchronous
* versus asynchronous indicator included with our vector numbers.
*/
real_vector = vector;
/*
* Get the current base address of the trap table and calculate a pointer
* to the slot we are interested in.
*/
slot = (CPU_Trap_table_entry *)ppc_exception_vector_addr( real_vector );
/*
* Get the address of the old_handler from the trap table.
*
* NOTE: The old_handler returned will be bogus if it does not follow
* the RTEMS model.
*/
#define HIGH_BITS_MASK 0xFFFFFC00
#define HIGH_BITS_SHIFT 10
#define LOW_BITS_MASK 0x000003FF
if (slot->stwu_r1 == _CPU_Trap_slot_template.stwu_r1) {
/*
* Set u32_handler = to target address
*/
u32_handler = slot->b_Handler & 0x03fffffc;
/* IMD FIX: sign extend address fragment... */
if (u32_handler & 0x02000000) {
u32_handler |= 0xfc000000;
}
*old_handler = (proc_ptr) u32_handler;
} else
/* There are two kinds of handlers for the MPC860. One is the 'standard'
* one like above. The other is for the cascaded interrupts from the SIU
* and CPM. Therefore we must check for the alternate one if the standard
* one is not present
*/
#if defined(mpc860) || defined(mpc821)
if (slot->stwu_r1 == _CPU_Trap_slot_template_m860.stwu_r1) {
/*
* Set u32_handler = to target address
*/
u32_handler = slot->b_Handler & 0x03fffffc;
*old_handler = (proc_ptr) u32_handler;
} else
#endif /* mpc860 */
*old_handler = 0;
/*
* Copy the template to the slot and then fix it.
*/
#if defined(mpc860) || defined(mpc821)
if (vector >= PPC_IRQ_IRQ0)
*slot = _CPU_Trap_slot_template_m860;
else
#endif /* mpc860 */
*slot = _CPU_Trap_slot_template;
u32_handler = (unsigned32) new_handler;
/*
* IMD FIX: insert address fragment only (bits 6..29)
* therefore check for proper address range
* and remove unwanted bits
*/
if ((u32_handler & 0xfc000000) == 0xfc000000) {
u32_handler &= ~0xfc000000;
}
else if ((u32_handler & 0xfc000000) != 0x00000000) {
_Internal_error_Occurred(INTERNAL_ERROR_CORE,
TRUE,
u32_handler);
}
slot->b_Handler |= u32_handler;
slot->li_r0_IRQ |= vector;
_CPU_Data_Cache_Block_Flush( slot );
}
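The target extraction and the IMD sign extension above can be exercised on a
host; a minimal sketch (0x48000002 is the 'ba' opcode used in the slot
template):

#include <stdio.h>
#include <stdint.h>

static uint32_t branch_target( uint32_t insn )
{
  uint32_t t = insn & 0x03fffffc;   /* LI field, bits 6..29 */
  if ( t & 0x02000000 )             /* fragment lies in the top region */
    t |= 0xfc000000;                /* sign extend, as in the IMD fix  */
  return t;
}

int main( void )
{
  uint32_t slot = 0x48000002 | 0x00003000;   /* ba 0x3000 */
  printf( "handler at %#x\n", branch_target( slot ) );
  return 0;
}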
unsigned32 ppc_exception_vector_addr(
unsigned32 vector
)
{
#if (!PPC_HAS_EVPR)
unsigned32 Msr;
#endif
unsigned32 Top = 0;
unsigned32 Offset = 0x000;
#if (PPC_HAS_EXCEPTION_PREFIX)
_CPU_MSR_Value ( Msr );
if ( ( Msr & PPC_MSR_EP) != 0 ) /* Vectors at FFFx_xxxx */
Top = 0xfff00000;
#elif (PPC_HAS_EVPR)
asm volatile( "mfspr %0,0x3d6" : "=r" (Top)); /* EVPR */
Top = Top & 0xffff0000;
#endif
switch ( vector ) {
case PPC_IRQ_SYSTEM_RESET: /* on 40x aka PPC_IRQ_CRIT */
Offset = 0x00100;
break;
case PPC_IRQ_MCHECK:
Offset = 0x00200;
break;
case PPC_IRQ_PROTECT:
Offset = 0x00300;
break;
case PPC_IRQ_ISI:
Offset = 0x00400;
break;
case PPC_IRQ_EXTERNAL:
Offset = 0x00500;
break;
case PPC_IRQ_ALIGNMENT:
Offset = 0x00600;
break;
case PPC_IRQ_PROGRAM:
Offset = 0x00700;
break;
case PPC_IRQ_NOFP:
Offset = 0x00800;
break;
case PPC_IRQ_DECREMENTER:
Offset = 0x00900;
break;
case PPC_IRQ_RESERVED_A:
Offset = 0x00a00;
break;
case PPC_IRQ_RESERVED_B:
Offset = 0x00b00;
break;
case PPC_IRQ_SCALL:
Offset = 0x00c00;
break;
case PPC_IRQ_TRACE:
Offset = 0x00d00;
break;
case PPC_IRQ_FP_ASST:
Offset = 0x00e00;
break;
#if defined(ppc403)
/* PPC_IRQ_CRIT is the same vector as PPC_IRQ_RESET
case PPC_IRQ_CRIT:
Offset = 0x00100;
break;
*/
case PPC_IRQ_PIT:
Offset = 0x01000;
break;
case PPC_IRQ_FIT:
Offset = 0x01010;
break;
case PPC_IRQ_WATCHDOG:
Offset = 0x01020;
break;
case PPC_IRQ_DEBUG:
Offset = 0x02000;
break;
#elif defined(ppc601)
case PPC_IRQ_TRACE:
Offset = 0x02000;
break;
#elif defined(ppc603)
case PPC_IRQ_TRANS_MISS:
Offset = 0x1000;
break;
case PPC_IRQ_DATA_LOAD:
Offset = 0x1100;
break;
case PPC_IRQ_DATA_STORE:
Offset = 0x1200;
break;
case PPC_IRQ_ADDR_BRK:
Offset = 0x1300;
break;
case PPC_IRQ_SYS_MGT:
Offset = 0x1400;
break;
#elif defined(ppc603e)
case PPC_TLB_INST_MISS:
Offset = 0x1000;
break;
case PPC_TLB_LOAD_MISS:
Offset = 0x1100;
break;
case PPC_TLB_STORE_MISS:
Offset = 0x1200;
break;
case PPC_IRQ_ADDRBRK:
Offset = 0x1300;
break;
case PPC_IRQ_SYS_MGT:
Offset = 0x1400;
break;
#elif defined(ppc604)
case PPC_IRQ_ADDR_BRK:
Offset = 0x1300;
break;
case PPC_IRQ_SYS_MGT:
Offset = 0x1400;
break;
#elif defined(mpc860) || defined(mpc821)
case PPC_IRQ_EMULATE:
Offset = 0x1000;
break;
case PPC_IRQ_INST_MISS:
Offset = 0x1100;
break;
case PPC_IRQ_DATA_MISS:
Offset = 0x1200;
break;
case PPC_IRQ_INST_ERR:
Offset = 0x1300;
break;
case PPC_IRQ_DATA_ERR:
Offset = 0x1400;
break;
case PPC_IRQ_DATA_BPNT:
Offset = 0x1c00;
break;
case PPC_IRQ_INST_BPNT:
Offset = 0x1d00;
break;
case PPC_IRQ_IO_BPNT:
Offset = 0x1e00;
break;
case PPC_IRQ_DEV_PORT:
Offset = 0x1f00;
break;
case PPC_IRQ_IRQ0:
Offset = 0x2000;
break;
case PPC_IRQ_LVL0:
Offset = 0x2040;
break;
case PPC_IRQ_IRQ1:
Offset = 0x2080;
break;
case PPC_IRQ_LVL1:
Offset = 0x20c0;
break;
case PPC_IRQ_IRQ2:
Offset = 0x2100;
break;
case PPC_IRQ_LVL2:
Offset = 0x2140;
break;
case PPC_IRQ_IRQ3:
Offset = 0x2180;
break;
case PPC_IRQ_LVL3:
Offset = 0x21c0;
break;
case PPC_IRQ_IRQ4:
Offset = 0x2200;
break;
case PPC_IRQ_LVL4:
Offset = 0x2240;
break;
case PPC_IRQ_IRQ5:
Offset = 0x2280;
break;
case PPC_IRQ_LVL5:
Offset = 0x22c0;
break;
case PPC_IRQ_IRQ6:
Offset = 0x2300;
break;
case PPC_IRQ_LVL6:
Offset = 0x2340;
break;
case PPC_IRQ_IRQ7:
Offset = 0x2380;
break;
case PPC_IRQ_LVL7:
Offset = 0x23c0;
break;
case PPC_IRQ_CPM_RESERVED_0:
Offset = 0x2400;
break;
case PPC_IRQ_CPM_PC4:
Offset = 0x2410;
break;
case PPC_IRQ_CPM_PC5:
Offset = 0x2420;
break;
case PPC_IRQ_CPM_SMC2:
Offset = 0x2430;
break;
case PPC_IRQ_CPM_SMC1:
Offset = 0x2440;
break;
case PPC_IRQ_CPM_SPI:
Offset = 0x2450;
break;
case PPC_IRQ_CPM_PC6:
Offset = 0x2460;
break;
case PPC_IRQ_CPM_TIMER4:
Offset = 0x2470;
break;
case PPC_IRQ_CPM_RESERVED_8:
Offset = 0x2480;
break;
case PPC_IRQ_CPM_PC7:
Offset = 0x2490;
break;
case PPC_IRQ_CPM_PC8:
Offset = 0x24a0;
break;
case PPC_IRQ_CPM_PC9:
Offset = 0x24b0;
break;
case PPC_IRQ_CPM_TIMER3:
Offset = 0x24c0;
break;
case PPC_IRQ_CPM_RESERVED_D:
Offset = 0x24d0;
break;
case PPC_IRQ_CPM_PC10:
Offset = 0x24e0;
break;
case PPC_IRQ_CPM_PC11:
Offset = 0x24f0;
break;
case PPC_IRQ_CPM_I2C:
Offset = 0x2500;
break;
case PPC_IRQ_CPM_RISC_TIMER:
Offset = 0x2510;
break;
case PPC_IRQ_CPM_TIMER2:
Offset = 0x2520;
break;
case PPC_IRQ_CPM_RESERVED_13:
Offset = 0x2530;
break;
case PPC_IRQ_CPM_IDMA2:
Offset = 0x2540;
break;
case PPC_IRQ_CPM_IDMA1:
Offset = 0x2550;
break;
case PPC_IRQ_CPM_SDMA_ERROR:
Offset = 0x2560;
break;
case PPC_IRQ_CPM_PC12:
Offset = 0x2570;
break;
case PPC_IRQ_CPM_PC13:
Offset = 0x2580;
break;
case PPC_IRQ_CPM_TIMER1:
Offset = 0x2590;
break;
case PPC_IRQ_CPM_PC14:
Offset = 0x25a0;
break;
case PPC_IRQ_CPM_SCC4:
Offset = 0x25b0;
break;
case PPC_IRQ_CPM_SCC3:
Offset = 0x25c0;
break;
case PPC_IRQ_CPM_SCC2:
Offset = 0x25d0;
break;
case PPC_IRQ_CPM_SCC1:
Offset = 0x25e0;
break;
case PPC_IRQ_CPM_PC15:
Offset = 0x25f0;
break;
#endif
}
Top += Offset;
return Top;
}
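So, for instance, on a 60x part with MSR[EP] set the external interrupt vector
resolves to 0xfff00000 + 0x500; a trivial sketch:

#include <stdio.h>

int main( void )
{
  unsigned top    = 0xfff00000;   /* vectors at FFFx_xxxx when MSR[EP] is set */
  unsigned offset = 0x00500;      /* PPC_IRQ_EXTERNAL */
  printf( "%#x\n", top + offset );   /* 0xfff00500 */
  return 0;
}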

File diff suppressed because it is too large


@@ -1,809 +0,0 @@
/* cpu_asm.s 1.1 - 95/12/04
*
* This file contains the assembly code for the PowerPC implementation
* of RTEMS.
*
* Author: Andrew Bray <andy@i-cubed.co.uk>
*
* COPYRIGHT (c) 1995 by i-cubed ltd.
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of i-cubed limited not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* i-cubed limited makes no representations about the suitability
* of this software for any purpose.
*
* Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
*
* COPYRIGHT (c) 1989-1997.
* On-Line Applications Research Corporation (OAR).
* Copyright assigned to U.S. Government, 1994.
*
* The license and distribution terms for this file may be found in
* the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#include <asm.h>
/*
* Offsets for various Contexts
*/
.set GP_1, 0
.set GP_2, (GP_1 + 4)
.set GP_13, (GP_2 + 4)
.set GP_14, (GP_13 + 4)
.set GP_15, (GP_14 + 4)
.set GP_16, (GP_15 + 4)
.set GP_17, (GP_16 + 4)
.set GP_18, (GP_17 + 4)
.set GP_19, (GP_18 + 4)
.set GP_20, (GP_19 + 4)
.set GP_21, (GP_20 + 4)
.set GP_22, (GP_21 + 4)
.set GP_23, (GP_22 + 4)
.set GP_24, (GP_23 + 4)
.set GP_25, (GP_24 + 4)
.set GP_26, (GP_25 + 4)
.set GP_27, (GP_26 + 4)
.set GP_28, (GP_27 + 4)
.set GP_29, (GP_28 + 4)
.set GP_30, (GP_29 + 4)
.set GP_31, (GP_30 + 4)
.set GP_CR, (GP_31 + 4)
.set GP_PC, (GP_CR + 4)
.set GP_MSR, (GP_PC + 4)
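For orientation, these GP_* offsets describe a register context equivalent to
the following C sketch (field names are illustrative; the authoritative layout
is Context_Control in the port's cpu.h):

#include <stdint.h>

typedef struct {              /* offsets match the .set values above */
  uint32_t gpr1;              /* GP_1  =  0 */
  uint32_t gpr2;              /* GP_2  =  4 */
  uint32_t gpr13_31[ 19 ];    /* GP_13 =  8 ... GP_31 = 80 */
  uint32_t cr;                /* GP_CR = 84 */
  uint32_t pc;                /* GP_PC = 88 */
  uint32_t msr;               /* GP_MSR = 92 */
} gp_context_sketch;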
#if (PPC_HAS_DOUBLE == 1)
.set FP_0, 0
.set FP_1, (FP_0 + 8)
.set FP_2, (FP_1 + 8)
.set FP_3, (FP_2 + 8)
.set FP_4, (FP_3 + 8)
.set FP_5, (FP_4 + 8)
.set FP_6, (FP_5 + 8)
.set FP_7, (FP_6 + 8)
.set FP_8, (FP_7 + 8)
.set FP_9, (FP_8 + 8)
.set FP_10, (FP_9 + 8)
.set FP_11, (FP_10 + 8)
.set FP_12, (FP_11 + 8)
.set FP_13, (FP_12 + 8)
.set FP_14, (FP_13 + 8)
.set FP_15, (FP_14 + 8)
.set FP_16, (FP_15 + 8)
.set FP_17, (FP_16 + 8)
.set FP_18, (FP_17 + 8)
.set FP_19, (FP_18 + 8)
.set FP_20, (FP_19 + 8)
.set FP_21, (FP_20 + 8)
.set FP_22, (FP_21 + 8)
.set FP_23, (FP_22 + 8)
.set FP_24, (FP_23 + 8)
.set FP_25, (FP_24 + 8)
.set FP_26, (FP_25 + 8)
.set FP_27, (FP_26 + 8)
.set FP_28, (FP_27 + 8)
.set FP_29, (FP_28 + 8)
.set FP_30, (FP_29 + 8)
.set FP_31, (FP_30 + 8)
.set FP_FPSCR, (FP_31 + 8)
#else
.set FP_0, 0
.set FP_1, (FP_0 + 4)
.set FP_2, (FP_1 + 4)
.set FP_3, (FP_2 + 4)
.set FP_4, (FP_3 + 4)
.set FP_5, (FP_4 + 4)
.set FP_6, (FP_5 + 4)
.set FP_7, (FP_6 + 4)
.set FP_8, (FP_7 + 4)
.set FP_9, (FP_8 + 4)
.set FP_10, (FP_9 + 4)
.set FP_11, (FP_10 + 4)
.set FP_12, (FP_11 + 4)
.set FP_13, (FP_12 + 4)
.set FP_14, (FP_13 + 4)
.set FP_15, (FP_14 + 4)
.set FP_16, (FP_15 + 4)
.set FP_17, (FP_16 + 4)
.set FP_18, (FP_17 + 4)
.set FP_19, (FP_18 + 4)
.set FP_20, (FP_19 + 4)
.set FP_21, (FP_20 + 4)
.set FP_22, (FP_21 + 4)
.set FP_23, (FP_22 + 4)
.set FP_24, (FP_23 + 4)
.set FP_25, (FP_24 + 4)
.set FP_26, (FP_25 + 4)
.set FP_27, (FP_26 + 4)
.set FP_28, (FP_27 + 4)
.set FP_29, (FP_28 + 4)
.set FP_30, (FP_29 + 4)
.set FP_31, (FP_30 + 4)
.set FP_FPSCR, (FP_31 + 4)
#endif
.set IP_LINK, 0
#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
.set IP_0, (IP_LINK + 56)
#else
.set IP_0, (IP_LINK + 8)
#endif
.set IP_2, (IP_0 + 4)
.set IP_3, (IP_2 + 4)
.set IP_4, (IP_3 + 4)
.set IP_5, (IP_4 + 4)
.set IP_6, (IP_5 + 4)
.set IP_7, (IP_6 + 4)
.set IP_8, (IP_7 + 4)
.set IP_9, (IP_8 + 4)
.set IP_10, (IP_9 + 4)
.set IP_11, (IP_10 + 4)
.set IP_12, (IP_11 + 4)
.set IP_13, (IP_12 + 4)
.set IP_28, (IP_13 + 4)
.set IP_29, (IP_28 + 4)
.set IP_30, (IP_29 + 4)
.set IP_31, (IP_30 + 4)
.set IP_CR, (IP_31 + 4)
.set IP_CTR, (IP_CR + 4)
.set IP_XER, (IP_CTR + 4)
.set IP_LR, (IP_XER + 4)
.set IP_PC, (IP_LR + 4)
.set IP_MSR, (IP_PC + 4)
.set IP_END, (IP_MSR + 16)
/* _CPU_IRQ_info offsets */
/* These must be in this order */
.set Nest_level, 0
.set Disable_level, 4
.set Vector_table, 8
.set Stack, 12
#if (PPC_ABI == PPC_ABI_POWEROPEN)
.set Dispatch_r2, 16
.set Switch_necessary, 20
#else
.set Default_r2, 16
#if (PPC_ABI != PPC_ABI_GCC27)
.set Default_r13, 20
.set Switch_necessary, 24
#else
.set Switch_necessary, 20
#endif
#endif
.set Signal, Switch_necessary + 4
.set msr_initial, Signal + 4
BEGIN_CODE
/*
* _CPU_Context_save_fp_context
*
* This routine is responsible for saving the FP context
* at *fp_context_ptr. If the point to load the FP context
* from is changed then the pointer is modified by this routine.
*
* Sometimes a macro implementation of this is in cpu.h which dereferences
* the ** and a similarly named routine in this file is passed something
* like a (Context_Control_fp *). The general rule on making this decision
* is to avoid writing assembly language.
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_CPU_Context_save_fp)
PROC (_CPU_Context_save_fp):
#if (PPC_HAS_FPU == 1)
lwz r3, 0(r3)
#if (PPC_HAS_DOUBLE == 1)
stfd f0, FP_0(r3)
stfd f1, FP_1(r3)
stfd f2, FP_2(r3)
stfd f3, FP_3(r3)
stfd f4, FP_4(r3)
stfd f5, FP_5(r3)
stfd f6, FP_6(r3)
stfd f7, FP_7(r3)
stfd f8, FP_8(r3)
stfd f9, FP_9(r3)
stfd f10, FP_10(r3)
stfd f11, FP_11(r3)
stfd f12, FP_12(r3)
stfd f13, FP_13(r3)
stfd f14, FP_14(r3)
stfd f15, FP_15(r3)
stfd f16, FP_16(r3)
stfd f17, FP_17(r3)
stfd f18, FP_18(r3)
stfd f19, FP_19(r3)
stfd f20, FP_20(r3)
stfd f21, FP_21(r3)
stfd f22, FP_22(r3)
stfd f23, FP_23(r3)
stfd f24, FP_24(r3)
stfd f25, FP_25(r3)
stfd f26, FP_26(r3)
stfd f27, FP_27(r3)
stfd f28, FP_28(r3)
stfd f29, FP_29(r3)
stfd f30, FP_30(r3)
stfd f31, FP_31(r3)
mffs f2
stfd f2, FP_FPSCR(r3)
#else
stfs f0, FP_0(r3)
stfs f1, FP_1(r3)
stfs f2, FP_2(r3)
stfs f3, FP_3(r3)
stfs f4, FP_4(r3)
stfs f5, FP_5(r3)
stfs f6, FP_6(r3)
stfs f7, FP_7(r3)
stfs f8, FP_8(r3)
stfs f9, FP_9(r3)
stfs f10, FP_10(r3)
stfs f11, FP_11(r3)
stfs f12, FP_12(r3)
stfs f13, FP_13(r3)
stfs f14, FP_14(r3)
stfs f15, FP_15(r3)
stfs f16, FP_16(r3)
stfs f17, FP_17(r3)
stfs f18, FP_18(r3)
stfs f19, FP_19(r3)
stfs f20, FP_20(r3)
stfs f21, FP_21(r3)
stfs f22, FP_22(r3)
stfs f23, FP_23(r3)
stfs f24, FP_24(r3)
stfs f25, FP_25(r3)
stfs f26, FP_26(r3)
stfs f27, FP_27(r3)
stfs f28, FP_28(r3)
stfs f29, FP_29(r3)
stfs f30, FP_30(r3)
stfs f31, FP_31(r3)
mffs f2
stfs f2, FP_FPSCR(r3)
#endif
#endif
blr
/*
* _CPU_Context_restore_fp_context
*
* This routine is responsible for restoring the FP context
* at *fp_context_ptr. If the point to load the FP context
* from is changed then the pointer is modified by this routine.
*
* Sometimes a macro implementation of this is in cpu.h which dereferences
* the ** and a similarly named routine in this file is passed something
* like a (Context_Control_fp *). The general rule on making this decision
* is to avoid writing assembly language.
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_CPU_Context_restore_fp)
PROC (_CPU_Context_restore_fp):
#if (PPC_HAS_FPU == 1)
lwz r3, 0(r3)
#if (PPC_HAS_DOUBLE == 1)
lfd f2, FP_FPSCR(r3)
mtfsf 255, f2
lfd f0, FP_0(r3)
lfd f1, FP_1(r3)
lfd f2, FP_2(r3)
lfd f3, FP_3(r3)
lfd f4, FP_4(r3)
lfd f5, FP_5(r3)
lfd f6, FP_6(r3)
lfd f7, FP_7(r3)
lfd f8, FP_8(r3)
lfd f9, FP_9(r3)
lfd f10, FP_10(r3)
lfd f11, FP_11(r3)
lfd f12, FP_12(r3)
lfd f13, FP_13(r3)
lfd f14, FP_14(r3)
lfd f15, FP_15(r3)
lfd f16, FP_16(r3)
lfd f17, FP_17(r3)
lfd f18, FP_18(r3)
lfd f19, FP_19(r3)
lfd f20, FP_20(r3)
lfd f21, FP_21(r3)
lfd f22, FP_22(r3)
lfd f23, FP_23(r3)
lfd f24, FP_24(r3)
lfd f25, FP_25(r3)
lfd f26, FP_26(r3)
lfd f27, FP_27(r3)
lfd f28, FP_28(r3)
lfd f29, FP_29(r3)
lfd f30, FP_30(r3)
lfd f31, FP_31(r3)
#else
lfs f2, FP_FPSCR(r3)
mtfsf 255, f2
lfs f0, FP_0(r3)
lfs f1, FP_1(r3)
lfs f2, FP_2(r3)
lfs f3, FP_3(r3)
lfs f4, FP_4(r3)
lfs f5, FP_5(r3)
lfs f6, FP_6(r3)
lfs f7, FP_7(r3)
lfs f8, FP_8(r3)
lfs f9, FP_9(r3)
lfs f10, FP_10(r3)
lfs f11, FP_11(r3)
lfs f12, FP_12(r3)
lfs f13, FP_13(r3)
lfs f14, FP_14(r3)
lfs f15, FP_15(r3)
lfs f16, FP_16(r3)
lfs f17, FP_17(r3)
lfs f18, FP_18(r3)
lfs f19, FP_19(r3)
lfs f20, FP_20(r3)
lfs f21, FP_21(r3)
lfs f22, FP_22(r3)
lfs f23, FP_23(r3)
lfs f24, FP_24(r3)
lfs f25, FP_25(r3)
lfs f26, FP_26(r3)
lfs f27, FP_27(r3)
lfs f28, FP_28(r3)
lfs f29, FP_29(r3)
lfs f30, FP_30(r3)
lfs f31, FP_31(r3)
#endif
#endif
blr
/* _CPU_Context_switch
*
* This routine performs a normal non-FP context switch.
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_CPU_Context_switch)
PROC (_CPU_Context_switch):
sync
isync
#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
stw r1, GP_1(r3)
lwz r1, GP_1(r4)
stw r2, GP_2(r3)
lwz r2, GP_2(r4)
#if (PPC_USE_MULTIPLE == 1)
stmw r13, GP_13(r3)
lmw r13, GP_13(r4)
#else
stw r13, GP_13(r3)
lwz r13, GP_13(r4)
stw r14, GP_14(r3)
lwz r14, GP_14(r4)
stw r15, GP_15(r3)
lwz r15, GP_15(r4)
stw r16, GP_16(r3)
lwz r16, GP_16(r4)
stw r17, GP_17(r3)
lwz r17, GP_17(r4)
stw r18, GP_18(r3)
lwz r18, GP_18(r4)
stw r19, GP_19(r3)
lwz r19, GP_19(r4)
stw r20, GP_20(r3)
lwz r20, GP_20(r4)
stw r21, GP_21(r3)
lwz r21, GP_21(r4)
stw r22, GP_22(r3)
lwz r22, GP_22(r4)
stw r23, GP_23(r3)
lwz r23, GP_23(r4)
stw r24, GP_24(r3)
lwz r24, GP_24(r4)
stw r25, GP_25(r3)
lwz r25, GP_25(r4)
stw r26, GP_26(r3)
lwz r26, GP_26(r4)
stw r27, GP_27(r3)
lwz r27, GP_27(r4)
stw r28, GP_28(r3)
lwz r28, GP_28(r4)
stw r29, GP_29(r3)
lwz r29, GP_29(r4)
stw r30, GP_30(r3)
lwz r30, GP_30(r4)
stw r31, GP_31(r3)
lwz r31, GP_31(r4)
#endif
mfcr r5
stw r5, GP_CR(r3)
lwz r5, GP_CR(r4)
mflr r6
mtcrf 255, r5
stw r6, GP_PC(r3)
lwz r6, GP_PC(r4)
mfmsr r7
mtlr r6
stw r7, GP_MSR(r3)
lwz r7, GP_MSR(r4)
mtmsr r7
#endif
#if (PPC_CACHE_ALIGNMENT == 16)
/* This assumes that all the registers are in the given order */
li r5, 16
addi r3,r3,-4
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r1, GP_1+4(r3)
stw r2, GP_2+4(r3)
#if (PPC_USE_MULTIPLE == 1)
addi r3, r3, GP_14+4
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
addi r3, r3, GP_18-GP_14
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
addi r3, r3, GP_22-GP_18
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
addi r3, r3, GP_26-GP_22
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stmw r13, GP_13-GP_26(r3)
#else
stw r13, GP_13+4(r3)
stwu r14, GP_14+4(r3)
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r15, GP_15-GP_14(r3)
stw r16, GP_16-GP_14(r3)
stw r17, GP_17-GP_14(r3)
stwu r18, GP_18-GP_14(r3)
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r19, GP_19-GP_18(r3)
stw r20, GP_20-GP_18(r3)
stw r21, GP_21-GP_18(r3)
stwu r22, GP_22-GP_18(r3)
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r23, GP_23-GP_22(r3)
stw r24, GP_24-GP_22(r3)
stw r25, GP_25-GP_22(r3)
stwu r26, GP_26-GP_22(r3)
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r27, GP_27-GP_26(r3)
stw r28, GP_28-GP_26(r3)
stw r29, GP_29-GP_26(r3)
stw r30, GP_30-GP_26(r3)
stw r31, GP_31-GP_26(r3)
#endif
#if ( PPC_USE_DATA_CACHE )
dcbt r0, r4
#endif
mfcr r6
stw r6, GP_CR-GP_26(r3)
mflr r7
stw r7, GP_PC-GP_26(r3)
mfmsr r8
stw r8, GP_MSR-GP_26(r3)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r1, GP_1(r4)
lwz r2, GP_2(r4)
#if (PPC_USE_MULTIPLE == 1)
addi r4, r4, GP_15
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
addi r4, r4, GP_19-GP_15
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
addi r4, r4, GP_23-GP_19
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
addi r4, r4, GP_27-GP_23
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lmw r13, GP_13-GP_27(r4)
#else
lwz r13, GP_13(r4)
lwz r14, GP_14(r4)
lwzu r15, GP_15(r4)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r16, GP_16-GP_15(r4)
lwz r17, GP_17-GP_15(r4)
lwz r18, GP_18-GP_15(r4)
lwzu r19, GP_19-GP_15(r4)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r20, GP_20-GP_19(r4)
lwz r21, GP_21-GP_19(r4)
lwz r22, GP_22-GP_19(r4)
lwzu r23, GP_23-GP_19(r4)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r24, GP_24-GP_23(r4)
lwz r25, GP_25-GP_23(r4)
lwz r26, GP_26-GP_23(r4)
lwzu r27, GP_27-GP_23(r4)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r28, GP_28-GP_27(r4)
lwz r29, GP_29-GP_27(r4)
lwz r30, GP_30-GP_27(r4)
lwz r31, GP_31-GP_27(r4)
#endif
lwz r6, GP_CR-GP_27(r4)
lwz r7, GP_PC-GP_27(r4)
lwz r8, GP_MSR-GP_27(r4)
mtcrf 255, r6
mtlr r7
mtmsr r8
#endif
#if (PPC_CACHE_ALIGNMENT == 32)
/* This assumes that all the registers are in the given order */
li r5, 32
addi r3,r3,-4
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r1, GP_1+4(r3)
stw r2, GP_2+4(r3)
#if (PPC_USE_MULTIPLE == 1)
addi r3, r3, GP_18+4
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stmw r13, GP_13-GP_18(r3)
#else
stw r13, GP_13+4(r3)
stw r14, GP_14+4(r3)
stw r15, GP_15+4(r3)
stw r16, GP_16+4(r3)
stw r17, GP_17+4(r3)
stwu r18, GP_18+4(r3)
#if ( PPC_USE_DATA_CACHE )
dcbz r5, r3
#endif
stw r19, GP_19-GP_18(r3)
stw r20, GP_20-GP_18(r3)
stw r21, GP_21-GP_18(r3)
stw r22, GP_22-GP_18(r3)
stw r23, GP_23-GP_18(r3)
stw r24, GP_24-GP_18(r3)
stw r25, GP_25-GP_18(r3)
stw r26, GP_26-GP_18(r3)
stw r27, GP_27-GP_18(r3)
stw r28, GP_28-GP_18(r3)
stw r29, GP_29-GP_18(r3)
stw r30, GP_30-GP_18(r3)
stw r31, GP_31-GP_18(r3)
#endif
#if ( PPC_USE_DATA_CACHE )
dcbt r0, r4
#endif
mfcr r6
stw r6, GP_CR-GP_18(r3)
mflr r7
stw r7, GP_PC-GP_18(r3)
mfmsr r8
stw r8, GP_MSR-GP_18(r3)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r1, GP_1(r4)
lwz r2, GP_2(r4)
#if (PPC_USE_MULTIPLE == 1)
addi r4, r4, GP_19
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lmw r13, GP_13-GP_19(r4)
#else
lwz r13, GP_13(r4)
lwz r14, GP_14(r4)
lwz r15, GP_15(r4)
lwz r16, GP_16(r4)
lwz r17, GP_17(r4)
lwz r18, GP_18(r4)
lwzu r19, GP_19(r4)
#if ( PPC_USE_DATA_CACHE )
dcbt r5, r4
#endif
lwz r20, GP_20-GP_19(r4)
lwz r21, GP_21-GP_19(r4)
lwz r22, GP_22-GP_19(r4)
lwz r23, GP_23-GP_19(r4)
lwz r24, GP_24-GP_19(r4)
lwz r25, GP_25-GP_19(r4)
lwz r26, GP_26-GP_19(r4)
lwz r27, GP_27-GP_19(r4)
lwz r28, GP_28-GP_19(r4)
lwz r29, GP_29-GP_19(r4)
lwz r30, GP_30-GP_19(r4)
lwz r31, GP_31-GP_19(r4)
#endif
lwz r6, GP_CR-GP_19(r4)
lwz r7, GP_PC-GP_19(r4)
lwz r8, GP_MSR-GP_19(r4)
mtcrf 255, r6
mtlr r7
mtmsr r8
#endif
blr
/*
* _CPU_Context_restore
*
* This routine is generally used only to restart self in an
* efficient manner. It may simply be a label in _CPU_Context_switch.
*
* NOTE: May be unnecessary to reload some registers.
*/
/*
* ACB: Don't worry about cache optimisation here - this is not THAT critical.
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_CPU_Context_restore)
PROC (_CPU_Context_restore):
lwz r5, GP_CR(r3)
lwz r6, GP_PC(r3)
lwz r7, GP_MSR(r3)
mtcrf 255, r5
mtlr r6
mtmsr r7
lwz r1, GP_1(r3)
lwz r2, GP_2(r3)
#if (PPC_USE_MULTIPLE == 1)
lmw r13, GP_13(r3)
#else
lwz r13, GP_13(r3)
lwz r14, GP_14(r3)
lwz r15, GP_15(r3)
lwz r16, GP_16(r3)
lwz r17, GP_17(r3)
lwz r18, GP_18(r3)
lwz r19, GP_19(r3)
lwz r20, GP_20(r3)
lwz r21, GP_21(r3)
lwz r22, GP_22(r3)
lwz r23, GP_23(r3)
lwz r24, GP_24(r3)
lwz r25, GP_25(r3)
lwz r26, GP_26(r3)
lwz r27, GP_27(r3)
lwz r28, GP_28(r3)
lwz r29, GP_29(r3)
lwz r30, GP_30(r3)
lwz r31, GP_31(r3)
#endif
blr
/* Individual interrupt prologues look like this:
* #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
* #if (PPC_HAS_FPU)
* stwu r1, -(20*4 + 18*8 + IP_END)(r1)
* #else
* stwu r1, -(20*4 + IP_END)(r1)
* #endif
* #else
* stwu r1, -(IP_END)(r1)
* #endif
* stw r0, IP_0(r1)
*
* li r0, vectornum
* b PROC (_ISR_Handler{,C})
*/
/* void __ISR_Handler()
*
* This routine provides the RTEMS interrupt management.
* The vector number is in r0. R0 has already been stacked.
*
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_ISR_Handler)
PROC (_ISR_Handler):
#define LABEL(x) x
/* XXX ??
#define MTSAVE(x) mtspr sprg0, x
#define MFSAVE(x) mfspr x, sprg0
*/
#define MTPC(x) mtspr srr0, x
#define MFPC(x) mfspr x, srr0
#define MTMSR(x) mtspr srr1, x
#define MFMSR(x) mfspr x, srr1
#include "irq_stub.S"
rfi
#if (PPC_HAS_RFCI == 1)
/* void __ISR_HandlerC()
*
* This routine provides the RTEMS interrupt management.
* For critical interrupts
*
*/
ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
PUBLIC_PROC (_ISR_HandlerC)
PROC (_ISR_HandlerC):
#undef LABEL
#undef MTSAVE
#undef MFSAVE
#undef MTPC
#undef MFPC
#undef MTMSR
#undef MFMSR
#define LABEL(x) x##_C
/* XXX??
#define MTSAVE(x) mtspr sprg1, x
#define MFSAVE(x) mfspr x, sprg1
*/
#define MTPC(x) mtspr srr2, x
#define MFPC(x) mfspr x, srr2
#define MTMSR(x) mtspr srr3, x
#define MFMSR(x) mfspr x, srr3
#include "irq_stub.S"
rfci
#endif
/* PowerOpen descriptors for indirect function calls.
*/
#if (PPC_ABI == PPC_ABI_POWEROPEN)
DESCRIPTOR (_CPU_Context_save_fp)
DESCRIPTOR (_CPU_Context_restore_fp)
DESCRIPTOR (_CPU_Context_switch)
DESCRIPTOR (_CPU_Context_restore)
DESCRIPTOR (_ISR_Handler)
#if (PPC_HAS_RFCI == 1)
DESCRIPTOR (_ISR_HandlerC)
#endif
#endif


@@ -1,268 +0,0 @@
/*
* This file contains the interrupt handler assembly code for the PowerPC
* implementation of RTEMS. It is #included from cpu_asm.s.
*
* Author: Andrew Bray <andy@i-cubed.co.uk>
*
* COPYRIGHT (c) 1995 by i-cubed ltd.
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of i-cubed limited not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* i-cubed limited makes no representations about the suitability
* of this software for any purpose.
*
* $Id$
*/
/* void __ISR_Handler()
*
* This routine provides the RTEMS interrupt management.
* The vector number is in r0. R0 has already been stacked.
*
*/
PUBLIC_VAR (_CPU_IRQ_info )
/* Finish off the interrupt frame */
stw r2, IP_2(r1)
stw r3, IP_3(r1)
stw r4, IP_4(r1)
stw r5, IP_5(r1)
stw r6, IP_6(r1)
stw r7, IP_7(r1)
stw r8, IP_8(r1)
stw r9, IP_9(r1)
stw r10, IP_10(r1)
stw r11, IP_11(r1)
stw r12, IP_12(r1)
stw r13, IP_13(r1)
stmw r28, IP_28(r1)
mfcr r5
mfctr r6
mfxer r7
mflr r8
MFPC (r9)
MFMSR (r10)
/* Establish addressing */
#if (PPC_USE_SPRG)
mfspr r11, sprg3
#else
lis r11,_CPU_IRQ_info@ha
addi r11,r11,_CPU_IRQ_info@l
#endif
dcbt r0, r11
stw r5, IP_CR(r1)
stw r6, IP_CTR(r1)
stw r7, IP_XER(r1)
stw r8, IP_LR(r1)
stw r9, IP_PC(r1)
stw r10, IP_MSR(r1)
lwz r30, Vector_table(r11)
slwi r4,r0,2
lwz r28, Nest_level(r11)
add r4, r4, r30
lwz r30, 0(r28)
mr r3, r0
lwz r31, Stack(r11)
/*
* #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
* if ( _ISR_Nest_level == 0 )
* switch to software interrupt stack
* #endif
*/
/* Switch stacks, here we must prevent ALL interrupts */
#if (PPC_USE_SPRG)
mfmsr r5
mfspr r6, sprg2
#else
lwz r6,msr_initial(r11)
lis r5,~PPC_MSR_DISABLE_MASK@ha
ori r5,r5,~PPC_MSR_DISABLE_MASK@l
and r6,r6,r5
mfmsr r5
#endif
mtmsr r6
cmpwi r30, 0
lwz r29, Disable_level(r11)
subf r31,r1,r31
bne LABEL (nested)
stwux r1,r1,r31
LABEL (nested):
/*
* _ISR_Nest_level++;
*/
lwz r31, 0(r29)
addi r30,r30,1
stw r30,0(r28)
/* From here on out, interrupts can be re-enabled. RTEMS
* convention says not.
*/
lwz r4,0(r4)
/*
* _Thread_Dispatch_disable_level++;
*/
addi r31,r31,1
stw r31, 0(r29)
/* SCE 980217
*
* We need address translation ON when we call our ISR routine
mtmsr r5
*/
/*
* (*_ISR_Vector_table[ vector ])( vector );
*/
#if (PPC_ABI == PPC_ABI_POWEROPEN)
lwz r6,0(r4)
lwz r2,4(r4)
mtlr r6
lwz r11,8(r4)
#endif
#if (PPC_ABI == PPC_ABI_GCC27)
lwz r2, Default_r2(r11)
mtlr r4
#lwz r2, 0(r2)
#endif
#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
mtlr r4
lwz r2, Default_r2(r11)
lwz r13, Default_r13(r11)
#lwz r2, 0(r2)
#lwz r13, 0(r13)
#endif
mr r4,r1
blrl
/* NOP marker for debuggers */
or r6,r6,r6
/* We must re-disable the interrupts */
#if (PPC_USE_SPRG)
mfspr r11, sprg3
mfspr r0, sprg2
#else
lis r11,_CPU_IRQ_info@ha
addi r11,r11,_CPU_IRQ_info@l
lwz r0,msr_initial(r11)
lis r30,~PPC_MSR_DISABLE_MASK@ha
ori r30,r30,~PPC_MSR_DISABLE_MASK@l
and r0,r0,r30
#endif
mtmsr r0
lwz r30, 0(r28)
lwz r31, 0(r29)
/*
* if (--Thread_Dispatch_disable,--_ISR_Nest_level)
* goto easy_exit;
*/
addi r30, r30, -1
cmpwi r30, 0
addi r31, r31, -1
stw r30, 0(r28)
stw r31, 0(r29)
bne LABEL (easy_exit)
cmpwi r31, 0
lwz r30, Switch_necessary(r11)
/*
* #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
* restore stack
* #endif
*/
lwz r1,0(r1)
bne LABEL (easy_exit)
lwz r30, 0(r30)
lwz r31, Signal(r11)
/*
* if ( _Context_Switch_necessary )
* goto switch
*/
cmpwi r30, 0
lwz r28, 0(r31)
li r6,0
bne LABEL (switch)
/*
* if ( !_ISR_Signals_to_thread_executing )
* goto easy_exit
* _ISR_Signals_to_thread_executing = 0;
*/
cmpwi r28, 0
beq LABEL (easy_exit)
/*
* switch:
* call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
*/
LABEL (switch):
stw r6, 0(r31)
/* Re-enable interrupts */
lwz r0, IP_MSR(r1)
#if (PPC_ABI == PPC_ABI_POWEROPEN)
lwz r2, Dispatch_r2(r11)
#else
/* R2 and R13 still hold their values from the last call */
#endif
mtmsr r0
bl SYM (_Thread_Dispatch)
/* NOP marker for debuggers */
or r6,r6,r6
/*
* prepare to get out of interrupt
*/
/* Re-disable IRQs */
#if (PPC_USE_SPRG)
mfspr r0, sprg2
#else
lis r11,_CPU_IRQ_info@ha
addi r11,r11,_CPU_IRQ_info@l
lwz r0,msr_initial(r11)
lis r5,~PPC_MSR_DISABLE_MASK@ha
ori r5,r5,~PPC_MSR_DISABLE_MASK@l
and r0,r0,r5
#endif
mtmsr r0
/*
* easy_exit:
* prepare to get out of interrupt
* return from interrupt
*/
LABEL (easy_exit):
lwz r5, IP_CR(r1)
lwz r6, IP_CTR(r1)
lwz r7, IP_XER(r1)
lwz r8, IP_LR(r1)
lwz r9, IP_PC(r1)
lwz r10, IP_MSR(r1)
mtcrf 255,r5
mtctr r6
mtxer r7
mtlr r8
MTPC (r9)
MTMSR (r10)
lwz r0, IP_0(r1)
lwz r2, IP_2(r1)
lwz r3, IP_3(r1)
lwz r4, IP_4(r1)
lwz r5, IP_5(r1)
lwz r6, IP_6(r1)
lwz r7, IP_7(r1)
lwz r8, IP_8(r1)
lwz r9, IP_9(r1)
lwz r10, IP_10(r1)
lwz r11, IP_11(r1)
lwz r12, IP_12(r1)
lwz r13, IP_13(r1)
lmw r28, IP_28(r1)
lwz r1, 0(r1)
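Taken together, the bookkeeping implemented by this stub is roughly the
following C-like sketch (simplified; the real code also switches to the
interrupt stack and masks the MSR around the critical sections):

/*
 * _ISR_Nest_level++;
 * _Thread_Dispatch_disable_level++;
 * (*_ISR_Vector_table[ vector ])( vector, frame );
 * --_Thread_Dispatch_disable_level;
 * if ( --_ISR_Nest_level == 0 && _Thread_Dispatch_disable_level == 0 ) {
 *   if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
 *     _ISR_Signals_to_thread_executing = 0;
 *     _Thread_Dispatch();
 *   }
 * }
 */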


@@ -1,61 +0,0 @@
/*
* PowerPC Cache enable routines
*
* $Id$
*/
#include <rtems/system.h>
#define PPC_Get_HID0( _value ) \
do { \
_value = 0; /* to avoid warnings */ \
asm volatile( \
"mfspr %0, 0x3f0;" /* get HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
#define PPC_Set_HID0( _value ) \
do { \
asm volatile( \
"isync;" \
"mtspr 0x3f0, %0;" /* load HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
void powerpc_instruction_cache_enable ()
{
unsigned32 value;
/*
* Enable the instruction cache
*/
PPC_Get_HID0( value );
value |= 0x00008000; /* Set ICE bit */
PPC_Set_HID0( value );
}
void powerpc_data_cache_enable ()
{
unsigned32 value;
/*
* enable data cache
*/
PPC_Get_HID0( value );
value |= 0x00004000; /* set DCE bit */
PPC_Set_HID0( value );
}
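A minimal usage sketch (the hook name bsp_cache_setup is hypothetical; these
routines are typically invoked once during early BSP start-up):

void bsp_cache_setup( void )   /* hypothetical start-up hook */
{
  powerpc_instruction_cache_enable();
  powerpc_data_cache_enable();
}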


@@ -1,132 +0,0 @@
/* rtems.s
*
* This file contains the single entry point code for
* the PowerPC implementation of RTEMS.
*
* Author: Andrew Bray <andy@i-cubed.co.uk>
*
* COPYRIGHT (c) 1995 by i-cubed ltd.
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of i-cubed limited not be used in
* advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
* i-cubed limited makes no representations about the suitability
* of this software for any purpose.
*
* Derived from c/src/exec/cpu/no_cpu/rtems.c:
*
* COPYRIGHT (c) 1989-1997.
* On-Line Applications Research Corporation (OAR).
* Copyright assigned to U.S. Government, 1994.
*
* The license and distribution terms for this file may be found in
* the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#include <asm.h>
BEGIN_CODE
/*
* RTEMS
*
* This routine jumps to the directive indicated in r11.
* This routine is used when RTEMS is linked by itself and placed
* in ROM. This routine is the first address in the ROM space for
* RTEMS. The user "calls" this address with the directive arguments
* in the normal place.
* This routine then jumps indirectly to the correct directive
* preserving the arguments. The directive should not realize
* it has been "wrapped" in this way. The table "_Entry_points"
* is used to look up the directive.
*/
ALIGN (4, 2)
PUBLIC_PROC (RTEMS)
PROC (RTEMS):
#if (PPC_ABI == PPC_ABI_POWEROPEN)
mflr r0
stw r0, 8(r1)
stwu r1, -64(r1)
/* Establish addressing */
bl base
base:
mflr r12
addi r12, r12, tabaddr - base
lwz r12, Entry_points-abase(r12)
slwi r11, r11, 2
lwzx r12, r12, r11
stw r2, 56(r1)
lwz r0, 0(r12)
mtlr r0
lwz r2, 4(r12)
lwz r11, 8(r12)
blrl
lwz r2, 56(r1)
addi r1, r1, 64
lwz r0, 8(r1)
mtlr r0
#else
mflr r0
stw r0, 4(r1)
stwu r1, -16(r1)
/* Establish addressing */
bl base
base:
mflr r12
addi r12, r12, tabaddr - base
lwz r12, Entry_points-abase(r12)
slwi r11, r11, 2
lwzx r11, r12, r11
stw r2, 8(r1)
#if (PPC_ABI != PPC_ABI_GCC27)
stw r13, 12(r1)
#endif
mtlr r11
lwz r11, irqinfo-abase(r12)
lwz r2, 0(r11)
#if (PPC_ABI != PPC_ABI_GCC27)
lwz r13, 4(r11)
#endif
blrl
lwz r2, 8(r1)
#if (PPC_ABI != PPC_ABI_GCC27)
lwz r13, 12(r1)
#endif
addi r1, r1, 16
lwz r0, 4(r1)
mtlr r0
#endif
blr
/* Addressability stuff */
tabaddr:
abase:
EXTERN_VAR (_Entry_points)
Entry_points:
EXT_SYM_REF (_Entry_points)
#if (PPC_ABI != PPC_ABI_POWEROPEN)
EXTERN_VAR (_CPU_IRQ_info)
irqinfo:
EXT_SYM_REF (_CPU_IRQ_info)
#endif
#if (PPC_ABI == PPC_ABI_POWEROPEN)
DESCRIPTOR (RTEMS)
#endif
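Conceptually, the entry point above is equivalent to this C sketch (the
function-pointer type and argument passing are illustrative; _Entry_points is
the real RTEMS directive table):

typedef unsigned ( *directive_entry )( void * );
extern directive_entry _Entry_points[];

unsigned rtems_single_entry( unsigned index, void *arguments )
{
  /* r11 carries 'index'; the arguments stay in their usual registers */
  return ( *_Entry_points[ index ] )( arguments );
}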