2010-06-17 Joel Sherrill <joel.sherrill@oarcorp.com>

* .cvsignore, ChangeLog, Makefile.am, configure.ac, preinstall.am,
	shared/cache/cache.c, shared/cache/cache_.h,
	shared/interrupts/installisrentries.c, shared/score/cpu.c,
	shared/score/interrupt.S, shared/syscall/sparc64-syscall.S,
	shared/syscall/sparc64-syscall.h: New files.
commit c56982c86e (parent 62812daba8)
Author: Joel Sherrill
Date:   2010-06-17 16:16:25 +00:00

12 changed files with 1257 additions and 0 deletions

.cvsignore

@@ -0,0 +1,8 @@
aclocal.m4
autom4te*.cache
config.cache
config.log
config.status
configure
Makefile
Makefile.in

ChangeLog

@@ -0,0 +1,55 @@
2010-06-17 Joel Sherrill <joel.sherrill@oarcorp.com>

* .cvsignore, ChangeLog, Makefile.am, configure.ac, preinstall.am,
shared/cache/cache.c, shared/cache/cache_.h,
shared/interrupts/installisrentries.c, shared/score/cpu.c,
shared/score/interrupt.S, shared/syscall/sparc64-syscall.S,
shared/syscall/sparc64-syscall.h: New files.

2010-05-13 Gedare Bloom <gedare@gwmail.gwu.edu>

* sun4u/syscall/sparc-syscall.S: disable/enable interrupts directly.

2010-05-10 Gedare Bloom <gedare@gwmail.gwu.edu>

* shared/score/cpu.c, Makefile.am: Moved cpu.c from sun4v/score
* configure.ac: m5sim is no longer part of shared. sun4u option added.

2010-05-10 Gedare Bloom <gedare@gwmail.gwu.edu>

* sun4u/: New model specific subdir.
* sun4u/score/, sun4u/syscall/: Copied from sun4v.

2010-05-03 Gedare Bloom <gedare@gwmail.gwu.edu>

* m5sim/syscall/sparc-syscall.h, m5sim/syscall/sparc-syscall.S:
Renamed files from syscall.h and syscall.S

2010-05-03 Gedare Bloom <gedare@gwmail.gwu.edu>

* sun4v/syscall/syscall.S: Explicitly enable IE bit when using
SYS_irqset.
* m5sim/score/cpu.c: install raw handler and isr get level are nops

2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>

* m5sim/score m5sim/syscall: new subdirs
* m5sim/syscall/syscall.h m5sim/syscall/syscall.S: New files.

2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>

* sun4v/score/cpu_asm.S: Remove context switch code.

2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>

* shared/, shared/cache, shared/interrupts: Moved cache and
interrupts into shared folder.

2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>

* sun4v, sun4v/score/cpu_asm.S, sun4v/score/cpu.c: Copied from
score/cpu/sparc64

2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>

* ChangeLog: New file.

Makefile.am

@@ -0,0 +1,72 @@
##
## $Id$
##
ACLOCAL_AMFLAGS = -I ../../../aclocal
include $(top_srcdir)/../../../automake/compile.am
noinst_PROGRAMS =
# NOTE: shared applies to targets meant to run with supervisor privilege,
# i.e. sun4u or sun4v. Any targets (e.g. simulators) that cannot emulate
# supervisor privilege should use their own score and sparc64-syscall.
# The other shared code is probably usable, but should be defined separately.
# See the example at the end of Makefile.am.
if shared
include_libcpudir = $(includedir)/libcpu
include_libcpu_HEADERS = ../shared/include/cache.h
noinst_PROGRAMS += shared/shared-score.rel
shared_shared_score_rel_SOURCES = shared/score/cpu.c shared/score/interrupt.S
shared_shared_score_rel_CPPFLAGS = $(AM_CPPFLAGS)
shared_shared_score_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
noinst_PROGRAMS += shared/sparc64-syscall.rel
shared_sparc64_syscall_rel_SOURCES = shared/syscall/sparc64-syscall.S \
shared/syscall/sparc64-syscall.h
shared_sparc64_syscall_rel_CPPFLAGS = $(AM_CPPFLAGS)
shared_sparc64_syscall_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
noinst_PROGRAMS += shared/interrupts.rel
shared_interrupts_rel_SOURCES = shared/interrupts/installisrentries.c
shared_interrupts_rel_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/shared/interrupts
shared_interrupts_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
noinst_PROGRAMS += shared/cache.rel
shared_cache_rel_SOURCES = shared/cache/cache.c shared/cache/cache_.h \
../shared/src/cache_aligned_malloc.c ../shared/src/cache_manager.c
shared_cache_rel_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/shared/cache
shared_cache_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
endif
#if has_instruction_cache
#cache_rel_CPPFLAGS += -DHAS_INSTRUCTION_CACHE
#endif
### This is an example of how to define a separate score implementation.
## NOTE: Unlike other CPUs, we install into a subdirectory to avoid
## file name conflicts (see the sh CPU for a similar approach)
#include_sparc64dir = $(includedir)/sparc64
#include_rtems_scoredir = $(includedir)/rtems/score
#
#if simexample
#include_sparc64_HEADERS =
#include_rtems_score_HEADERS = $(include_rtems_scoredir)/sparc64.h \
# $(include_rtems_scoredir)/cpu.h \
# $(include_rtems_scoredir)/types.h
## simexample/score
#noinst_PROGRAMS += simexample/score.rel
#simexample_score_rel_SOURCES =
#simexample_score_rel_CPPFLAGS = $(AM_CPPFLAGS)
#simexample_score_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
#
#endif
### End of example.
include $(srcdir)/preinstall.am
include $(top_srcdir)/../../../automake/local.am

configure.ac

@@ -0,0 +1,39 @@
## Process this file with autoconf to produce a configure script.
##
## $Id$
AC_PREREQ(2.60)
AC_INIT([rtems-c-src-lib-libcpu-sparc64],[_RTEMS_VERSION],[http://www.rtems.org/bugzilla])
AC_CONFIG_SRCDIR([shared])
RTEMS_TOP([../../../../..],[../../..])
RTEMS_CANONICAL_TARGET_CPU
AM_INIT_AUTOMAKE([no-define foreign subdir-objects 1.10])
AM_MAINTAINER_MODE
RTEMS_ENV_RTEMSBSP
RTEMS_PROJECT_ROOT
RTEMS_PROG_CC_FOR_TARGET
AM_PROG_CC_C_O
RTEMS_CANONICALIZE_TOOLS
RTEMS_PROG_CCAS
AM_CONDITIONAL(shared, test x"$RTEMS_CPU_MODEL" = x"sun4v" || \
test x"$RTEMS_CPU_MODEL" = x"sun4u")
AM_CONDITIONAL([sun4u],[test x"$RTEMS_CPU_MODEL" = x"sun4u"])
AM_CONDITIONAL([sun4v],[test x"$RTEMS_CPU_MODEL" = x"sun4v"])
## How to add a conditional simexample for the Makefile.am, based on a
## RTEMS_CPU_MODEL defined as simulator.
#AM_CONDITIONAL([simexample],[test x"$RTEMS_CPU_MODEL" = x"simulator"])
RTEMS_AMPOLISH3
# Explicitly list all Makefiles here
AC_CONFIG_FILES([Makefile])
AC_OUTPUT

preinstall.am

@@ -0,0 +1,25 @@
## Automatically generated by ampolish3 - Do not edit
if AMPOLISH3
$(srcdir)/preinstall.am: Makefile.am
$(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am
endif
PREINSTALL_DIRS =
DISTCLEANFILES = $(PREINSTALL_DIRS)
all-am: $(PREINSTALL_FILES)
PREINSTALL_FILES =
CLEANFILES = $(PREINSTALL_FILES)
if shared
$(PROJECT_INCLUDE)/libcpu/$(dirstamp):
@$(MKDIR_P) $(PROJECT_INCLUDE)/libcpu
@: > $(PROJECT_INCLUDE)/libcpu/$(dirstamp)
PREINSTALL_DIRS += $(PROJECT_INCLUDE)/libcpu/$(dirstamp)
$(PROJECT_INCLUDE)/libcpu/cache.h: ../shared/include/cache.h $(PROJECT_INCLUDE)/libcpu/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/libcpu/cache.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/libcpu/cache.h
endif

shared/cache/cache.c

@@ -0,0 +1,36 @@
/*
* Cache Management Support Routines for the SPARC 64
*
* $Id$
*/
#include <rtems.h>
#include "cache_.h"
#if defined(HAS_INSTRUCTION_CACHE)
void _CPU_cache_invalidate_entire_instruction ( void )
{
asm volatile ("flush");
}
/* XXX these need to be addressed */
void _CPU_cache_freeze_instruction ( void )
{
}
void _CPU_cache_unfreeze_instruction ( void )
{
}
void _CPU_cache_enable_instruction ( void )
{
}
void _CPU_cache_disable_instruction ( void )
{
}
#endif
/* end of file */

shared/cache/cache_.h

@@ -0,0 +1,24 @@
/*
* SPARC64 Cache Manager Support
*/
#ifndef __SPARC_CACHE_h
#define __SPARC_CACHE_h
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Some functions simply have not been implemented.
*/
#if defined(HAS_INSTRUCTION_CACHE)
#define CPU_INSTRUCTION_CACHE_ALIGNMENT 0
#endif
#include <libcpu/cache.h>
#endif
/* end of include file */
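The hooks above are the CPU-specific half of the cache manager: shared/src/cache_manager.c, pulled in by Makefile.am, layers the public rtems_cache_* calls on top of them. A minimal usage sketch, mirroring what shared/score/cpu.c does after patching a trap-table slot (patch_and_sync and its arguments are made up for illustration):

#include <rtems.h>
#include <rtems/rtems/cache.h>

static void patch_and_sync( uint32_t *slot, uint32_t opcode )
{
  *slot = opcode;   /* modify code in memory */

  /* Dispatches to _CPU_cache_invalidate_entire_instruction() above when
   * HAS_INSTRUCTION_CACHE is defined; otherwise it is a no-op. */
  rtems_cache_invalidate_entire_instruction();
}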

shared/interrupts/installisrentries.c

@@ -0,0 +1,23 @@
/*
* Install trap handlers for handling software interrupts.
* This file is deprecated, as the trap handlers are needed before this
* function is called. We keep it for debugging purposes.
*
* Copyright 2010 Gedare Bloom.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
* $Id$
*/
#include <rtems.h>
#include <rtems/asm.h>
#include <rtems/score/sparc64.h>
#include <rtems/bspIo.h>
void sparc64_install_isr_entries( void )
{
return;
}

shared/score/cpu.c

@@ -0,0 +1,257 @@
/*
* SPARC-v9 Dependent Source
*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
* This file is based on the SPARC cpu.c file. Modifications are made to
* provide support for the SPARC-v9.
* COPYRIGHT (c) 2010. Gedare Bloom.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
* $Id$
*/
#include <rtems/system.h>
#include <rtems/asm.h>
#include <rtems/score/isr.h>
#include <rtems/rtems/cache.h>
/*
* This initializes the set of opcodes placed in each trap
* table entry. The routine which installs a handler is responsible
* for filling in the fields for the _handler address and the _vector
* trap type.
*
* The constants following this structure are masks for the fields which
* must be filled in when the handler is installed.
*/
/* 64-bit registers complicate this. Also, in sparc v9,
* each trap level gets its own set of global registers, but
* does not get its own dedicated register window. so we avoid
* using the local registers in the trap handler.
*/
const CPU_Trap_table_entry _CPU_Trap_slot_template = {
0x89508000, /* rdpr %tstate, %g4 */
0x05000000, /* sethi %hh(_handler), %g2 */
0x8410a000, /* or %g2, %hm(_handler), %g2 */
0x8528b020, /* sllx %g2, 32, %g2 */
0x07000000, /* sethi %hi(_handler), %g3 */
0x8610c002, /* or %g3, %g2, %g3 */
0x81c0e000, /* jmp %g3 + %lo(_handler) */
0x84102000 /* mov _vector, %g2 */
};
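/*
 * Illustrative sketch only (not used by the port): how the eight opcodes
 * above rebuild a 64-bit handler address at trap time.  The installer splits
 * the address into %hh (bits 63:42), %hm (41:32), %hi (31:10) and %lo (9:0);
 * %g2 collects the upper half, %g3 the lower, and the jmp adds in %lo.
 */
static inline uint64_t _CPU_Trap_slot_rebuild_example( uint64_t handler )
{
  uint64_t hh = handler >> 42;              /* sethi %hh(_handler), %g2      */
  uint64_t hm = (handler >> 32) & 0x3FF;    /* or    %g2, %hm(_handler), %g2 */
  uint64_t hi = (handler >> 10) & 0x3FFFFF; /* sethi %hi(_handler), %g3      */
  uint64_t lo = handler & 0x3FF;            /* jmp   %g3 + %lo(_handler)     */

  /* sllx %g2, 32, %g2 and or %g3, %g2, %g3 reassemble the full address */
  return ((((hh << 10) | hm) << 32) | (hi << 10)) + lo;
}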
/*PAGE
*
* _CPU_ISR_Get_level
*
* Input Parameters: NONE
*
* Output Parameters:
* returns the current interrupt level (the processor interrupt level, PIL)
*/
uint32_t _CPU_ISR_Get_level( void )
{
uint32_t level;
sparc64_get_interrupt_level( level );
return level;
}
/*PAGE
*
* _CPU_ISR_install_raw_handler
*
* This routine installs the specified handler as a "raw" non-executive
* supported trap handler (a.k.a. interrupt service routine).
*
* Input Parameters:
* vector - trap table entry number plus synchronous
* vs. asynchronous information
* new_handler - address of the handler to be installed
* old_handler - pointer to an address of the handler previously installed
*
* Output Parameters: NONE
* *old_handler - address of the handler previously installed
*
* NOTE:
*
* On the SPARC v9, there are really only 512 vectors. However, the executive
* has no easy, fast, reliable way to determine which traps are synchronous
* and which are asynchronous. By default, traps return to the
* instruction which caused the interrupt. So if you install a software
* trap handler as an executive interrupt handler (which is desirable since
* RTEMS takes care of window and register issues), then the executive needs
* to know that the return address is to the trap rather than the instruction
* following the trap.
*
* So vectors 0 through 511 are treated as regular asynchronous traps which
* provide the "correct" return address. Vectors 512 through 1023 are assumed
* by the executive to be synchronous and to require that the return be to the
* trapping instruction.
*
* If you use this mechanism to install a trap handler which must reexecute
* the instruction which caused the trap, then it should be installed as
* a synchronous trap. This will avoid the executive changing the return
* address.
*/
/* Verified this is working properly from sparc64_install_isr_entries */
void _CPU_ISR_install_raw_handler(
uint32_t vector,
proc_ptr new_handler,
proc_ptr *old_handler
)
{
uint32_t real_vector;
CPU_Trap_table_entry *tba;
CPU_Trap_table_entry *slot;
uint64_t u64_tba;
uint64_t u64_handler;
/*
* Get the "real" trap number for this vector ignoring the synchronous
* versus asynchronous indicator included with our vector numbers.
*/
real_vector = SPARC_REAL_TRAP_NUMBER( vector );
/*
* Get the current base address of the trap table and calculate a pointer
* to the slot we are interested in.
*/
sparc64_get_tba( u64_tba );
/* u32_tbr &= 0xfffff000; */
u64_tba &= 0xffffffffffff8000; /* keep only trap base address */
tba = (CPU_Trap_table_entry *) u64_tba;
/* use array indexing to fill in lower bits -- require
* CPU_Trap_table_entry to be full-sized. */
slot = &tba[ real_vector ];
/*
* Get the address of the old_handler from the trap table.
*
* NOTE: The old_handler returned will be bogus if it does not follow
* the RTEMS model.
*/
/* shift amount for the hi bits (31:10) */
#define HI_BITS_SHIFT 10
/* shift amount of hm bits (41:32) */
#define HM_BITS_SHIFT 32
/* shift amount of hh bits (63:42) */
#define HH_BITS_SHIFT 42
/* We're only interested in bits 0-9 of the immediate field */
#define IMM_MASK 0x000003FF
if ( slot->rdpr_tstate_g4 == _CPU_Trap_slot_template.rdpr_tstate_g4 ) {
u64_handler =
(((uint64_t)((slot->sethi_of_hh_handler_to_g2 << HI_BITS_SHIFT) |
(slot->or_g2_hm_handler_to_g2 & IMM_MASK))) << HM_BITS_SHIFT) |
((slot->sethi_of_handler_to_g3 << HI_BITS_SHIFT) |
(slot->jmp_to_low_of_handler_plus_g3 & IMM_MASK));
*old_handler = (proc_ptr) u64_handler;
} else
*old_handler = 0;
/*
* Copy the template to the slot and then fix it.
*/
*slot = _CPU_Trap_slot_template;
u64_handler = (uint64_t) new_handler;
/* mask for extracting %hh */
#define HH_BITS_MASK 0xFFFFFC0000000000
/* mask for extracting %hm */
#define HM_BITS_MASK 0x000003FF00000000
/* mask for extracting %hi */
#define HI_BITS_MASK 0x00000000FFFFFC00
/* mask for extracting %lo */
#define LO_BITS_MASK 0x00000000000003FF
slot->mov_vector_g2 |= vector;
slot->sethi_of_hh_handler_to_g2 |=
(u64_handler & HH_BITS_MASK) >> HH_BITS_SHIFT;
slot->or_g2_hm_handler_to_g2 |=
(u64_handler & HM_BITS_MASK) >> HM_BITS_SHIFT;
slot->sethi_of_handler_to_g3 |=
(u64_handler & HI_BITS_MASK) >> HI_BITS_SHIFT;
slot->jmp_to_low_of_handler_plus_g3 |= (u64_handler & LO_BITS_MASK);
/* need to flush icache after this !!! */
/* need to flush icache in case old trap handler is in cache */
rtems_cache_invalidate_entire_instruction();
}
/*PAGE
*
* _CPU_ISR_install_vector
*
* This kernel routine installs the RTEMS handler for the
* specified vector.
*
* Input parameters:
* vector - interrupt vector number
* new_handler - replacement ISR for this vector number
* old_handler - pointer to former ISR for this vector number
*
* Output parameters:
* *old_handler - former ISR for this vector number
*
*/
void _CPU_ISR_install_vector(
uint64_t vector,
proc_ptr new_handler,
proc_ptr *old_handler
)
{
uint64_t real_vector;
proc_ptr ignored;
/*
* Get the "real" trap number for this vector ignoring the synchronous
* versus asynchronous indicator included with our vector numbers.
*/
real_vector = SPARC_REAL_TRAP_NUMBER( vector );
/*
* Return the previous ISR handler.
*/
*old_handler = _ISR_Vector_table[ vector ];
/*
* Install the wrapper so this ISR can be invoked properly.
*/
_CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
* be used by the _ISR_Handler so the user gets control.
*/
_ISR_Vector_table[ real_vector ] = new_handler;
}
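A minimal sketch of how a handler might be hooked through _CPU_ISR_install_vector, following the vector-numbering convention documented above (my_external_isr, the 0x41 external-interrupt vector, and the +512 synchronous offset are illustrative assumptions, not definitions from this commit):

#include <rtems/system.h>
#include <rtems/score/isr.h>

/* Hypothetical handler; _ISR_Handler passes the vector number in %o0 and the
 * address of the interrupt stack frame in %o1. */
static void my_external_isr( uint64_t vector )
{
  (void) vector;   /* service the device here */
}

static void install_example( void )
{
  proc_ptr old_handler;

  /* Installed as an asynchronous trap (vector < 512): the executive returns
   * past the trapping instruction.  Adding 512 to the vector would mark the
   * same trap as synchronous so it is re-executed instead. */
  _CPU_ISR_install_vector( 0x41, (proc_ptr) my_external_isr, &old_handler );
}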

shared/score/interrupt.S

@@ -0,0 +1,586 @@
/* cpu_asm.s
*
* This file contains the basic algorithms for all assembly code used
* in an specific CPU port of RTEMS. These algorithms must be implemented
* in assembly language.
*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
* Ported to ERC32 implementation of the SPARC by On-Line Applications
* Research Corporation (OAR) under contract to the European Space
* Agency (ESA).
*
* ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
* European Space Agency.
*
* Ported to Niagara and UltraSPARC III (US3) implementations of the SPARC-v9.
* Niagara and US3 modifications of respective RTEMS file:
* COPYRIGHT (c) 2010. Gedare Bloom.
*
* $Id$
*/
#include <rtems/asm.h>
/*
* The assembler needs to be told that we know what to do with
* the global registers.
*/
.register %g2, #scratch
.register %g3, #scratch
.register %g6, #scratch
.register %g7, #scratch
/*
* void _ISR_Handler()
*
* This routine provides the RTEMS interrupt management.
*
* We enter this handler from the 8 instructions in the trap table with
* the following registers assumed to be set as shown:
*
* g4 = tstate (old l0)
* g2 = trap type (vector) (old l3)
*
* NOTE: By an executive defined convention:
* if trap type is between 0 and 511 it is an asynchronous trap
* if trap type is between 512 and 1023 it is a synchronous trap
*/
.align 4
PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
/*
* The ISR is called at TL = 1.
* On sun4u we use the alternate globals set.
*
* On entry:
* g4 = tstate (from trap table)
* g2 = trap vector #
*
* In either case, note that trap handlers share a register window with
* the interrupted context, unless we explicitly enter a new window. This
* differs from Sparc v8, in which a dedicated register window is saved
* for trap handling. This means we have to avoid overwriting any registers
* that we don't save.
*
*/
/*
* save some or all context on stack
*/
/*
* Save the state of the interrupted task -- especially the global
* registers -- in the Interrupt Stack Frame. Note that the ISF
* includes a regular minimum stack frame which will be used if
* needed by register window overflow and underflow handlers.
*
* This is slightly wasteful, since the stack already has the window
* overflow space reserved, but there is no obvious way to ensure
* we can store the interrupted state and still handle window
* spill/fill correctly, since there is no room for the ISF.
*
*/
/* this is for debugging purposes, make sure that TL = 1, otherwise
* things might get dicey */
rdpr %tl, %g1
cmp %g1, 1
be 1f
nop
0: ba 0b
nop
1:
/* first store the sp of the interrupted task temporarily in g1 */
mov %sp, %g1
sub %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
! make space for Stack_Frame||ISF
/* save tstate, tpc, tnpc, pil */
stx %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET]
rdpr %pil, %g3
rdpr %tpc, %g4
rdpr %tnpc, %g5
stx %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
stx %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
stx %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
stx %g2, [%sp + STACK_BIAS + ISF_TVEC_NUM]
rd %y, %g4 ! save y
stx %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]
! save interrupted frame's output regs
stx %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET] ! save o0
stx %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET] ! save o1
stx %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET] ! save o2
stx %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET] ! save o3
stx %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET] ! save o4
stx %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET] ! save o5
stx %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET] ! save o6/sp
stx %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET] ! save o7
mov %g1, %o5 ! hold the old sp here for now
mov %g2, %o1 ! we'll need trap # later
/* switch to TL[0] */
wrpr %g0, 0, %tl
/* switch to normal globals */
#if defined (SUN4U)
/* the assignment to pstate below will mask out the AG bit */
#elif defined (SUN4V)
wrpr %g0, 0, %gl
#endif
/* get pstate to known state */
wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate
! save globals
stx %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET] ! save g1
stx %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET] ! save g2
stx %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET] ! save g3
stx %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET] ! save g4
stx %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET] ! save g5
stx %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET] ! save g6
stx %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET] ! save g7
mov %o1, %g2 ! get the trap #
mov %o5, %g7 ! store the interrupted %sp (preserve)
mov %sp, %o1 ! 2nd arg to ISR Handler = address of ISF
/*
* Increment ISR nest level and Thread dispatch disable level.
*
* Register usage for this section: (note, these are used later)
*
* g3 = _Thread_Dispatch_disable_level pointer
* g5 = _Thread_Dispatch_disable_level value (uint32_t)
* g6 = _ISR_Nest_level pointer
* g4 = _ISR_Nest_level value (uint32_t)
* o5 = temp
*
* NOTE: It is assumed that g6 - g7 will be preserved until the ISR
* nest and thread dispatch disable levels are unnested.
*/
setx SYM(_Thread_Dispatch_disable_level), %o5, %g3
lduw [%g3], %g5
setx SYM(_ISR_Nest_level), %o5, %g6
lduw [%g6], %g4
add %g5, 1, %g5
stuw %g5, [%g3]
add %g4, 1, %g4
stuw %g4, [%g6]
/*
* If ISR nest level was zero (now 1), then switch stack.
*/
subcc %g4, 1, %g4 ! outermost interrupt handler?
bnz dont_switch_stacks ! No, then do not switch stacks
setx SYM(_CPU_Interrupt_stack_high), %o5, %g1
ldx [%g1], %sp
/*
* Adjust the stack for the stack bias
*/
sub %sp, STACK_BIAS, %sp
/*
* Make sure we have a place on the stack for the window overflow
* trap handler to write into. At this point it is safe to
* enable traps again.
*/
sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
dont_switch_stacks:
/*
* Check if we have an external interrupt (trap 0x41 - 0x4f). If so,
* set the PIL to mask off interrupts with lower priority.
*
* The original PIL is not modified since it will be restored
* when the interrupt handler returns.
*/
and %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]?
subcc %g1, 0x41, %g0
bl dont_fix_pil
subcc %g1, 0x4f, %g0
bg dont_fix_pil
nop
wrpr %g0, %g1, %pil
dont_fix_pil:
/* We need to be careful about enabling traps here.
*
* We already stored off the tstate, tpc, and tnpc, and switched to
* TL = 0, so it should be safe.
*/
/* zero out g4 so that ofw calls work */
mov %g0, %g4
! **** ENABLE TRAPS ****
wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
SPARC_PSTATE_IE_MASK, %pstate
/*
* Vector to user's handler.
*
* NOTE: TBR may no longer have vector number in it since
* we just enabled traps. It is definitely in g2.
*/
setx SYM(_ISR_Vector_table), %o5, %g1
ldx [%g1], %g1
and %g2, 0x1FF, %o5 ! remove synchronous trap indicator
sll %o5, 3, %o5 ! o5 = offset into table
ldx [%g1 + %o5], %g1 ! g1 = _ISR_Vector_table[ vector ]
! o1 = 2nd arg = address of the ISF
! WAS LOADED WHEN ISF WAS SAVED!!!
mov %g2, %o0 ! o0 = 1st arg = vector number
call %g1, 0
nop ! delay slot
/*
* Redisable traps so we can finish up the interrupt processing.
* This is a conservative place to do this.
*/
! **** DISABLE TRAPS ****
wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate
/*
* We may safely use any of the %o and %g registers, because
* we saved them earlier (and any other interrupt that uses
* them will also save them). Right now, the state of those
* registers are as follows:
* %o registers: unknown (user's handler may have destroyed)
* %g1,g4,g5: scratch
* %g2: unknown: was trap vector
* %g3: unknown: was _Thread_Dispatch_disable_level pointer
* %g6: _ISR_Nest_level
* %g7: interrupted task's sp
*/
/*
* Decrement ISR nest level and Thread dispatch disable level.
*
* Register usage for this section: (note: as used above)
*
* g3 = _Thread_Dispatch_disable_level pointer
* g5 = _Thread_Dispatch_disable_level value
* g6 = _ISR_Nest_level pointer
* g4 = _ISR_Nest_level value
* o5 = temp
*/
/* We have to re-load the values from memory, because there are
* not enough registers that we know will be preserved across the
* user's handler. If this is a problem, we can create a register
* window for _ISR_Handler.
*/
setx SYM(_Thread_Dispatch_disable_level), %o5, %g3
lduw [%g3],%g5
lduw [%g6],%g4
sub %g5, 1, %g5
stuw %g5, [%g3]
sub %g4, 1, %g4
stuw %g4, [%g6]
orcc %g4, %g0, %g0 ! ISRs still nested?
bnz dont_restore_stack ! Yes then don't restore stack yet
nop
/*
* This is the outermost interrupt handler. Need to get off the
* CPU Interrupt Stack and back to the tasks stack.
*
* The following subtract should get us back on the interrupted
* tasks stack and add enough room to invoke the dispatcher.
* When we enable traps, we are mostly back in the context
* of the task and subsequent interrupts can operate normally.
*
* Now %sp points to the bottom of the ISF.
*
*/
sub %g7, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
dont_restore_stack:
/*
* If dispatching is disabled (includes nested interrupt case),
* then do a "simple" exit.
*/
orcc %g5, %g0, %g0 ! Is dispatching disabled?
bnz simple_return ! Yes, then do a "simple" exit
! NOTE: Use the delay slot
mov %g0, %g4 ! clear g4 for ofw
! Are we dispatching from a previous ISR in the interrupted thread?
setx SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
lduw [%g5], %o5
orcc %o5, %g0, %g0 ! Is this thread already doing an ISR?
bnz simple_return ! Yes, then do a "simple" exit
nop
setx SYM(_Context_Switch_necessary), %o5, %g7
/*
* If a context switch is necessary, then fudge the stack to
* return to the interrupt dispatcher.
*/
ldub [%g7], %o5
orcc %o5, %g0, %g0 ! Is thread switch necessary?
bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher
nop
/*
* Finally, check to see if signals were sent to the currently
* executing task. If so, we need to invoke the interrupt dispatcher.
*/
setx SYM(_ISR_Signals_to_thread_executing), %o5, %g5
ldub [%g5], %o5
orcc %o5, %g0, %g0 ! Were signals sent to the currently
! executing thread?
bz simple_return ! No, then do a simple return
! use the delay slot to clear the signals
! to the currently executing task flag
stb %g0, [%g5]
/*
* Invoke interrupt dispatcher.
*/
PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
! Set ISR dispatch nesting prevention flag
mov 1, %o1
setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
stuw %o1, [%o2]
! **** ENABLE TRAPS ****
wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
SPARC_PSTATE_IE_MASK, %pstate
isr_dispatch:
call SYM(_Thread_Dispatch), 0
nop
/*
* We invoked _Thread_Dispatch in a state similar to the interrupted
* task. In order to safely be able to tinker with the register
* windows and get the task back to its pre-interrupt state,
* we need to disable interrupts.
*/
mov 2, %g4 ! syscall (disable interrupts)
ta 0 ! syscall (disable interrupts)
mov 0, %g4
/*
* While we had ISR dispatching disabled in this thread,
* did we miss anything? If so, then we need to do another
* _Thread_Dispatch before leaving this ISR Dispatch context.
*/
setx SYM(_Context_Switch_necessary), %o5, %o1
ldub [%o1], %o2
! NOTE: Use some of delay slot to start loading this
setx SYM(_ISR_Signals_to_thread_executing), %o5, %o1
ldub [%o1], %o3
orcc %o2, %g0, %g0 ! Is thread switch necessary?
bnz dispatchAgain ! yes, then invoke the dispatcher AGAIN
! NOTE: Use the delay slot to catch the orcc below
/*
* Finally, check to see if signals were sent to the currently
* executing task. If so, we need to invoke the interrupt dispatcher.
*/
! NOTE: Delay slots above were used to perform the load AND
! this orcc falls into the delay slot for bnz above
orcc %o3, %g0, %g0 ! Were signals sent to the currently
! executing thread?
bz allow_nest_again ! No, then clear out and return
nop
! Yes, then invoke the dispatcher
dispatchAgain:
mov 3, %g4 ! syscall (enable interrupts)
ta 0 ! syscall (enable interrupts)
ba isr_dispatch
mov 0, %g4
allow_nest_again:
! Zero out ISR stack nesting prevention flag
setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
stuw %g0,[%o1]
/*
* The CWP in place at this point may be different from
* that which was in effect at the beginning of the ISR if we
* have been context switched between the beginning of this invocation
* of _ISR_Handler and this point. Thus the CWP and WIM should
* not be changed back to their values at ISR entry time. Any
* changes to the PSR must preserve the CWP.
*/
simple_return:
flushw ! get register windows to a 'clean' state
! **** DISABLE TRAPS ****
wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate
ldx [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1 ! restore y
wr %o1, 0, %y
ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
! see if cwp is proper (tstate.cwp == cwp)
and %g1, 0x1F, %g6
rdpr %cwp, %g7
cmp %g6, %g7
bz good_window
nop
/*
* Fix the CWP. Need the cwp to be the proper cwp that
* gets restored when returning from the trap via retry/done. Do
* this before reloading the task's output regs. Basically fake a
* window spill/fill.
*
* Is this necessary on sun4v? Why not just re-write
* tstate.cwp to be equal to the current cwp?
*/
mov %sp, %g1
stx %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
stx %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
stx %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
stx %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
stx %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
stx %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
stx %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
stx %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
stx %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
stx %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
stx %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
stx %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
stx %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
stx %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
stx %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
stx %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
wrpr %g0, %g6, %cwp
mov %g1, %sp
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7
good_window:
/*
* Restore tasks global and out registers
*/
ldx [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1 ! restore g1
ldx [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2 ! restore g2
ldx [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3 ! restore g3
ldx [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4 ! restore g4
ldx [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5 ! restore g5
ldx [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6 ! restore g6
ldx [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7 ! restore g7
! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
wrpr %g0, 1, %tl
! return to GL=1 or AG
#if defined(SUN4U)
rdpr %pstate, %g1
andn %g1, SPARC_PSTATE_AG_MASK, %g1
wrpr %g1, %g0, %pstate ! go to regular global
#elif defined(SUN4V)
wrpr %g0, 1, %gl
#endif
! now we can use global registers (at gl=1 or AG)
ldx [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
ldx [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
ldx [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
ldx [%sp + STACK_BIAS + ISF_TVEC_NUM], %g2
wrpr %g0, %g3, %pil
wrpr %g0, %g4, %tpc
wrpr %g0, %g5, %tnpc
wrpr %g0, %g1, %tstate
ldx [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0 ! restore o0
ldx [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1 ! restore o1
ldx [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2 ! restore o2
ldx [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3 ! restore o3
ldx [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4 ! restore o4
ldx [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5 ! restore o5
! sp is restored later
ldx [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7 ! restore o7
ldx [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp
/*
* Determine whether to re-execute the trapping instruction
* (asynchronous trap) or to skip the trapping instruction
* (synchronous trap).
*/
andcc %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
! Is this a synchronous trap?
be not_synch ! No, then skip trapping instruction
mov 0, %g4
retry ! re-execute trapping instruction
not_synch:
done ! skip trapping instruction
/* end of file */

shared/syscall/sparc64-syscall.S

@@ -0,0 +1,128 @@
/*
* systrap.S
*
* This file contains emulated system calls using software trap 0.
* The following calls are supported:
*
* + SYS_exit (halt)
* + SYS_irqdis (disable interrupts)
* + SYS_irqen (enable interrupts)
*
* COPYRIGHT:
*
* COPYRIGHT (c) 1995. European Space Agency.
* COPYRIGHT (c) 2010. Gedare Bloom.
*
* The terms of the RTEMS license apply to this file.
*
*/
#include <rtems/asm.h>
#include "sparc64-syscall.h"
.seg "text"
/*
* system call
*
* On entry:
* g4[AG | GL=1] = tstate (from trap table)
* g2[AG | GL=1] = trap vector # (256)
* g3[AG | GL=1] = address of SYM(syscall)
* g4[normal | GL-1] = system call id
* if arch = sun4v:
* We need to go back to GL-1 to read the system call id.
* on sun4u:
* We need to go back to the normal globals to read the system call id.
*
* First thing is to return to the previous set of globals, so
* that the system call id can be read. The syscall code needs
* to re-read tstate.
*
* syscall should only ever be entered by ta 0 being called explicitly
* by a function that knows what is happening. This means the syscall
* code can safely use any scratch registers and the %o registers.
*/
PUBLIC(syscall)
SYM(syscall):
mov %g0, %g4 ! clear %g4 at this GL
#if defined (SUN4U)
rdpr %pstate, %g1
andn %g1, SPARC_PSTATE_AG_MASK, %g1
wrpr %g1, %g0, %pstate ! go to regular globals
#elif defined (SUN4V)
rdpr %gl, %g1
dec %g1
wrpr %g0, %g1, %gl ! go back to GL = GL - 1
#endif
subcc %g4, 2, %g0
bne 3f
rdpr %tstate, %g5 ! re-read tstate, use delay slot
! syscall 2, disable interrupts
rdpr %pil, %g1
and %g5, SPARC_TSTATE_IE_MASK, %o0
or %o0, %g1, %o0 ! return TSTATE_IE | PIL
wrpr %g0, 0xf, %pil ! set PIL to 15
andn %g5, SPARC_TSTATE_IE_MASK, %g1
wrpr %g0, %g1, %tstate ! disable interrupts in trap state
ba,a 9f
3: ! syscall 3, enable interrupts
subcc %g4, 3, %g0
bne 1f
and %o0, 0xf, %g1
wrpr %g0, %g1, %pil ! restore PIL
! and %o0, SPARC_TSTATE_IE_MASK, %g1
! or %g5, %g1, %g1 ! restore saved IE
or %g5, SPARC_TSTATE_IE_MASK, %g1 ! restore IE (safe?)
wrpr %g0, %g1, %tstate
ba,a 9f
1:
ba,a 1b ! spin. taking a trap here -> htrap
9: ! leave
mov 0, %g4 ! clear %g4
DONE
PUBLIC(sparc_disable_interrupts)
SYM(sparc_disable_interrupts):
mov SYS_irqdis, %g4
ta 0
#if 0
rdpr %pstate, %g5
rdpr %pil, %g1
and %g5, SPARC_PSTATE_IE_MASK, %o0
or %o0, %g1, %o0 ! return PSTATE_IE | PIL
wrpr %g0, 0xf, %pil ! set PIL to 15
andn %g5, SPARC_PSTATE_IE_MASK, %g1
wrpr %g0, %g1, %pstate ! disable interrupts
#endif
retl
nop
PUBLIC(sparc_enable_interrupts)
SYM(sparc_enable_interrupts):
mov SYS_irqen, %g4
ta 0
#if 0
rdpr %pstate, %g5
and %o0, 0xf, %g1
wrpr %g0, %g1, %pil ! restore PIL
and %o0, SPARC_PSTATE_IE_MASK, %g1
or %g5, %g1, %g1 ! restore saved IE
! or %g5, SPARC_PSTATE_IE_MASK, %g1 ! set IE regardless of old (safe?)
wrpr %g0, %g1, %pstate
#endif
retl
nop
/* end of file */
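sparc_disable_interrupts() and sparc_enable_interrupts() give C code access to the two software traps above. A minimal sketch of the intended pairing, assuming the usual SPARC-style prototypes that return and accept the saved TSTATE.IE | PIL word in %o0 (the <rtems/score/sparc64.h> location is an assumption; that header is not part of this commit):

#include <rtems.h>
#include <rtems/score/sparc64.h>

static void critical_section_example( void )
{
  uint32_t level;

  level = sparc_disable_interrupts();   /* ta 0 with SYS_irqdis in %g4 */
  /* ... touch data shared with interrupt handlers ... */
  sparc_enable_interrupts( level );     /* ta 0 with SYS_irqen in %g4 */
}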

shared/syscall/sparc64-syscall.h

@@ -0,0 +1,4 @@
#define SYS_exit 1
#define SYS_irqdis 2
#define SYS_irqen 3