/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsPowerPCShared
 *
 * @brief General purpose assembler macros, linker command file support and
 * some inline functions for direct register access.
 */

/*
 * Copyright (C) 2008, 2015 embedded brains GmbH & Co. KG
 *
 * access function for Device Control Registers inspired by "ppc405common.h"
 * from Michael Hamel ADInstruments May 2008
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @defgroup RTEMSBSPsPowerPCSharedUtility PowerPC Utility Module
 *
 * @ingroup RTEMSBSPsPowerPCShared
 *
 * @brief PowerPC Utility Module.
 *
 * @{
 */

#ifndef __LIBCPU_POWERPC_UTILITY_H
#define __LIBCPU_POWERPC_UTILITY_H

#if !defined(ASM)
#include <rtems.h>
#endif

#include <rtems/score/cpu.h>
#include <rtems/powerpc/registers.h>
#include <rtems/powerpc/powerpc.h>

#ifdef __cplusplus
extern "C" {
#endif

#if !defined(ASM)

#include <rtems/bspIo.h>

#include <libcpu/cpuIdent.h>

#define LINKER_SYMBOL(sym) extern char sym [];
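
/*
 * Usage sketch (illustrative only, not part of the original header): a symbol
 * defined in a BSP linker command file can be made visible to C code via
 * LINKER_SYMBOL() and used by taking its address.  The symbol name below is a
 * hypothetical example.
 *
 *   LINKER_SYMBOL(bsp_section_text_begin)
 *
 *   void *text_begin = (void *) bsp_section_text_begin;
 */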

/**
 * @brief Read one byte from @a src.
 */
static inline uint8_t ppc_read_byte(const volatile void *src)
{
  uint8_t value;

  __asm__ volatile (
    "lbz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one half word from @a src.
 */
static inline uint16_t ppc_read_half_word(const volatile void *src)
{
  uint16_t value;

  __asm__ volatile (
    "lhz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one word from @a src.
 */
static inline uint32_t ppc_read_word(const volatile void *src)
{
  uint32_t value;

  __asm__ volatile (
    "lwz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Write one byte @a value to @a dest.
 */
static inline void ppc_write_byte(uint8_t value, volatile void *dest)
{
  __asm__ volatile (
    "stb %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one half word @a value to @a dest.
 */
static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
{
  __asm__ volatile (
    "sth %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one word @a value to @a dest.
 */
static inline void ppc_write_word(uint32_t value, volatile void *dest)
{
  __asm__ volatile (
    "stw %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

static inline void *ppc_stack_pointer(void)
{
  void *sp;

  __asm__ volatile (
    "mr %0, 1"
    : "=r" (sp)
  );

  return sp;
}

static inline void ppc_set_stack_pointer(void *sp)
{
  __asm__ volatile (
    "mr 1, %0"
    :
    : "r" (sp)
  );
}

static inline void *ppc_link_register(void)
{
  void *lr;

  __asm__ volatile (
    "mflr %0"
    : "=r" (lr)
  );

  return lr;
}

static inline void ppc_set_link_register(void *lr)
{
  __asm__ volatile (
    "mtlr %0"
    :
    : "r" (lr)
  );
}

static inline uint32_t ppc_machine_state_register(void)
{
  uint32_t msr;

  __asm__ volatile (
    "mfmsr %0"
    : "=r" (msr)
  );

  return msr;
}

static inline void ppc_set_machine_state_register(uint32_t msr)
{
  __asm__ volatile (
    "mtmsr %0"
    :
    : "r" (msr)
  );
}

static inline void ppc_synchronize_data(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("sync");
}

static inline void ppc_light_weight_synchronize(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("lwsync");
}

static inline void ppc_synchronize_instructions(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("isync");
}

static inline void ppc_enforce_in_order_execution_of_io(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ".machine \"push\"\n"
    ".machine \"any\"\n"
    "eieio\n"
    ".machine \"pop\""
  );
}

static inline void ppc_data_cache_block_flush(void *addr)
{
  __asm__ volatile (
    "dcbf 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_flush_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbf %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_data_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "dcbi 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbi %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_data_cache_block_store(const void *addr)
{
  __asm__ volatile (
    "dcbst 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_touch(const void *addr)
{
  __asm__ volatile (
    "dcbt 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_touch_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbt %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_touch_for_store(const void *addr)
{
  __asm__ volatile (
    "dcbtst 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_touch_for_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbtst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_clear_to_zero(void *addr)
{
  __asm__ volatile (
    "dcbz 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_clear_to_zero_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbz %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_instruction_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "icbi 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_instruction_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "icbi %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/**
 * @brief Enables external exceptions.
 *
 * You can use this function to enable the external exceptions and restore the
 * machine state with ppc_external_exceptions_disable() later.
 */
static inline uint32_t ppc_external_exceptions_enable(void)
{
  uint32_t current_msr;
  uint32_t new_msr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    "mfmsr %0;"
    "ori %1, %0, 0x8000;"
    "mtmsr %1"
    : "=r" (current_msr), "=r" (new_msr)
  );

  return current_msr;
}

/**
 * @brief Restores machine state.
 *
 * @see ppc_external_exceptions_enable()
 */
static inline void ppc_external_exceptions_disable(uint32_t msr)
{
  ppc_set_machine_state_register(msr);

  RTEMS_COMPILER_MEMORY_BARRIER();
}
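
/*
 * Usage sketch (illustrative only): the value returned by
 * ppc_external_exceptions_enable() is the previous MSR and must be passed
 * back to ppc_external_exceptions_disable() to restore the machine state.
 *
 *   uint32_t msr = ppc_external_exceptions_enable();
 *   ... code that runs with MSR[EE] set ...
 *   ppc_external_exceptions_disable(msr);
 */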

static inline uint32_t ppc_count_leading_zeros(uint32_t value)
{
  uint32_t count;

  __asm__ (
    "cntlzw %0, %1;"
    : "=r" (count)
    : "r" (value)
  );

  return count;
}

/*
 * Simple spin delay in microsecond units for device drivers.
 * This is very dependent on the clock speed of the target.
 */

#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* Wonderful bookE doesn't have mftb/mftbu; they only
 * define the TBRU/TBRL SPRs so we use these.  Luckily,
 * we run in supervisor mode so that should work on
 * all CPUs.  In user mode we'd have a problem...
 * 2007/11/30, T.S.
 *
 * OTOH, PSIM currently lacks support for reading
 * SPRs 268/269.  You need GDB patch sim/2376 to avoid
 * a crash...
 * OTOH, the MPC8xx does not allow reading the time base registers via mfspr;
 * we NEED mftb to access the time base.
 * 2009/10/30 Th. D.
 */
#define CPU_Get_timebase_low( _value ) \
  __asm__ volatile( "mftb %0" : "=r" (_value) )
#else
#define CPU_Get_timebase_low( _value ) \
  __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
#endif

/* Must be provided for rtems_bsp_delay to work */
extern uint32_t bsp_clicks_per_usec;

#define rtems_bsp_delay( _microseconds ) \
  do { \
    uint32_t start, ticks, now; \
    CPU_Get_timebase_low( start ); \
    ticks = (_microseconds) * bsp_clicks_per_usec; \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < ticks); \
  } while (0)

#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
  do { \
    uint32_t start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)
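
/*
 * Usage sketch (illustrative only): both delay macros busy-wait on the time
 * base, so they should only be used for short delays in device drivers.
 *
 *   rtems_bsp_delay( 10 );               spin for roughly 10 microseconds
 *   rtems_bsp_delay_in_bus_cycles( 64 ); spin for 64 time base ticks
 */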

/*
 * Routines to access the decrementer register
 */

#define PPC_Set_decrementer( _clicks ) \
  do { \
    __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
  } while (0)

#define PPC_Get_decrementer( _clicks ) \
  __asm__ volatile( "mfdec %0" : "=r" (_clicks) )

/*
 * Routines to access the time base register
 */

static inline uint64_t PPC_Get_timebase_register( void )
{
  uint32_t tbr_low;
  uint32_t tbr_high;
  uint32_t tbr_high_old;
  uint64_t tbr;

  do {
#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
    /* See comment above (CPU_Get_timebase_low) */
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
    __asm__ volatile( "mftb %0" : "=r" (tbr_low));
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
#else
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
    __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
#endif
  } while ( tbr_high_old != tbr_high );

  tbr = tbr_high;
  tbr <<= 32;
  tbr |= tbr_low;
  return tbr;
}

static inline void PPC_Set_timebase_register (uint64_t tbr)
{
  uint32_t tbr_low;
  uint32_t tbr_high;

  tbr_low = (uint32_t) tbr;
  tbr_high = (uint32_t) (tbr >> 32);
  __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
  __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));
}

static inline uint32_t ppc_decrementer_register(void)
{
  uint32_t dec;

  PPC_Get_decrementer(dec);

  return dec;
}

static inline void ppc_set_decrementer_register(uint32_t dec)
{
  PPC_Set_decrementer(dec);
}

/**
 * @brief Preprocessor magic for stringification of @a x.
 */
#define PPC_STRINGOF(x) #x

/**
 * @brief Reads the value of the Special Purpose Register with number @a spr
 * into @a val.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_SPECIAL_PURPOSE_REGISTER(spr, val) \
  __asm__ volatile (\
    "mfspr %0, " PPC_STRINGOF(spr) \
    : "=r" (val) \
  )

/**
 * @brief Sets the Special Purpose Register with number @a spr to the value in
 * @a val.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
  do { \
    __asm__ volatile (\
      "mtspr " PPC_STRINGOF(spr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
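
/*
 * Usage sketch (illustrative only): read, modify and write back an SPR.  The
 * SPR number must be a compile-time constant because it is stringified into
 * the instruction; BOOKE_TCR and BOOKE_TCR_DIE are used here as an example
 * and are assumed to come from <rtems/powerpc/registers.h>.
 *
 *   uint32_t tcr;
 *   PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_TCR, tcr);
 *   PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_TCR, tcr | BOOKE_TCR_DIE);
 */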

/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Returns the value of the Thread Management Register with number @a tmr.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_THREAD_MGMT_REGISTER(tmr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mftmr %0, " PPC_STRINGOF(tmr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Thread Management Register with number @a tmr to the value in
 * @a val.
 */
#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
  do { \
    __asm__ volatile (\
      "mttmr " PPC_STRINGOF(tmr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

/**
 * @brief Returns the value of the Device Control Register with number @a dcr.
 *
 * The PowerPC 4XX family has Device Control Registers.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mfdcr %0, " PPC_STRINGOF(dcr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Device Control Register with number @a dcr to the value in
 * @a val.
 *
 * The PowerPC 4XX family has Device Control Registers.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
  do { \
    __asm__ volatile (\
      "mtdcr " PPC_STRINGOF(dcr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
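
/*
 * Usage sketch (illustrative only): the DCR number must be a compile-time
 * constant because it is stringified into the instruction.  The DCR number
 * 0x80 below is a made-up example, not a real 4XX register assignment.
 *
 *   uint32_t val = PPC_DEVICE_CONTROL_REGISTER(0x80);
 *   PPC_SET_DEVICE_CONTROL_REGISTER(0x80, val | 0x1);
 */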

/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

static inline uint32_t ppc_time_base(void)
{
  uint32_t val;

  CPU_Get_timebase_low(val);

  return val;
}

static inline void ppc_set_time_base(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWL, val);
}

static inline uint32_t ppc_time_base_upper(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(TBRU, val);
  return val;
}

static inline void ppc_set_time_base_upper(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWU, val);
}

static inline uint64_t ppc_time_base_64(void)
{
  return PPC_Get_timebase_register();
}

static inline void ppc_set_time_base_64(uint64_t val)
{
  PPC_Set_timebase_register(val);
}

static inline uint32_t ppc_alternate_time_base(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL, val);
  return val;
}

static inline uint32_t ppc_alternate_time_base_upper(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU, val);
  return val;
}

static inline uint64_t ppc_alternate_time_base_64(void)
{
  uint32_t atbl;
  uint32_t atbu_0;
  uint32_t atbu_1;

  do {
    atbu_0 = ppc_alternate_time_base_upper();
    atbl = ppc_alternate_time_base();
    atbu_1 = ppc_alternate_time_base_upper();
  } while (atbu_0 != atbu_1);

  return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
}

static inline uint32_t ppc_processor_id(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
  return val;
}

static inline void ppc_set_processor_id(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
}

static inline uint32_t ppc_fsl_system_version(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR, val);
  return val;
}

static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
{
  return (svr >> 28) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
{
  return (svr >> 16) & 0xfff;
}

static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
{
  return (svr >> 12) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
{
  return (svr >> 8) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
{
  return (svr >> 4) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
{
  return (svr >> 0) & 0xf;
}
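
/*
 * Usage sketch (illustrative only): decompose the Freescale System Version
 * Register into its fields.
 *
 *   uint32_t svr = ppc_fsl_system_version();
 *   uint32_t sid = ppc_fsl_system_version_sid(svr);
 *   uint32_t mjrev = ppc_fsl_system_version_mjrev(svr);
 *   uint32_t mnrev = ppc_fsl_system_version_mnrev(svr);
 */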

static inline void ppc_msync(void)
{
  __asm__ volatile (
    "msync"
    :
    :
    : "memory"
  );
}

static inline void ppc_tlbre(void)
{
  __asm__ volatile (
    "tlbre"
    :
    :
    : "memory"
  );
}

static inline void ppc_tlbwe(void)
{
  __asm__ volatile (
    "tlbwe"
    :
    :
    : "memory"
  );
}

static inline void ppc_tlbsx(void *addr)
{
  __asm__ volatile (
    "tlbsx 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_mtivpr(void *prefix)
{
  __asm__ volatile (
    "mtivpr %0"
    :
    : "r" (prefix)
  );
}

#define ppc_mtivor(x, vec) __asm__ volatile ( \
  "mtivor" RTEMS_XSTRING(x) " %0" \
  : \
  : "r" (vec) \
)

void ppc_code_copy(void *dest, const void *src, size_t n);

/* FIXME: Do not use this function */
void printBAT(int bat, uint32_t upper, uint32_t lower);

/* FIXME: Do not use this function */
void ShowBATS(void);

#endif /* ifndef ASM */

#if defined(ASM)
#include <rtems/asm.h>

.macro LA reg, addr
#if defined(__powerpc64__)
	lis \reg, (\addr)@highest
	ori \reg, \reg, (\addr)@higher
	rldicr \reg, \reg, 32, 31
	oris \reg, \reg, (\addr)@h
	ori \reg, \reg, (\addr)@l
#else
	lis \reg, (\addr)@h
	ori \reg, \reg, (\addr)@l
#endif
.endm

.macro LA32 reg, addr
	lis \reg, (\addr)@h
	ori \reg, \reg, (\addr)@l
.endm

.macro LWI reg, value
	lis \reg, (\value)@h
	ori \reg, \reg, (\value)@l
.endm

.macro LW reg, addr
	lis \reg, \addr@ha
	lwz \reg, \addr@l(\reg)
.endm

/*
 * Tests the bits in reg1 against the bits set in mask.  A match is indicated
 * by EQ = 0 in CR0.  A mismatch is indicated by EQ = 1 in CR0.  The register
 * reg2 is used to load the mask.
 */
.macro TSTBITS reg1, reg2, mask
	LWI \reg2, \mask
	and \reg1, \reg1, \reg2
	cmplw \reg1, \reg2
.endm

.macro SETBITS reg1, reg2, mask
	LWI \reg2, \mask
	or \reg1, \reg1, \reg2
.endm

.macro CLRBITS reg1, reg2, mask
	LWI \reg2, \mask
	andc \reg1, \reg1, \reg2
.endm

.macro GLOBAL_FUNCTION name
	.global \name
	.type \name, @function
\name:
.endm

/*
 * Obtain interrupt mask
 */
.macro GET_INTERRUPT_MASK mask
	lis \mask, _PPC_INTERRUPT_DISABLE_MASK@h
	ori \mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
.endm

/*
 * Disables all asynchronous exceptions (interrupts) which may cause a context
 * switch.
 */
.macro INTERRUPT_DISABLE level, mask
	mfmsr \level
	GET_INTERRUPT_MASK mask=\mask
	andc \mask, \level, \mask
	mtmsr \mask
.endm

/*
 * Restore previous machine state.
 */
.macro INTERRUPT_ENABLE level
	mtmsr \level
.endm
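
/*
 * Usage sketch (illustrative only): a critical section in assembly code can
 * save the MSR, mask the asynchronous exceptions and restore the previous
 * machine state afterwards.  The register choices below are arbitrary.
 *
 *	INTERRUPT_DISABLE	r5, r6
 *	... critical section ...
 *	INTERRUPT_ENABLE	r5
 */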

.macro SET_SELF_CPU_CONTROL reg_0, reg_1
#if defined(RTEMS_SMP)
	/* Use Book E Processor ID Register (PIR) */
	mfspr \reg_0, 286
	slwi \reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
#if defined(__powerpc64__)
	LA \reg_1, _Per_CPU_Information
	add \reg_0, \reg_0, \reg_1
#else
	addis \reg_0, \reg_0, _Per_CPU_Information@ha
	addi \reg_0, \reg_0, _Per_CPU_Information@l
#endif
	mtspr PPC_PER_CPU_CONTROL_REGISTER, \reg_0
#endif
.endm

.macro GET_SELF_CPU_CONTROL reg
#if defined(RTEMS_SMP)
	mfspr \reg, PPC_PER_CPU_CONTROL_REGISTER
#else
	lis \reg, _Per_CPU_Information@h
	ori \reg, \reg, _Per_CPU_Information@l
#endif
.endm

.macro SHIFT_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
	srdi \rd, \rs, \imm
#else
	srwi \rd, \rs, \imm
#endif
.endm

.macro COMPARE_LOGICAL cr, ra, rb
#if defined(__powerpc64__)
	cmpld \cr, \ra, \rb
#else
	cmplw \cr, \ra, \rb
#endif
.endm

.macro CLEAR_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
	clrrdi \rd, \rs, \imm
#else
	clrrwi \rd, \rs, \imm
#endif
.endm

#define LINKER_SYMBOL(sym) .extern sym

#endif /* ASM */

#ifdef __cplusplus
}
#endif

/** @} */

#endif /* __LIBCPU_POWERPC_UTILITY_H */