Moved PowerPC cache management code to libcpu.  Also compiled mpc8xx
libcpu support for the first time and removed includes of bsp.h,
references to BSP_Configuration, and Cpu_table.  All of these can be
obtained directly from RTEMS now.
Joel Sherrill
2000-06-14 15:52:24 +00:00
parent e4d7169f1c
commit 61bd030179
12 changed files with 55 additions and 510 deletions
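
The recurring change in the diffs below is the same substitution: instead of including bsp.h and reaching into the BSP-owned BSP_Configuration and Cpu_table structures, the drivers include <rtems.h> and call the accessors RTEMS itself provides. A minimal sketch of that pattern, assuming a hypothetical helper name (only the accessor calls are taken from the diffs):

    #include <rtems.h>

    /* Illustrative helper only -- not part of this commit.  It computes the
     * PIT reload value the way clock.c now does, using RTEMS accessors in
     * place of BSP_Configuration.microseconds_per_tick and
     * Cpu_table.clicks_per_usec. */
    static rtems_unsigned32 pit_value_from_configuration( void )
    {
      return (rtems_configuration_get_microseconds_per_tick() *
              rtems_cpu_configuration_get_clicks_per_usec()) - 1;
    }

The timer driver follows the same pattern, with Cpu_table.timer_least_valid and Cpu_table.timer_average_overhead replaced by rtems_cpu_configuration_get_timer_least_valid() and rtems_cpu_configuration_get_timer_average_overhead().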

View File

@@ -384,157 +384,6 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT" #error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif #endif
#ifndef ASM
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Some functions simply have not been implemented.
*/
#if defined(ppc603) /* And possibly others */
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
/* Helpful macros */
#define PPC_Get_HID0( _value ) \
do { \
_value = 0; /* to avoid warnings */ \
asm volatile( \
"mfspr %0, 0x3f0;" /* get HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
#define PPC_Set_HID0( _value ) \
do { \
asm volatile( \
"isync;" \
"mtspr 0x3f0, %0;" /* load HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00004000; /* set DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFFBFFF; /* clear DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00008000; /* Set ICE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFF7FFF; /* Clear ICE bit */
PPC_Set_HID0( value );
}
#elif ( defined(mpc860) || defined(mpc821) )
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
#define isync __asm__ volatile ("isync\n"::)
static inline void _CPU_flush_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbf 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_flush_entire_data_cache ( void ) {}
static inline void _CPU_invalidate_entire_data_cache ( void ) {}
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_invalidate_1_inst_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "icbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 560, r1 );
isync;
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 560, r1 );
isync;
}
#endif
#endif /* !ASM */
/* /*
* Unless otherwise specified, assume the model has an IP/EP bit to * Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix. * set the exception address prefix.

View File

@@ -384,157 +384,6 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT" #error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif #endif
#ifndef ASM
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Some functions simply have not been implemented.
*/
#if defined(ppc603) /* And possibly others */
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
/* Helpful macros */
#define PPC_Get_HID0( _value ) \
do { \
_value = 0; /* to avoid warnings */ \
asm volatile( \
"mfspr %0, 0x3f0;" /* get HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
#define PPC_Set_HID0( _value ) \
do { \
asm volatile( \
"isync;" \
"mtspr 0x3f0, %0;" /* load HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00004000; /* set DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFFBFFF; /* clear DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00008000; /* Set ICE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFF7FFF; /* Clear ICE bit */
PPC_Set_HID0( value );
}
#elif ( defined(mpc860) || defined(mpc821) )
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
#define isync __asm__ volatile ("isync\n"::)
static inline void _CPU_flush_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbf 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_flush_entire_data_cache ( void ) {}
static inline void _CPU_invalidate_entire_data_cache ( void ) {}
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_invalidate_1_inst_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "icbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 560, r1 );
isync;
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 560, r1 );
isync;
}
#endif
#endif /* !ASM */
/* /*
* Unless otherwise specified, assume the model has an IP/EP bit to * Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix. * set the exception address prefix.

View File

@@ -70,4 +70,6 @@ mpc6xx/mmu/Makefile
 mpc6xx/timer/Makefile
 mpc6xx/wrapup/Makefile
 shared/Makefile
+shared/include/Makefile
+shared/src/Makefile
 wrapup/Makefile)

View File

@@ -36,15 +36,13 @@
  * $Id$
  */
-#include <bsp.h>
+#include <rtems.h>
 #include <clockdrv.h>
 #include <rtems/libio.h>
 #include <stdlib.h>                     /* for atexit() */
 #include <mpc8xx.h>
-extern rtems_cpu_table Cpu_table;       /* owned by BSP */
 volatile rtems_unsigned32 Clock_driver_ticks;
 extern volatile m8xx_t m8xx;
@@ -78,37 +76,35 @@ void Install_clock(rtems_isr_entry clock_isr)
   Clock_driver_ticks = 0;
-  pit_value = (BSP_Configuration.microseconds_per_tick *
-               Cpu_table.clicks_per_usec) - 1 ;
+  pit_value = (rtems_configuration_get_microseconds_per_tick() *
+               rtems_cpu_configuration_get_clicks_per_usec()) - 1 ;
   if (pit_value > 0xffff) {           /* pit is only 16 bits long */
     rtems_fatal_error_occurred(-1);
   }
-  if (BSP_Configuration.ticks_per_timeslice) {
-    /*
-     * initialize the interval here
-     * First tick is set to right amount of time in the future
-     * Future ticks will be incremented over last value set
-     * in order to provide consistent clicks in the face of
-     * interrupt overhead
-     */
-    rtems_interrupt_catch(clock_isr, PPC_IRQ_LVL0, &previous_isr);
-    m8xx.sccr &= ~(1<<24);
-    m8xx.pitc = pit_value;
-    /* set PIT irq level, enable PIT, PIT interrupts */
-    /*  and clear int. status */
-    m8xx.piscr = M8xx_PISCR_PIRQ(0) |
-      M8xx_PISCR_PTE | M8xx_PISCR_PS | M8xx_PISCR_PIE;
+  /*
+   * initialize the interval here
+   * First tick is set to right amount of time in the future
+   * Future ticks will be incremented over last value set
+   * in order to provide consistent clicks in the face of
+   * interrupt overhead
+   */
+  rtems_interrupt_catch(clock_isr, PPC_IRQ_LVL0, &previous_isr);
+  m8xx.sccr &= ~(1<<24);
+  m8xx.pitc = pit_value;
+  /* set PIT irq level, enable PIT, PIT interrupts */
+  /*  and clear int. status */
+  m8xx.piscr = M8xx_PISCR_PIRQ(0) |
+    M8xx_PISCR_PTE | M8xx_PISCR_PS | M8xx_PISCR_PIE;
 #ifdef EPPCBUG_SMC1
-    simask_copy = m8xx.simask | M8xx_SIMASK_LVM0;
+  simask_copy = m8xx.simask | M8xx_SIMASK_LVM0;
 #endif /* EPPCBUG_SMC1 */
-    m8xx.simask |= M8xx_SIMASK_LVM0;
-  }
+  m8xx.simask |= M8xx_SIMASK_LVM0;
   atexit(Clock_exit);
 }
@@ -133,12 +129,10 @@ ReInstall_clock(rtems_isr_entry new_clock_isr)
 void
 Clock_exit(void)
 {
-  if ( BSP_Configuration.ticks_per_timeslice ) {
-    /* disable PIT and PIT interrupts */
-    m8xx.piscr &= ~(M8xx_PISCR_PTE | M8xx_PISCR_PIE);
-    (void) set_vector(0, PPC_IRQ_LVL0, 1);
-  }
+  /* disable PIT and PIT interrupts */
+  m8xx.piscr &= ~(M8xx_PISCR_PTE | M8xx_PISCR_PIE);
+  (void) set_vector(0, PPC_IRQ_LVL0, 1);
 }
 rtems_device_driver Clock_initialize(

View File

@@ -46,10 +46,11 @@
  * $Id$
  */
-#include <bsp.h>
+#include <rtems.h>
 #include <rtems/libio.h>
 #include <mpc8xx.h>
 #include <mpc8xx/console.h>
+#include <mpc8xx/cpm.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <termios.h>
@@ -399,7 +400,7 @@ m8xx_scc2_interrupt_handler (rtems_vector_number v)
   /* Check that the buffer is ours */
   if ((RxBd[SCC2_MINOR]->status & M8xx_BD_EMPTY) == 0) {
     rtems_invalidate_multiple_data_cache_lines(
-      RxBd[SCC2_MINOR]->buffer,
+      (const void *) RxBd[SCC2_MINOR]->buffer,
       RxBd[SCC2_MINOR]->length );
     nb_overflow = rtems_termios_enqueue_raw_characters(
       (void *)ttyp[SCC2_MINOR],
@@ -442,7 +443,7 @@ m8xx_scc3_interrupt_handler (rtems_vector_number v)
   /* Check that the buffer is ours */
   if ((RxBd[SCC3_MINOR]->status & M8xx_BD_EMPTY) == 0) {
     rtems_invalidate_multiple_data_cache_lines(
-      RxBd[SCC3_MINOR]->buffer,
+      (const void *) RxBd[SCC3_MINOR]->buffer,
       RxBd[SCC3_MINOR]->length );
     nb_overflow = rtems_termios_enqueue_raw_characters(
       (void *)ttyp[SCC3_MINOR],
@@ -484,7 +485,7 @@ m8xx_scc4_interrupt_handler (rtems_vector_number v)
   /* Check that the buffer is ours */
   if ((RxBd[SCC4_MINOR]->status & M8xx_BD_EMPTY) == 0) {
     rtems_invalidate_multiple_data_cache_lines(
-      RxBd[SCC4_MINOR]->buffer,
+      (const void *) RxBd[SCC4_MINOR]->buffer,
       RxBd[SCC4_MINOR]->length );
     nb_overflow = rtems_termios_enqueue_raw_characters(
       (void *)ttyp[SCC4_MINOR],
@@ -526,7 +527,7 @@ m8xx_smc1_interrupt_handler (rtems_vector_number v)
   /* Check that the buffer is ours */
   if ((RxBd[SMC1_MINOR]->status & M8xx_BD_EMPTY) == 0) {
     rtems_invalidate_multiple_data_cache_lines(
-      RxBd[SMC1_MINOR]->buffer,
+      (const void *) RxBd[SMC1_MINOR]->buffer,
       RxBd[SMC1_MINOR]->length );
     nb_overflow = rtems_termios_enqueue_raw_characters(
       (void *)ttyp[SMC1_MINOR],
@@ -568,7 +569,7 @@ m8xx_smc2_interrupt_handler (rtems_vector_number v)
   /* Check that the buffer is ours */
   if ((RxBd[SMC2_MINOR]->status & M8xx_BD_EMPTY) == 0) {
     rtems_invalidate_multiple_data_cache_lines(
-      RxBd[SMC2_MINOR]->buffer,
+      (const void *) RxBd[SMC2_MINOR]->buffer,
       RxBd[SMC2_MINOR]->length );
     nb_overflow = rtems_termios_enqueue_raw_characters(
       (void *)ttyp[SMC2_MINOR],
@@ -983,7 +984,10 @@ m8xx_uart_pollRead(
   if (RxBd[minor]->status & M8xx_BD_EMPTY) {
     return -1;
   }
-  _CPU_Data_Cache_Block_Invalidate( RxBd[minor]->buffer );
+  rtems_invalidate_multiple_data_cache_lines(
+    (const void *) RxBd[minor]->buffer,
+    RxBd[minor]->length
+  );
   c = ((char *)RxBd[minor]->buffer)[0];
   RxBd[minor]->status = M8xx_BD_EMPTY | M8xx_BD_WRAP;
   return c;
@@ -1019,7 +1023,10 @@ m8xx_uart_pollWrite(
     while (TxBd[minor]->status & M8xx_BD_READY)
       continue;
     txBuf[minor] = *buf++;
-    _CPU_Data_Cache_Block_Flush( &txBuf[minor] );
+    rtems_flush_multiple_data_cache_lines(
+      (const void *) TxBd[minor]->buffer,
+      TxBd[minor]->length
+    );
     TxBd[minor]->buffer = &txBuf[minor];
     TxBd[minor]->length = 1;
     TxBd[minor]->status = M8xx_BD_READY | M8xx_BD_WRAP;
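
With the _CPU_Data_Cache_Block_* inlines gone from the public header, the polled I/O paths above go through the RTEMS cache manager entry points instead. A rough usage sketch under an assumed buffer-descriptor layout (the type and function names here are illustrative, not from the changed files):

    #include <rtems.h>

    /* Hypothetical descriptor layout, for illustration only */
    typedef struct {
      volatile unsigned short status;
      volatile unsigned short length;
      volatile void          *buffer;
    } example_bd_t;

    static void example_sync_rx( example_bd_t *bd )
    {
      /* Discard stale cache lines before the CPU reads data the CPM wrote */
      rtems_invalidate_multiple_data_cache_lines(
        (const void *) bd->buffer, bd->length );
    }

    static void example_sync_tx( example_bd_t *bd )
    {
      /* Write dirty cache lines back to memory before the CPM transmits */
      rtems_flush_multiple_data_cache_lines(
        (const void *) bd->buffer, bd->length );
    }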

View File

@@ -12,13 +12,14 @@
  * Copyright (c) 1999, National Research Council of Canada
  */
-#include <bsp.h>
-#include <rtems/rtems/intr.h>
-#include <rtems/error.h>
+#include <rtems.h>
+#include <mpc8xx.h>
+#include <mpc8xx/cpm.h>
 /*
  * Send a command to the CPM RISC processer
  */
 void m8xx_cp_execute_cmd( unsigned16 command )
 {
   rtems_unsigned16 lvl;

View File

@@ -13,9 +13,9 @@
  * Copyright (c) 1999, National Research Council of Canada
  */
-#include <bsp.h>
-#include <rtems/rtems/intr.h>
-#include <rtems/error.h>
+#include <rtems.h>
+#include <mpc8xx.h>
+#include <mpc8xx/cpm.h>
 /*
  * Allocation order:

View File

@@ -18,8 +18,6 @@
extern "C" { extern "C" {
#endif #endif
#include <bsp.h>
/* Functions */ /* Functions */
void m8xx_cp_execute_cmd( unsigned16 command ); void m8xx_cp_execute_cmd( unsigned16 command );

View File

@@ -18,8 +18,6 @@
extern "C" { extern "C" {
#endif #endif
#include <bsp.h>
/* /*
* The MMU_TLB_table is used to statically initialize the Table Lookaside * The MMU_TLB_table is used to statically initialize the Table Lookaside
* Buffers in the MMU of an MPC8xx. * Buffers in the MMU of an MPC8xx.

View File

@@ -11,7 +11,8 @@
  * http://www.OARcorp.com/rtems/license.html.
  */
-#include <bsp.h>
+#include <rtems.h>
+#include <mpc8xx.h>
 #include <mpc8xx/mmu.h>
 /*

View File

@@ -43,12 +43,9 @@
  * $Id$
  */
-#include <bsp.h>
 #include <rtems.h>
 #include <mpc8xx.h>
-extern rtems_cpu_table Cpu_table;       /* owned by BSP */
 static volatile rtems_unsigned32 Timer_starting;
 static rtems_boolean Timer_driver_Find_average_overhead;
@@ -86,10 +83,10 @@ int Read_timer(void)
     return total;          /* in XXX microsecond units */
   else {
-    if ( total < Cpu_table.timer_least_valid ) {
+    if ( total < rtems_cpu_configuration_get_timer_least_valid() ) {
       return 0;            /* below timer resolution */
     }
-    return (total - Cpu_table.timer_average_overhead);
+    return (total - rtems_cpu_configuration_get_timer_average_overhead());
   }
 }

View File

@@ -384,157 +384,6 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT" #error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif #endif
#ifndef ASM
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Some functions simply have not been implemented.
*/
#if defined(ppc603) /* And possibly others */
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
/* Helpful macros */
#define PPC_Get_HID0( _value ) \
do { \
_value = 0; /* to avoid warnings */ \
asm volatile( \
"mfspr %0, 0x3f0;" /* get HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
#define PPC_Set_HID0( _value ) \
do { \
asm volatile( \
"isync;" \
"mtspr 0x3f0, %0;" /* load HID0 */ \
"isync" \
: "=r" (_value) \
: "0" (_value) \
); \
} while (0)
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00004000; /* set DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFFBFFF; /* clear DCE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value |= 0x00008000; /* Set ICE bit */
PPC_Set_HID0( value );
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 value;
PPC_Get_HID0( value );
value &= 0xFFFF7FFF; /* Clear ICE bit */
PPC_Set_HID0( value );
}
#elif ( defined(mpc860) || defined(mpc821) )
#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
#define isync __asm__ volatile ("isync\n"::)
static inline void _CPU_flush_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbf 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_1_data_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "dcbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_flush_entire_data_cache ( void ) {}
static inline void _CPU_invalidate_entire_data_cache ( void ) {}
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_enable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_disable_data_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 568, r1 );
isync;
}
static inline void _CPU_invalidate_1_inst_cache_line(
const void * _address )
{
register const void *__address = _address;
asm volatile ( "icbi 0,%0" :: "r" (__address) );
}
static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}
static inline void _CPU_enable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x2<<24);
mtspr( 560, r1 );
isync;
}
static inline void _CPU_disable_inst_cache (
void )
{
unsigned32 r1;
r1 = (0x4<<24);
mtspr( 560, r1 );
isync;
}
#endif
#endif /* !ASM */
/* /*
* Unless otherwise specified, assume the model has an IP/EP bit to * Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix. * set the exception address prefix.