Moved i386 and m68k cache management code to libcpu. Everything now is an implementation of the prototypes in rtems/rtems/cache.h. The libcpu/i386/wrapup directory is no longer needed. The PowerPC needs this done to it.
Author: Joel Sherrill
Date:   2000-06-13 21:53:38 +00:00
Commit: cf1f72ea33 (parent f0b11d63bc)
31 changed files with 607 additions and 1048 deletions
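The public API is unchanged by this reorganization: applications and drivers keep calling the rtems_* routines prototyped in rtems/rtems/cache.h, while the bodies now live in a shared libcpu cache manager that calls per-CPU _CPU_* primitives. A minimal usage sketch (hypothetical driver code, assuming the target CPU actually implements a data cache):

#include <rtems.h>   /* now pulls in <rtems/rtems/cache.h>, see the hunk below */

/* Before a device DMAs *from* this buffer: push dirty lines out to memory. */
void dma_prepare( const void *buf, size_t len )
{
  rtems_flush_multiple_data_cache_lines( buf, len );
}

/* After a device has DMAed *into* this buffer: discard stale cached copies. */
void dma_complete( const void *buf, size_t len )
{
  rtems_invalidate_multiple_data_cache_lines( buf, len );
}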

View File

@@ -419,26 +419,4 @@ void _free_r(
free( ptr );
}
/*
* rtems_cache_aligned_malloc
*
* DESCRIPTION:
*
* This function is used to allocate storage that spans an
* integral number of cache blocks.
*/
RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
size_t nbytes
)
{
/*
* Arrange to have the user storage start on the first cache
* block beyond the header.
*/
return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
+ _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
}
#endif

View File

@@ -42,6 +42,7 @@ extern "C" {
#include <rtems/init.h>
#include <rtems/rtems/tasks.h>
#include <rtems/rtems/intr.h>
#include <rtems/rtems/cache.h>
#include <rtems/rtems/clock.h>
#include <rtems/extension.h>
#include <rtems/rtems/timer.h>

View File

@@ -1,140 +0,0 @@
/* cache.h
*
* Cache Manager
*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
*
* The functions in this file define the API to the RTEMS Cache Manager and
* are divided into data cache and instruction cache functions. Data cache
* functions are only declared if a data cache is supported. Instruction
* cache functions are only declared if an instruction cache is supported.
* Support for a particular cache exists only if _CPU_x_CACHE_ALIGNMENT is
* defined, where x E {DATA, INST}. These definitions are found in the CPU
* dependent source files in the supercore, often
*
* rtems/c/src/exec/score/cpu/CPU/rtems/score/CPU.h
*
* The functions below are implemented with CPU dependent inline routines
* also found in the above file. In the event that a CPU does not support a
* specific function, the CPU dependent routine does nothing (but does exist).
*
* At this point, the Cache Manager makes no considerations, and provides no
* support for BSP specific issues such as a secondary cache. In such a system,
* the CPU dependent routines would have to be modified, or a BSP layer added
* to this Manager.
*/
#ifndef __CACHE_h
#define __CACHE_h
#ifdef __cplusplus
extern "C" {
#endif
#include <rtems/system.h>
#include <sys/types.h>
/* THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE */
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
/*
* This function is called to flush the data cache by performing cache
* copybacks. It must determine how many cache lines need to be copied
* back and then perform the copybacks.
*/
void rtems_flush_multiple_data_cache_lines( const void *, size_t );
/*
* This function is responsible for performing a data cache invalidate.
* It must determine how many cache lines need to be invalidated and then
* perform the invalidations.
*/
void rtems_invalidate_multiple_data_cache_lines( const void *, size_t );
/*
* This function is responsible for performing a data cache flush.
* It flushes the entire cache.
*/
void rtems_flush_entire_data_cache( void );
/*
* This function is responsible for performing a data cache
* invalidate. It invalidates the entire cache.
*/
void rtems_invalidate_entire_data_cache( void );
/*
* This function returns the data cache granularity.
*/
int rtems_get_data_cache_line_size( void );
/*
* This function freezes the data cache.
*/
void rtems_freeze_data_cache( void );
/*
* This function unfreezes the data cache.
*/
void rtems_unfreeze_data_cache( void );
/*
* These functions enable/disable the data cache.
*/
void rtems_enable_data_cache( void );
void rtems_disable_data_cache( void );
#endif
/* THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE */
#if defined(_CPU_INST_CACHE_ALIGNMENT)
/*
* This function is responsible for performing an instruction cache
* invalidate. It must determine how many cache lines need to be invalidated
* and then perform the invalidations.
*/
void rtems_invalidate_multiple_inst_cache_lines( const void *, size_t );
/*
* This function is responsible for performing an instruction cache
* invalidate. It invalidates the entire cache.
*/
void rtems_invalidate_entire_inst_cache( void );
/*
* This function returns the instruction cache granularity.
*/
int rtems_get_inst_cache_line_size( void );
/*
* This function freezes the instruction cache.
*/
void rtems_freeze_inst_cache( void );
/*
* This function unfreezes the instruction cache.
*/
void rtems_unfreeze_inst_cache( void );
/*
* These functions enable/disable the instruction cache.
*/
void rtems_enable_inst_cache( void );
void rtems_disable_inst_cache( void );
#endif
#ifdef __cplusplus
}
#endif
#endif
/* end of include file */
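The header above also exposes the cache geometry; a short, hedged example of using that query together with the align-up idiom the manager itself relies on (the helper name is made up):

#include <rtems.h>

/* Round a transfer size up to a whole number of data cache lines. */
size_t round_to_cache_lines( size_t nbytes )
{
  int line = rtems_get_data_cache_line_size();   /* e.g. 16 on the i386/m68k ports below */

  if ( line <= 0 )      /* no data cache configured for this CPU */
    return nbytes;

  return ( nbytes + line - 1 ) & ~( (size_t)( line - 1 ) );
}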

View File

@@ -50,7 +50,7 @@ PARTITION_C_FILES = part.c partcreate.c partdelete.c partgetbuffer.c \
DPMEM_C_FILES = dpmem.c dpmemcreate.c dpmemdelete.c dpmemexternal2internal.c \
dpmemident.c dpmeminternal2external.c
STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \ STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
$(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
$(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
$(PARTITION_C_FILES) $(REGION_C_FILES) $(DPMEM_C_FILES)

View File

@@ -185,101 +185,6 @@ static inline void i386_set_cr3(unsigned int segment)
asm volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) );
}
/*
* Disable the entire cache
*/
void _CPU_disable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 1;
regCr0.cr0.no_write_through = 1;
i386_set_cr0( regCr0.i );
rtems_flush_entire_data_cache();
}
/*
* Enable the entire cache
*/
static inline void _CPU_enable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 0;
regCr0.cr0.no_write_through = 0;
i386_set_cr0( regCr0.i );
/*rtems_flush_entire_data_cache();*/
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
* each CPU. The routines below should be implemented per CPU,
* to accomodate the capabilities of each.
*/
/* FIXME: I don't belong here. */
#define I386_CACHE_ALIGNMENT 16
#if defined(I386_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
static inline void _CPU_freeze_data_cache (void) {}
static inline void _CPU_unfreeze_data_cache (void) {}
static inline void _CPU_invalidate_1_inst_cache_line const void * d_addr() {}
static inline void _CPU_freeze_inst_cache (void) {}
static inline void _CPU_unfreeze_inst_cache (void) {}
static inline void _CPU_flush_entire_data_cache (
const void * d_addr )
{
asm ("wbinvd");
}
static inline void _CPU_invalidate_entire_data_cache (
const void * d_addr )
{
asm ("invd");
}
static inline void _CPU_enable_data_cache (
void )
{
_CPU_enable_cache();
}
static inline void _CPU_disable_data_cache (
void )
{
_CPU_disable_cache();
}
static inline void _CPU_invalidate_entire_inst_cache (
const void * i_addr )
{
asm ("invd");
}
static inline void _CPU_enable_inst_cache (
void )
{
_CPU_enable_cache();
}
static inline void _CPU_disable_inst_cache (
void )
{
_CPU_disable_cache();
}
#endif
/* routines */
/*
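For reference, the cr0 type used by the removed code above (and by the new libcpu/i386/cache.c added below) comes from the i386 register definitions and overlays named bit-fields on the raw 32-bit control register. A rough sketch of the shape the code relies on; the field layout here is illustrative, not the actual registers.h contents:

typedef union {
  unsigned int i;                               /* raw CR0 value from i386_get_cr0() */
  struct {
    unsigned int low_bits                 : 29; /* PE, MP, EM, ... (not used here)   */
    unsigned int no_write_through         : 1;  /* CR0.NW (bit 29)                   */
    unsigned int page_level_cache_disable : 1;  /* CR0.CD (bit 30)                   */
  } cr0;
} cr0;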

View File

@@ -373,217 +373,6 @@ static inline void * _CPU_virtual_to_physical (
}
/*
* Since the cacr is common to all mc680x0, provide macros
* for masking values in that register.
*/
/*
* Used to clear bits in the cacr.
*/
#define _CPU_CACR_AND(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
andl %2, %0; /* and with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* Used to set bits in the cacr.
*/
#define _CPU_CACR_OR(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
orl %2, %0; /* or with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*/
#if ( defined(__mc68020__) || defined(__mc68030__) )
#define M68K_INST_CACHE_ALIGNMENT 16
#if defined(__mc68030__)
#define M68K_DATA_CACHE_ALIGNMENT 16
/* Only the mc68030 has a data cache; it is writethrough only. */
static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
static inline void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR(0x00000400);
}
static inline void _CPU_invalidate_entire_data_cache (
void )
{
_CPU_CACR_OR( 0x00000800 );
}
static inline void _CPU_freeze_data_cache (
void )
{
_CPU_CACR_OR( 0x00000200 );
}
static inline void _CPU_unfreeze_data_cache (
void )
{
_CPU_CACR_AND( 0xFFFFFDFF );
}
static inline void _CPU_enable_data_cache ( void )
{
_CPU_CACR_OR( 0x00000100 );
}
static inline void _CPU_disable_data_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFEFF );
}
#endif
/* Both the 68020 and 68030 have instruction caches */
static inline void _CPU_invalidate_1_inst_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR( 0x00000004 );
}
static inline void _CPU_invalidate_entire_inst_cache (
void )
{
_CPU_CACR_OR( 0x00000008 );
}
static inline void _CPU_freeze_inst_cache (
void )
{
_CPU_CACR_OR( 0x00000002);
}
static inline void _CPU_unfreeze_inst_cache (
void )
{
_CPU_CACR_AND( 0xFFFFFFFD );
}
static inline void _CPU_enable_inst_cache ( void )
{
_CPU_CACR_OR( 0x00000001 );
}
static inline void _CPU_disable_inst_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFFFE );
}
#elif ( defined(__mc68040__) || defined (__mc68060__) )
#define M68K_INST_CACHE_ALIGNMENT 16
#define M68K_DATA_CACHE_ALIGNMENT 16
/* Cannot be frozen */
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}
static inline void _CPU_flush_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
}
static inline void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
}
static inline void _CPU_flush_entire_data_cache (
void )
{
asm volatile ( "cpusha %%dc" :: );
}
static inline void _CPU_invalidate_entire_data_cache (
void )
{
asm volatile ( "cinva %%dc" :: );
}
static inline void _CPU_enable_data_cache (
void )
{
_CPU_CACR_OR( 0x80000000 );
}
static inline void _CPU_disable_data_cache (
void )
{
_CPU_CACR_AND( 0x7FFFFFFF );
}
static inline void _CPU_invalidate_1_inst_cache_line (
const void * i_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
}
static inline void _CPU_invalidate_entire_inst_cache (
void )
{
asm volatile ( "cinva %%ic" :: );
}
static inline void _CPU_enable_inst_cache (
void )
{
_CPU_CACR_OR( 0x00008000 );
}
static inline void _CPU_disable_inst_cache (
void )
{
_CPU_CACR_AND( 0xFFFF7FFF );
}
#endif
#if defined(M68K_DATA_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
#endif
#if defined(M68K_INST_CACHE_ALIGNMENT)
#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
#endif
#endif /* !ASM */
#ifdef __cplusplus
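The _CPU_CACR_AND()/_CPU_CACR_OR() macros above (kept verbatim in the new m68k libcpu file below) are plain read-modify-write sequences on the m68k cache control register. Functionally they amount to the following sketch; the helper names are invented here, and the real macros do the whole sequence inside a single asm statement:

static inline unsigned long m68k_read_cacr( void )
{
  unsigned long v;
  asm volatile ( "movec %%cacr,%0" : "=d" (v) );
  return v;
}

static inline void m68k_write_cacr( unsigned long v )
{
  asm volatile ( "movec %0,%%cacr" : : "d" (v) );
}

static inline void m68k_cacr_and( unsigned long mask )  /* clear bits, cf. _CPU_CACR_AND() */
{
  m68k_write_cacr( m68k_read_cacr() & mask );
}

static inline void m68k_cacr_or( unsigned long mask )   /* set bits, cf. _CPU_CACR_OR() */
{
  m68k_write_cacr( m68k_read_cacr() | mask );
}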

View File

@@ -13,7 +13,8 @@ endif
BSP_FILES = startup clock console timer $(NETWORK)
# bummer; have to use $foreach since % pattern subst rules only replace 1x
OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
$(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
LIB = $(ARCH)/libbsp.a
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg

View File

@@ -13,7 +13,8 @@ endif
BSP_FILES = startup clock console timer $(NETWORK)
# bummer; have to use $foreach since % pattern subst rules only replace 1x
OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
$(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
LIB = $(ARCH)/libbsp.a
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg

View File

@@ -13,7 +13,8 @@ endif
BSP_FILES = startup clock console timer $(NETWORK)
# bummer; have to use $foreach since % pattern subst rules only replace 1x
OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
$(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
LIB = $(ARCH)/libbsp.a
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg

View File

@@ -13,6 +13,7 @@ BSP_PIECES = clock console fatal startup timer $(NETWORKING_DRIVER)
# bummer; have to use $foreach since % pattern subst rules only replace 1x
OBJS = $(foreach piece, $(BSP_PIECES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
$(wildcard ../../../../libcpu/$(RTEMS_CPU)/shared/*/$(ARCH)/*.o) \
$(wildcard ../../../../libcpu/$(RTEMS_CPU)/$(RTEMS_CPU_MODEL)/fpsp/$(ARCH)/fpsp.rel)
LIB = $(ARCH)/libbsp.a

View File

@@ -419,26 +419,4 @@ void _free_r(
free( ptr );
}
/*
* rtems_cache_aligned_malloc
*
* DESCRIPTION:
*
* This function is used to allocate storage that spans an
* integral number of cache blocks.
*/
RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
size_t nbytes
)
{
/*
* Arrange to have the user storage start on the first cache
* block beyond the header.
*/
return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
+ _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
}
#endif

View File

@@ -5,15 +5,15 @@
AUTOMAKE_OPTIONS = foreign 1.4
ACLOCAL_AMFLAGS = -I $(RTEMS_TOPdir)/aclocal
LIBNAME = libcpu VPATH = @srcdir@:@srcdir@/../shared/src
LIB = $(ARCH)/$(LIBNAME).a
C_FILES = cpu.c displayCpu.c page.c C_FILES = cache.c cache_aligned_malloc.c cache_manager.c displayCpu.c idt.c page.c
C_O_FILES = $(C_FILES:%.c=$(ARCH)/%.o)
H_FILES = cpu.h registers.h cpuModel.h H_FILES = cache_.h
INSTALLED_H_FILES = cpu.h registers.h cpuModel.h
S_FILES = cpu_asm.S cpuModel.S S_FILES = cpuModel.S idtr.S
S_O_FILES = $(S_FILES:%.S=$(ARCH)/%.o)
OBJS = $(C_O_FILES) $(S_O_FILES)
@@ -21,8 +21,7 @@ OBJS = $(C_O_FILES) $(S_O_FILES)
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(top_srcdir)/../../../../../automake/lib.am
$(LIB): $(OBJS) AM_CPPFLAGS += -I$(srcdir)
$(make-library)
$(PROJECT_INCLUDE)/libcpu:
$(mkinstalldirs) $@
@@ -30,19 +29,16 @@ $(PROJECT_INCLUDE)/libcpu:
$(PROJECT_INCLUDE)/libcpu/%.h: %.h
$(INSTALL_DATA) $< $@
$(PROJECT_RELEASE)/lib/$(LIBNAME)$(LIB_VARIANT).a: $(LIB) $(PROJECT_INCLUDE)/libcpu/cache.h: $(top_srcdir)/../shared/include/cache.h
$(INSTALL_DATA) $< $@
PREINSTALL_FILES += $(PROJECT_INCLUDE)/libcpu \
$(H_FILES:%=$(PROJECT_INCLUDE)/libcpu/%) $(PROJECT_INCLUDE)/libcpu/cache.h \
$(INSTALLED_H_FILES:%=$(PROJECT_INCLUDE)/libcpu/%)
TMPINSTALL_FILES += $(PROJECT_RELEASE)/lib/$(LIBNAME)$(LIB_VARIANT).a all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS)
all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS) $(LIB) $(TMPINSTALL_FILES) EXTRA_DIST = cache.c cache_.h cpu.h cpuModel.S cpuModel.h \
displayCpu.c idt.c idtr.S page.c registers.h
.PRECIOUS: $(LIB)
EXTRA_DIST = cpu.c cpu.h cpuModel.S cpuModel.h cpu_asm.S displayCpu.c page.c \
registers.h
include $(top_srcdir)/../../../../../automake/local.am

View File

@@ -0,0 +1,99 @@
/*
* Cache Management Support Routines for the i386
*
* $Id$
*/
#include <rtems.h>
#include <libcpu/registers.h>
#include "cache_.h"
void _CPU_disable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 1;
regCr0.cr0.no_write_through = 1;
i386_set_cr0( regCr0.i );
rtems_flush_entire_data_cache();
}
/*
* Enable the entire cache
*/
void _CPU_enable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 0;
regCr0.cr0.no_write_through = 0;
i386_set_cr0( regCr0.i );
/*rtems_flush_entire_data_cache();*/
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
* each CPU. The routines below should be implemented per CPU,
* to accomodate the capabilities of each.
*/
/* FIXME: I don't belong here. */
#define I386_CACHE_ALIGNMENT 16
#if defined(I386_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
void _CPU_flush_1_data_cache_line(const void *d_addr) {}
void _CPU_invalidate_1_data_cache_line(const void *d_addr) {}
void _CPU_freeze_data_cache(void) {}
void _CPU_unfreeze_data_cache(void) {}
void _CPU_invalidate_1_inst_cache_line ( const void *d_addr ) {}
void _CPU_freeze_inst_cache(void) {}
void _CPU_unfreeze_inst_cache(void) {}
void _CPU_flush_entire_data_cache(
const void * d_addr
)
{
asm volatile ("wbinvd");
}
void _CPU_invalidate_entire_data_cache(
const void * d_addr
)
{
asm volatile ("invd");
}
void _CPU_enable_data_cache(void)
{
_CPU_enable_cache();
}
void _CPU_disable_data_cache(void)
{
_CPU_disable_cache();
}
void _CPU_invalidate_entire_inst_cache(void)
{
asm volatile ("invd");
}
void _CPU_enable_inst_cache(void)
{
_CPU_enable_cache();
}
void _CPU_disable_inst_cache( void )
{
_CPU_disable_cache();
}
#endif
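One detail worth noting when reading the file above: the flush path uses wbinvd (write dirty lines back to memory, then invalidate), while the invalidate paths use invd (discard without write-back), so unwritten data is lost on a pure invalidate. A hedged sketch of how a caller reaches these instructions through the public API:

#include <rtems.h>

void example_entire_cache_ops( void )
{
  /* Reaches _CPU_flush_entire_data_cache() above, i.e. wbinvd. */
  rtems_flush_entire_data_cache();

  /* Reaches _CPU_invalidate_entire_data_cache() above, i.e. invd. */
  rtems_invalidate_entire_data_cache();
}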

View File

@@ -0,0 +1,16 @@
/*
* i386 Cache Manager Wrapper
*/
#ifndef __i386_CACHE_h
#define __i386_CACHE_h
#define I386_CACHE_ALIGNMENT 16
#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
#include <libcpu/cache_.h>
#endif
/* end of include file */

View File

@@ -5,7 +5,15 @@
AUTOMAKE_OPTIONS = foreign 1.4
ACLOCAL_AMFLAGS = -I $(RTEMS_TOPdir)/aclocal
SUBDIRS = m68040 if shared
SHARED_LIB = shared
endif
if m68040
CPU_SUBDIR = m68040
endif
SUBDIRS = $(SHARED_LIB) $(CPU_SUBDIR)
include $(top_srcdir)/../../../../../automake/subdirs.am
include $(top_srcdir)/../../../../../automake/local.am

View File

@@ -26,10 +26,19 @@ RTEMS_CANONICALIZE_TOOLS
RTEMS_CHECK_CUSTOM_BSP(RTEMS_BSP)
RTEMS_CHECK_BSP_CACHE(RTEMS_BSP)
AM_CONDITIONAL(shared, test "$RTEMS_CPU_MODEL" = "m68020" \
|| test "$RTEMS_CPU_MODEL" = "m68020" \
|| test "$RTEMS_CPU_MODEL" = "m68030" \
|| test "$RTEMS_CPU_MODEL" = "m68lc040" \
|| test "$RTEMS_CPU_MODEL" = "m68040" \
|| test "$RTEMS_CPU_MODEL" = "m68060" )
AM_CONDITIONAL(m68040, test "$RTEMS_CPU_MODEL" = "m68040")
# Explicitly list all Makefiles here
AC_OUTPUT(
Makefile
shared/Makefile
shared/cache/Makefile
m68040/Makefile
m68040/fpsp/Makefile)

View File

@@ -0,0 +1,10 @@
##
## $Id$
##
AUTOMAKE_OPTIONS = foreign 1.4
SUBDIRS = cache
include $(top_srcdir)/../../../../../automake/subdirs.am
include $(top_srcdir)/../../../../../automake/local.am

View File

@@ -0,0 +1,38 @@
##
## $Id$
##
AUTOMAKE_OPTIONS = foreign 1.4
ACLOCAL_AMFLAGS = -I $(RTEMS_TOPdir)/aclocal
VPATH = @srcdir@:@srcdir@/../../../shared/src
C_FILES = cache.c cache_aligned_malloc.c cache_manager.c
C_O_FILES = $(C_FILES:%.c=$(ARCH)/%.o)
H_FILES = cache_.h
INSTALLED_H_FILES =
OBJS = $(C_O_FILES)
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(top_srcdir)/../../../../../automake/lib.am
AM_CPPFLAGS += -I$(srcdir)
$(PROJECT_INCLUDE)/libcpu:
$(mkinstalldirs) $@
$(PROJECT_INCLUDE)/libcpu/%.h: %.h
$(INSTALL_DATA) $< $@
$(PROJECT_INCLUDE)/libcpu/cache.h: $(top_srcdir)/../shared/include/cache.h
$(INSTALL_DATA) $< $@
PREINSTALL_FILES += $(PROJECT_INCLUDE)/libcpu $(PROJECT_INCLUDE)/libcpu/cache.h
all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS)
EXTRA_DIST = cache.c cache_.h
include $(top_srcdir)/../../../../../automake/local.am

View File

@@ -0,0 +1,192 @@
/*
* Cache Management Support Routines for the MC68040
*
* $Id$
*/
#include <rtems.h>
#include "cache_.h"
/*
* Since the cacr is common to all mc680x0, provide macros
* for masking values in that register.
*/
/*
* Used to clear bits in the cacr.
*/
#define _CPU_CACR_AND(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
andl %2, %0; /* and with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* Used to set bits in the cacr.
*/
#define _CPU_CACR_OR(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
orl %2, %0; /* or with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*/
#if ( defined(__mc68020__) || defined(__mc68030__) )
#if defined(__mc68030__)
/* Only the mc68030 has a data cache; it is writethrough only. */
void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR(0x00000400);
}
void _CPU_invalidate_entire_data_cache ( void )
{
_CPU_CACR_OR( 0x00000800 );
}
void _CPU_freeze_data_cache ( void )
{
_CPU_CACR_OR( 0x00000200 );
}
void _CPU_unfreeze_data_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFDFF );
}
void _CPU_enable_data_cache ( void )
{
_CPU_CACR_OR( 0x00000100 );
}
void _CPU_disable_data_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFEFF );
}
#endif
/* Both the 68020 and 68030 have instruction caches */
void _CPU_invalidate_1_inst_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR( 0x00000004 );
}
void _CPU_invalidate_entire_inst_cache ( void )
{
_CPU_CACR_OR( 0x00000008 );
}
void _CPU_freeze_inst_cache ( void )
{
_CPU_CACR_OR( 0x00000002);
}
void _CPU_unfreeze_inst_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFFFD );
}
void _CPU_enable_inst_cache ( void )
{
_CPU_CACR_OR( 0x00000001 );
}
void _CPU_disable_inst_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFFFE );
}
#elif ( defined(__mc68040__) || defined (__mc68060__) )
/* Cannot be frozen */
void _CPU_freeze_data_cache ( void ) {}
void _CPU_unfreeze_data_cache ( void ) {}
void _CPU_freeze_inst_cache ( void ) {}
void _CPU_unfreeze_inst_cache ( void ) {}
void _CPU_flush_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
}
void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
}
void _CPU_flush_entire_data_cache ( void )
{
asm volatile ( "cpusha %%dc" :: );
}
void _CPU_invalidate_entire_data_cache ( void )
{
asm volatile ( "cinva %%dc" :: );
}
void _CPU_enable_data_cache ( void )
{
_CPU_CACR_OR( 0x80000000 );
}
void _CPU_disable_data_cache ( void )
{
_CPU_CACR_AND( 0x7FFFFFFF );
}
void _CPU_invalidate_1_inst_cache_line (
const void * i_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
}
void _CPU_invalidate_entire_inst_cache ( void )
{
asm volatile ( "cinva %%ic" :: );
}
void _CPU_enable_inst_cache ( void )
{
_CPU_CACR_OR( 0x00008000 );
}
void _CPU_disable_inst_cache ( void )
{
_CPU_CACR_AND( 0xFFFF7FFF );
}
#endif
/* end of file */

View File

@@ -0,0 +1,29 @@
/*
* M68K Cache Manager Support
*/
#ifndef __M68K_CACHE_h
#define __M68K_CACHE_h
#if defined(__mc68020__)
#define M68K_INST_CACHE_ALIGNMENT 16
#elif defined(__mc68030__)
#define M68K_INST_CACHE_ALIGNMENT 16
#define M68K_DATA_CACHE_ALIGNMENT 16
#elif ( defined(__mc68040__) || defined (__mc68060__) )
#define M68K_INST_CACHE_ALIGNMENT 16
#define M68K_DATA_CACHE_ALIGNMENT 16
#endif
#if defined(M68K_DATA_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
#endif
#if defined(M68K_INST_CACHE_ALIGNMENT)
#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
#endif
#include <libcpu/cache.h>
#endif
/* end of include file */
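This header shows the pattern every libcpu port follows: publish the CPU's line size through the _CPU_*_CACHE_ALIGNMENT macros (the shared cache manager only compiles real bodies for the corresponding routines when they are defined) and then pull in the installed libcpu/cache.h prototypes. A hypothetical cache_.h for some new port with a 32-byte, data-cache-only CPU, purely for illustration:

/* Hypothetical example, not part of this commit. */
#ifndef __NEWCPU_CACHE_h
#define __NEWCPU_CACHE_h

#define NEWCPU_DATA_CACHE_ALIGNMENT 32           /* data cache line size in bytes */

/* No instruction cache on this imaginary CPU, so _CPU_INST_CACHE_ALIGNMENT
 * stays undefined and the instruction cache entry points become no-ops.
 */
#define _CPU_DATA_CACHE_ALIGNMENT NEWCPU_DATA_CACHE_ALIGNMENT

#include <libcpu/cache.h>

#endif
/* end of include file */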

View File

@@ -0,0 +1,32 @@
/*
* libcpu Cache Manager Support
*
* $Id$
*/
#ifndef __LIBCPU_CACHE_h
#define __LIBCPU_CACHE_h
#include <sys/types.h>
void _CPU_disable_cache();
void _CPU_enable_cache();
void _CPU_flush_1_data_cache_line(const void *d_addr);
void _CPU_invalidate_1_data_cache_line(const void *d_addr);
void _CPU_freeze_data_cache(void);
void _CPU_unfreeze_data_cache(void);
void _CPU_invalidate_1_inst_cache_line(const void *d_addr);
void _CPU_freeze_inst_cache(void);
void _CPU_unfreeze_inst_cache(void);
void _CPU_flush_entire_data_cache(void);
void _CPU_invalidate_entire_data_cache(void);
void _CPU_enable_data_cache(void);
void _CPU_disable_data_cache(void);
void _CPU_invalidate_entire_inst_cache(void);
void _CPU_enable_inst_cache(void);
void _CPU_disable_inst_cache(void);
#endif
/* end of include file */
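Every port has to supply bodies for the prototypes above even when the hardware gives them nothing to do; the i386 file earlier in this commit leaves several of them empty for exactly that reason. A minimal, hypothetical port for a CPU without caches could therefore be little more than:

/* Hypothetical no-cache port, for illustration only. */
#include "cache_.h"

void _CPU_flush_1_data_cache_line( const void *d_addr ) {}
void _CPU_invalidate_1_data_cache_line( const void *d_addr ) {}
void _CPU_flush_entire_data_cache( void ) {}
void _CPU_invalidate_entire_data_cache( void ) {}
void _CPU_freeze_data_cache( void ) {}
void _CPU_unfreeze_data_cache( void ) {}
void _CPU_enable_data_cache( void ) {}
void _CPU_disable_data_cache( void ) {}
void _CPU_invalidate_1_inst_cache_line( const void *i_addr ) {}
void _CPU_invalidate_entire_inst_cache( void ) {}
void _CPU_freeze_inst_cache( void ) {}
void _CPU_unfreeze_inst_cache( void ) {}
void _CPU_enable_inst_cache( void ) {}
void _CPU_disable_inst_cache( void ) {}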

View File

@@ -0,0 +1,43 @@
/*
* RTEMS Cache Aligned Malloc
*
*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#include <rtems.h>
#include <cache_.h>
/*
* rtems_cache_aligned_malloc
*
* DESCRIPTION:
*
* This function is used to allocate storage that spans an
* integral number of cache blocks.
*/
void *rtems_cache_aligned_malloc (
size_t nbytes
)
{
/*
* Arrange to have the user storage start on the first cache
* block beyond the header.
*/
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
return (void *) ((((unsigned long)
malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
+ _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
#else
return malloc( nbytes );
#endif
}
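The rounding expression above is the standard align-up idiom: over-allocate by alignment - 1 bytes, then add alignment - 1 to the returned pointer and mask the low bits off. With a 16-byte _CPU_DATA_CACHE_ALIGNMENT, a malloc() result of 0x1004 becomes 0x1013 after the addition and 0x1010 after masking, i.e. the next cache line boundary, while an already aligned result is returned unchanged. A small standalone check of that arithmetic (not RTEMS code):

#include <assert.h>

#define ALIGN 16UL   /* stands in for _CPU_DATA_CACHE_ALIGNMENT */

static unsigned long align_up( unsigned long p )
{
  return ( p + ALIGN - 1 ) & ~( ALIGN - 1 );
}

int main( void )
{
  assert( align_up( 0x1004 ) == 0x1010 );  /* rounded up to the next line */
  assert( align_up( 0x1010 ) == 0x1010 );  /* already aligned, unchanged  */
  return 0;
}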

View File

@@ -1,5 +1,4 @@
/* cache.c /*
*
* Cache Manager * Cache Manager
* *
* COPYRIGHT (c) 1989-1999. * COPYRIGHT (c) 1989-1999.
@@ -10,7 +9,7 @@
* http://www.OARcorp.com/rtems/license.html. * http://www.OARcorp.com/rtems/license.html.
* *
* *
* The functions in this file define the API to the RTEMS Cache Manager and * The functions in this file implement the API to the RTEMS Cache Manager and
* are divided into data cache and instruction cache functions. Data cache * are divided into data cache and instruction cache functions. Data cache
* functions are only declared if a data cache is supported. Instruction * functions are only declared if a data cache is supported. Instruction
* cache functions are only declared if an instruction cache is supported. * cache functions are only declared if an instruction cache is supported.
@@ -30,15 +29,15 @@
* to this Manager. * to this Manager.
*/ */
#include <rtems/system.h> #include <rtems.h>
#include <sys/types.h> #include <sys/types.h>
#include <rtems/rtems/cache.h> #include <libcpu/cache.h>
#include "cache_.h"
/* /*
* THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
*/ */
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
/* /*
* This function is called to flush the data cache by performing cache * This function is called to flush the data cache by performing cache
@@ -48,18 +47,22 @@
void void
rtems_flush_multiple_data_cache_lines( const void * d_addr, size_t n_bytes ) rtems_flush_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
const void * final_address; const void * final_address;
/* /*
* Set d_addr to the beginning of the cache line; final_address indicates * Set d_addr to the beginning of the cache line; final_address indicates
* the last address_t which needs to be pushed. Increment d_addr and push * the last address_t which needs to be pushed. Increment d_addr and push
* the resulting line until final_address is passed. * the resulting line until final_address is passed.
*/ */
final_address = (void *)((size_t)d_addr + n_bytes - 1); final_address = (void *)((size_t)d_addr + n_bytes - 1);
d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1)); d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
while( d_addr <= final_address ) { while( d_addr <= final_address ) {
_CPU_flush_1_data_cache_line( d_addr ); _CPU_flush_1_data_cache_line( d_addr );
d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT); d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
} }
#endif
} }
@@ -68,21 +71,26 @@ rtems_flush_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
* It must determine how many cache lines need to be invalidated and then * It must determine how many cache lines need to be invalidated and then
* perform the invalidations. * perform the invalidations.
*/ */
void void
rtems_invalidate_multiple_data_cache_lines( const void * d_addr, size_t n_bytes ) rtems_invalidate_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
const void * final_address; const void * final_address;
/* /*
* Set d_addr to the beginning of the cache line; final_address indicates * Set d_addr to the beginning of the cache line; final_address indicates
* the last address_t which needs to be invalidated. Increment d_addr and * the last address_t which needs to be invalidated. Increment d_addr and
* invalidate the resulting line until final_address is passed. * invalidate the resulting line until final_address is passed.
*/ */
final_address = (void *)((size_t)d_addr + n_bytes - 1); final_address = (void *)((size_t)d_addr + n_bytes - 1);
d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1)); d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
while( final_address > d_addr ) { while( final_address > d_addr ) {
_CPU_invalidate_1_data_cache_line( d_addr ); _CPU_invalidate_1_data_cache_line( d_addr );
d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT); d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
} }
#endif
} }
@@ -93,11 +101,12 @@ rtems_invalidate_multiple_data_cache_lines( const void * d_addr, size_t n_bytes
void void
rtems_flush_entire_data_cache( void ) rtems_flush_entire_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
/* /*
* Call the CPU-specific routine * Call the CPU-specific routine
*/ */
_CPU_flush_entire_data_cache(); _CPU_flush_entire_data_cache();
#endif
} }
@@ -108,10 +117,13 @@ rtems_flush_entire_data_cache( void )
void void
rtems_invalidate_entire_data_cache( void ) rtems_invalidate_entire_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
/* /*
* Call the CPU-specific routine * Call the CPU-specific routine
*/ */
_CPU_invalidate_entire_data_cache(); _CPU_invalidate_entire_data_cache();
#endif
} }
@@ -121,7 +133,11 @@ rtems_invalidate_entire_data_cache( void )
int int
rtems_get_data_cache_line_size( void ) rtems_get_data_cache_line_size( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
return _CPU_DATA_CACHE_ALIGNMENT; return _CPU_DATA_CACHE_ALIGNMENT;
#else
return 0;
#endif
} }
@@ -132,7 +148,9 @@ rtems_get_data_cache_line_size( void )
void void
rtems_freeze_data_cache( void ) rtems_freeze_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
_CPU_freeze_data_cache(); _CPU_freeze_data_cache();
#endif
} }
@@ -141,7 +159,9 @@ rtems_freeze_data_cache( void )
*/ */
void rtems_unfreeze_data_cache( void ) void rtems_unfreeze_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
_CPU_unfreeze_data_cache(); _CPU_unfreeze_data_cache();
#endif
} }
@@ -149,7 +169,9 @@ void rtems_unfreeze_data_cache( void )
void void
rtems_enable_data_cache( void ) rtems_enable_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
_CPU_enable_data_cache(); _CPU_enable_data_cache();
#endif
} }
@@ -157,16 +179,16 @@ rtems_enable_data_cache( void )
void void
rtems_disable_data_cache( void ) rtems_disable_data_cache( void )
{ {
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
_CPU_disable_data_cache(); _CPU_disable_data_cache();
}
#endif #endif
}
/* /*
* THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
*/ */
#if defined(_CPU_INST_CACHE_ALIGNMENT)
/* /*
* This function is responsible for performing an instruction cache * This function is responsible for performing an instruction cache
@@ -176,18 +198,22 @@ rtems_disable_data_cache( void )
void void
rtems_invalidate_multiple_inst_cache_lines( const void * i_addr, size_t n_bytes ) rtems_invalidate_multiple_inst_cache_lines( const void * i_addr, size_t n_bytes )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
const void * final_address; const void * final_address;
/* /*
* Set i_addr to the beginning of the cache line; final_address indicates * Set i_addr to the beginning of the cache line; final_address indicates
* the last address_t which needs to be invalidated. Increment i_addr and * the last address_t which needs to be invalidated. Increment i_addr and
* invalidate the resulting line until final_address is passed. * invalidate the resulting line until final_address is passed.
*/ */
final_address = (void *)((size_t)i_addr + n_bytes - 1); final_address = (void *)((size_t)i_addr + n_bytes - 1);
i_addr = (void *)((size_t)i_addr & ~(_CPU_INST_CACHE_ALIGNMENT - 1)); i_addr = (void *)((size_t)i_addr & ~(_CPU_INST_CACHE_ALIGNMENT - 1));
while( final_address > i_addr ) { while( final_address > i_addr ) {
_CPU_invalidate_1_inst_cache_line( i_addr ); _CPU_invalidate_1_inst_cache_line( i_addr );
i_addr = (void *)((size_t)i_addr + _CPU_INST_CACHE_ALIGNMENT); i_addr = (void *)((size_t)i_addr + _CPU_INST_CACHE_ALIGNMENT);
} }
#endif
} }
@@ -198,10 +224,13 @@ rtems_invalidate_multiple_inst_cache_lines( const void * i_addr, size_t n_bytes
void void
rtems_invalidate_entire_inst_cache( void ) rtems_invalidate_entire_inst_cache( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
/* /*
* Call the CPU-specific routine * Call the CPU-specific routine
*/ */
_CPU_invalidate_entire_inst_cache(); _CPU_invalidate_entire_inst_cache();
#endif
} }
@@ -211,7 +240,11 @@ rtems_invalidate_entire_inst_cache( void )
int int
rtems_get_inst_cache_line_size( void ) rtems_get_inst_cache_line_size( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
return _CPU_INST_CACHE_ALIGNMENT; return _CPU_INST_CACHE_ALIGNMENT;
#else
return 0;
#endif
} }
@@ -222,7 +255,9 @@ rtems_get_inst_cache_line_size( void )
void void
rtems_freeze_inst_cache( void ) rtems_freeze_inst_cache( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
_CPU_freeze_inst_cache(); _CPU_freeze_inst_cache();
#endif
} }
@@ -231,7 +266,9 @@ rtems_freeze_inst_cache( void )
*/ */
void rtems_unfreeze_inst_cache( void ) void rtems_unfreeze_inst_cache( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
_CPU_unfreeze_inst_cache(); _CPU_unfreeze_inst_cache();
#endif
} }
@@ -239,7 +276,9 @@ void rtems_unfreeze_inst_cache( void )
void void
rtems_enable_inst_cache( void ) rtems_enable_inst_cache( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
_CPU_enable_inst_cache(); _CPU_enable_inst_cache();
#endif
} }
@@ -247,6 +286,7 @@ rtems_enable_inst_cache( void )
void void
rtems_disable_inst_cache( void ) rtems_disable_inst_cache( void )
{ {
#if defined(_CPU_INST_CACHE_ALIGNMENT)
_CPU_disable_inst_cache(); _CPU_disable_inst_cache();
}
#endif #endif
}
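A consequence of the loop structure in the multiple-line routines above: the start address is rounded down to a line boundary and lines are processed until the last byte of the range is covered, so a transfer that straddles boundaries touches every line it overlaps. A small illustration of the resulting line count, assuming the 16-byte line size used by the i386 and m68k ports in this commit:

#include <stdio.h>

#define LINE 16UL   /* stands in for _CPU_DATA_CACHE_ALIGNMENT */

/* How many lines rtems_flush_multiple_data_cache_lines() would walk. */
static unsigned lines_touched( unsigned long addr, unsigned long nbytes )
{
  unsigned long first = addr & ~( LINE - 1 );
  unsigned long last  = addr + nbytes - 1;
  return (unsigned) ( ( last - first ) / LINE ) + 1;
}

int main( void )
{
  /* 40 bytes starting at 0x1008 span lines 0x1000, 0x1010 and 0x1020. */
  printf( "%u\n", lines_touched( 0x1008, 40 ) );   /* prints 3 */
  return 0;
}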

View File

@@ -44,7 +44,7 @@ RTEMS_CHECK_ITRON_API(RTEMS_BSP)
if test "$tests_enabled" = "yes"; then
# do functionality tests first, then performance tests
cfg_subdirs="libtests sptests" cfg_subdirs="libtests sptests libffi"
if test "$HAS_MP" = "yes"; then
cfg_subdirs="$cfg_subdirs mptests"
fi
@@ -67,7 +67,6 @@ AC_SUBST(BARE_CPU_MODEL)
AC_CONFIG_SUBDIRS(tools)
AC_CONFIG_SUBDIRS(support)
AC_CONFIG_SUBDIRS(samples)
AC_CONFIG_SUBDIRS(libffi)
AC_CONFIG_SUBDIRS($cfg_subdirs)
# Explicitly list all Makefiles here

View File

@@ -419,26 +419,4 @@ void _free_r(
free( ptr );
}
/*
* rtems_cache_aligned_malloc
*
* DESCRIPTION:
*
* This function is used to allocate storage that spans an
* integral number of cache blocks.
*/
RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
size_t nbytes
)
{
/*
* Arrange to have the user storage start on the first cache
* block beyond the header.
*/
return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
+ _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
}
#endif

View File

@@ -42,6 +42,7 @@ extern "C" {
#include <rtems/init.h>
#include <rtems/rtems/tasks.h>
#include <rtems/rtems/intr.h>
#include <rtems/rtems/cache.h>
#include <rtems/rtems/clock.h>
#include <rtems/extension.h>
#include <rtems/rtems/timer.h>

View File

@@ -1,140 +0,0 @@
/* cache.h
*
* Cache Manager
*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
*
* The functions in this file define the API to the RTEMS Cache Manager and
* are divided into data cache and instruction cache functions. Data cache
* functions are only declared if a data cache is supported. Instruction
* cache functions are only declared if an instruction cache is supported.
* Support for a particular cache exists only if _CPU_x_CACHE_ALIGNMENT is
* defined, where x E {DATA, INST}. These definitions are found in the CPU
* dependent source files in the supercore, often
*
* rtems/c/src/exec/score/cpu/CPU/rtems/score/CPU.h
*
* The functions below are implemented with CPU dependent inline routines
* also found in the above file. In the event that a CPU does not support a
* specific function, the CPU dependent routine does nothing (but does exist).
*
* At this point, the Cache Manager makes no considerations, and provides no
* support for BSP specific issues such as a secondary cache. In such a system,
* the CPU dependent routines would have to be modified, or a BSP layer added
* to this Manager.
*/
#ifndef __CACHE_h
#define __CACHE_h
#ifdef __cplusplus
extern "C" {
#endif
#include <rtems/system.h>
#include <sys/types.h>
/* THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE */
#if defined(_CPU_DATA_CACHE_ALIGNMENT)
/*
* This function is called to flush the data cache by performing cache
* copybacks. It must determine how many cache lines need to be copied
* back and then perform the copybacks.
*/
void rtems_flush_multiple_data_cache_lines( const void *, size_t );
/*
* This function is responsible for performing a data cache invalidate.
* It must determine how many cache lines need to be invalidated and then
* perform the invalidations.
*/
void rtems_invalidate_multiple_data_cache_lines( const void *, size_t );
/*
* This function is responsible for performing a data cache flush.
* It flushes the entire cache.
*/
void rtems_flush_entire_data_cache( void );
/*
* This function is responsible for performing a data cache
* invalidate. It invalidates the entire cache.
*/
void rtems_invalidate_entire_data_cache( void );
/*
* This function returns the data cache granularity.
*/
int rtems_get_data_cache_line_size( void );
/*
* This function freezes the data cache.
*/
void rtems_freeze_data_cache( void );
/*
* This function unfreezes the data cache.
*/
void rtems_unfreeze_data_cache( void );
/*
* These functions enable/disable the data cache.
*/
void rtems_enable_data_cache( void );
void rtems_disable_data_cache( void );
#endif
/* THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE */
#if defined(_CPU_INST_CACHE_ALIGNMENT)
/*
* This function is responsible for performing an instruction cache
* invalidate. It must determine how many cache lines need to be invalidated
* and then perform the invalidations.
*/
void rtems_invalidate_multiple_inst_cache_lines( const void *, size_t );
/*
* This function is responsible for performing an instruction cache
* invalidate. It invalidates the entire cache.
*/
void rtems_invalidate_entire_inst_cache( void );
/*
* This function returns the instruction cache granularity.
*/
int rtems_get_inst_cache_line_size( void );
/*
* This function freezes the instruction cache.
*/
void rtems_freeze_inst_cache( void );
/*
* This function unfreezes the instruction cache.
*/
void rtems_unfreeze_inst_cache( void );
/*
* These functions enable/disable the instruction cache.
*/
void rtems_enable_inst_cache( void );
void rtems_disable_inst_cache( void );
#endif
#ifdef __cplusplus
}
#endif
#endif
/* end of include file */

View File

@@ -50,7 +50,7 @@ PARTITION_C_FILES = part.c partcreate.c partdelete.c partgetbuffer.c \
DPMEM_C_FILES = dpmem.c dpmemcreate.c dpmemdelete.c dpmemexternal2internal.c \
dpmemident.c dpmeminternal2external.c
STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \ STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
$(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
$(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
$(PARTITION_C_FILES) $(REGION_C_FILES) $(DPMEM_C_FILES)

View File

@@ -185,101 +185,6 @@ static inline void i386_set_cr3(unsigned int segment)
asm volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) );
}
/*
* Disable the entire cache
*/
void _CPU_disable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 1;
regCr0.cr0.no_write_through = 1;
i386_set_cr0( regCr0.i );
rtems_flush_entire_data_cache();
}
/*
* Enable the entire cache
*/
static inline void _CPU_enable_cache() {
cr0 regCr0;
regCr0.i = i386_get_cr0();
regCr0.cr0.page_level_cache_disable = 0;
regCr0.cr0.no_write_through = 0;
i386_set_cr0( regCr0.i );
/*rtems_flush_entire_data_cache();*/
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*
* FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
* each CPU. The routines below should be implemented per CPU,
* to accomodate the capabilities of each.
*/
/* FIXME: I don't belong here. */
#define I386_CACHE_ALIGNMENT 16
#if defined(I386_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
#define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
static inline void _CPU_freeze_data_cache (void) {}
static inline void _CPU_unfreeze_data_cache (void) {}
static inline void _CPU_invalidate_1_inst_cache_line const void * d_addr() {}
static inline void _CPU_freeze_inst_cache (void) {}
static inline void _CPU_unfreeze_inst_cache (void) {}
static inline void _CPU_flush_entire_data_cache (
const void * d_addr )
{
asm ("wbinvd");
}
static inline void _CPU_invalidate_entire_data_cache (
const void * d_addr )
{
asm ("invd");
}
static inline void _CPU_enable_data_cache (
void )
{
_CPU_enable_cache();
}
static inline void _CPU_disable_data_cache (
void )
{
_CPU_disable_cache();
}
static inline void _CPU_invalidate_entire_inst_cache (
const void * i_addr )
{
asm ("invd");
}
static inline void _CPU_enable_inst_cache (
void )
{
_CPU_enable_cache();
}
static inline void _CPU_disable_inst_cache (
void )
{
_CPU_disable_cache();
}
#endif
/* routines */
/*

View File

@@ -373,217 +373,6 @@ static inline void * _CPU_virtual_to_physical (
}
/*
* Since the cacr is common to all mc680x0, provide macros
* for masking values in that register.
*/
/*
* Used to clear bits in the cacr.
*/
#define _CPU_CACR_AND(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
andl %2, %0; /* and with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* Used to set bits in the cacr.
*/
#define _CPU_CACR_OR(mask) \
{ \
register unsigned long _value = mask; \
register unsigned long _ctl = 0; \
asm volatile ( "movec %%cacr, %0; /* read the cacr */ \
orl %2, %0; /* or with _val */ \
movec %1, %%cacr" /* write the cacr */ \
: "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
}
/*
* CACHE MANAGER: The following functions are CPU-specific.
* They provide the basic implementation for the rtems_* cache
* management routines. If a given function has no meaning for the CPU,
* it does nothing by default.
*/
#if ( defined(__mc68020__) || defined(__mc68030__) )
#define M68K_INST_CACHE_ALIGNMENT 16
#if defined(__mc68030__)
#define M68K_DATA_CACHE_ALIGNMENT 16
/* Only the mc68030 has a data cache; it is writethrough only. */
static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
static inline void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR(0x00000400);
}
static inline void _CPU_invalidate_entire_data_cache (
void )
{
_CPU_CACR_OR( 0x00000800 );
}
static inline void _CPU_freeze_data_cache (
void )
{
_CPU_CACR_OR( 0x00000200 );
}
static inline void _CPU_unfreeze_data_cache (
void )
{
_CPU_CACR_AND( 0xFFFFFDFF );
}
static inline void _CPU_enable_data_cache ( void )
{
_CPU_CACR_OR( 0x00000100 );
}
static inline void _CPU_disable_data_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFEFF );
}
#endif
/* Both the 68020 and 68030 have instruction caches */
static inline void _CPU_invalidate_1_inst_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
_CPU_CACR_OR( 0x00000004 );
}
static inline void _CPU_invalidate_entire_inst_cache (
void )
{
_CPU_CACR_OR( 0x00000008 );
}
static inline void _CPU_freeze_inst_cache (
void )
{
_CPU_CACR_OR( 0x00000002);
}
static inline void _CPU_unfreeze_inst_cache (
void )
{
_CPU_CACR_AND( 0xFFFFFFFD );
}
static inline void _CPU_enable_inst_cache ( void )
{
_CPU_CACR_OR( 0x00000001 );
}
static inline void _CPU_disable_inst_cache ( void )
{
_CPU_CACR_AND( 0xFFFFFFFE );
}
#elif ( defined(__mc68040__) || defined (__mc68060__) )
#define M68K_INST_CACHE_ALIGNMENT 16
#define M68K_DATA_CACHE_ALIGNMENT 16
/* Cannot be frozen */
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}
static inline void _CPU_flush_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
}
static inline void _CPU_invalidate_1_data_cache_line (
const void * d_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
}
static inline void _CPU_flush_entire_data_cache (
void )
{
asm volatile ( "cpusha %%dc" :: );
}
static inline void _CPU_invalidate_entire_data_cache (
void )
{
asm volatile ( "cinva %%dc" :: );
}
static inline void _CPU_enable_data_cache (
void )
{
_CPU_CACR_OR( 0x80000000 );
}
static inline void _CPU_disable_data_cache (
void )
{
_CPU_CACR_AND( 0x7FFFFFFF );
}
static inline void _CPU_invalidate_1_inst_cache_line (
const void * i_addr )
{
void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
}
static inline void _CPU_invalidate_entire_inst_cache (
void )
{
asm volatile ( "cinva %%ic" :: );
}
static inline void _CPU_enable_inst_cache (
void )
{
_CPU_CACR_OR( 0x00008000 );
}
static inline void _CPU_disable_inst_cache (
void )
{
_CPU_CACR_AND( 0xFFFF7FFF );
}
#endif
#if defined(M68K_DATA_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
#endif
#if defined(M68K_INST_CACHE_ALIGNMENT)
#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
#endif
#endif /* !ASM */
#ifdef __cplusplus