Moved from score/cpu/PowerPC.

Joel Sherrill
2000-06-14 15:37:30 +00:00
parent e90e428e14
commit 90ff45b78d
8 changed files with 4 additions and 1150 deletions

Makefile.am

@@ -1,41 +1,10 @@
##
##
## $Id$
##
##
AUTOMAKE_OPTIONS = foreign 1.4
C_FILES = cpuIdent.c
C_O_FILES = $(C_FILES:%.c=$(ARCH)/%.o)
H_FILES = cpu.h spr.h io.h mmu.h page.h byteorder.h pgtable.h
OBJS = $(C_O_FILES)
include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
include $(top_srcdir)/../../../../../automake/lib.am
LIB = $(ARCH)/libcpuspec.a
#
# (OPTIONAL) Add local stuff here using +=
#
$(LIB): $(OBJS)
	$(make-library)

$(PROJECT_INCLUDE)/libcpu:
	$(mkinstalldirs) $@

$(PROJECT_INCLUDE)/libcpu/%.h: %.h
	$(INSTALL_DATA) $< $@

PREINSTALL_FILES += $(PROJECT_INCLUDE)/libcpu \
    $(H_FILES:%=$(PROJECT_INCLUDE)/libcpu/%)

all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS) $(LIB)
.PRECIOUS: $(LIB)
EXTRA_DIST = byteorder.h cpu.h cpuIdent.c io.h mmu.h page.h pgtable.h spr.h
SUBDIRS = include src
include $(top_srcdir)/../../../../../automake/subdirs.am
include $(top_srcdir)/../../../../../automake/local.am

cpu.h

@@ -1,332 +0,0 @@
/*
* cpu.h
*
* This file contains some PowerPC MSR and register access definitions.
*
* It is a stripped-down version of the Linux PPC processor.h file...
*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#ifndef __PPC_CPU_H
#define __PPC_CPU_H
#ifdef __cplusplus
extern "C" {
#endif
/* Bit encodings for Machine State Register (MSR) */
#define MSR_POW (1<<18) /* Enable Power Management */
#define MSR_TGPR (1<<17) /* TLB Update registers in use */
#define MSR_ILE (1<<16) /* Interrupt Little-Endian enable */
#define MSR_EE (1<<15) /* External Interrupt enable */
#define MSR_PR (1<<14) /* Supervisor/User privilege */
#define MSR_FP (1<<13) /* Floating Point enable */
#define MSR_ME (1<<12) /* Machine Check enable */
#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */
#define MSR_SE (1<<10) /* Single Step */
#define MSR_BE (1<<9) /* Branch Trace */
#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */
#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */
#define MSR_IR (1<<5) /* Instruction MMU enable */
#define MSR_DR (1<<4) /* Data MMU enable */
#define MSR_RI (1<<1) /* Recoverable Exception */
#define MSR_LE (1<<0) /* Little-Endian enable */
#define MSR_ (MSR_ME|MSR_RI)
#define MSR_KERNEL (MSR_|MSR_IR|MSR_DR)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
/* Bit encodings for Hardware Implementation Register (HID0)
on PowerPC 603, 604, etc. processors (not 601). */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
#define HID0_SBCLK (1<<27)
#define HID0_EICE (1<<26)
#define HID0_ECLK (1<<25)
#define HID0_PAR (1<<24)
#define HID0_DOZE (1<<23)
#define HID0_NAP (1<<22)
#define HID0_SLEEP (1<<21)
#define HID0_DPM (1<<20)
#define HID0_ICE (1<<15) /* Instruction Cache Enable */
#define HID0_DCE (1<<14) /* Data Cache Enable */
#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
#define HID0_DLOCK (1<<12) /* Data Cache Lock */
#define HID0_ICFI (1<<11) /* Instruction Cache Flash Invalidate */
#define HID0_DCI (1<<10) /* Data Cache Invalidate */
#define HID0_SIED (1<<7) /* Serial Instruction Execution [Disable] */
#define HID0_BHTE (1<<2) /* Branch History Table Enable */
#define HID0_BTCD (1<<1) /* Branch target cache disable */
/* fpscr settings */
#define FPSCR_FX (1<<31)
#define FPSCR_FEX (1<<30)
#define _MACH_prep 1
#define _MACH_Pmac 2 /* pmac or pmac clone (non-chrp) */
#define _MACH_chrp 4 /* chrp machine */
#define _MACH_mbx 8 /* Motorola MBX board */
#define _MACH_apus 16 /* amiga with phase5 powerup */
#define _MACH_fads 32 /* Motorola FADS board */
/* see residual.h for these */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
#define _PREP_Bull 0x03 /* bull prep */
/* these are arbitrary */
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _GLOBAL(n)\
.globl n;\
n:
#define TBRU 269 /* Time base Upper/Lower (Reading) */
#define TBRL 268
#define TBWU 285 /* Time base Upper/Lower (Writing) */
#define TBWL 284
#define XER 1
#define LR 8
#define CTR 9
#define HID0 1008 /* Hardware Implementation */
#define PVR 287 /* Processor Version */
#define IBAT0U 528 /* Instruction BAT #0 Upper/Lower */
#define IBAT0L 529
#define IBAT1U 530 /* Instruction BAT #1 Upper/Lower */
#define IBAT1L 531
#define IBAT2U 532 /* Instruction BAT #2 Upper/Lower */
#define IBAT2L 533
#define IBAT3U 534 /* Instruction BAT #3 Upper/Lower */
#define IBAT3L 535
#define DBAT0U 536 /* Data BAT #0 Upper/Lower */
#define DBAT0L 537
#define DBAT1U 538 /* Data BAT #1 Upper/Lower */
#define DBAT1L 539
#define DBAT2U 540 /* Data BAT #2 Upper/Lower */
#define DBAT2L 541
#define DBAT3U 542 /* Data BAT #3 Upper/Lower */
#define DBAT3L 543
#define DMISS 976 /* TLB Lookup/Refresh registers */
#define DCMP 977
#define HASH1 978
#define HASH2 979
#define IMISS 980
#define ICMP 981
#define RPA 982
#define SDR1 25 /* MMU hash base register */
#define DAR 19 /* Data Address Register */
#define SPR0 272 /* Supervisor Private Registers */
#define SPRG0 272
#define SPR1 273
#define SPRG1 273
#define SPR2 274
#define SPRG2 274
#define SPR3 275
#define SPRG3 275
#define DSISR 18
#define SRR0 26 /* Saved Registers (exception) */
#define SRR1 27
#define IABR 1010 /* Instruction Address Breakpoint */
#define DEC 22 /* Decrementer */
#define EAR 282 /* External Address Register */
#define L2CR 1017 /* PPC 750 L2 control register */
#define THRM1 1020
#define THRM2 1021
#define THRM3 1022
#define THRM1_TIN 0x1
#define THRM1_TIV 0x2
#define THRM1_THRES (0x7f<<2)
#define THRM1_TID (1<<29)
#define THRM1_TIE (1<<30)
#define THRM1_V (1<<31)
#define THRM3_E (1<<31)
/* Segment Registers */
#define SR0 0
#define SR1 1
#define SR2 2
#define SR3 3
#define SR4 4
#define SR5 5
#define SR6 6
#define SR7 7
#define SR8 8
#define SR9 9
#define SR10 10
#define SR11 11
#define SR12 12
#define SR13 13
#define SR14 14
#define SR15 15
#ifndef ASM
typedef enum {
  PPC_601     = 0x1,
  PPC_603     = 0x3,
  PPC_604     = 0x4,
  PPC_603e    = 0x6,
  PPC_603ev   = 0x7,
  PPC_750     = 0x8,
  PPC_604e    = 0x9,
  PPC_604r    = 0xA,
  PPC_620     = 0x16,
  PPC_860     = 0x50,
  PPC_821     = PPC_860,
  PPC_UNKNOWN = 0xff
} ppc_cpu_id_t;
typedef unsigned short ppc_cpu_revision_t;
extern ppc_cpu_id_t get_ppc_cpu_type();
extern ppc_cpu_id_t current_ppc_cpu;
extern ppc_cpu_revision_t get_ppc_cpu_revision();
extern ppc_cpu_revision_t current_ppc_revision;
/*
* Routines to access the time base register
*/
static inline unsigned long long PPC_Get_timebase_register( void )
{
  unsigned long tbr_low;
  unsigned long tbr_high;
  unsigned long tbr_high_old;
  unsigned long long tbr;

  do {
    asm volatile( "mftbu %0" : "=r" (tbr_high_old));
    asm volatile( "mftb %0" : "=r" (tbr_low));
    asm volatile( "mftbu %0" : "=r" (tbr_high));
  } while ( tbr_high_old != tbr_high );

  tbr = tbr_high;
  tbr <<= 32;
  tbr |= tbr_low;
  return tbr;
}

static inline void PPC_Set_timebase_register (unsigned long long tbr)
{
  unsigned long tbr_low;
  unsigned long tbr_high;

  tbr_low = (tbr & 0xffffffff);
  tbr_high = (tbr >> 32) & 0xffffffff;
  asm volatile( "mtspr 284, %0" : : "r" (tbr_low));
  asm volatile( "mtspr 285, %0" : : "r" (tbr_high));
}
#endif
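To illustrate how the timebase accessors above are typically used, here is a minimal timing sketch (not code from this commit); the tick-to-microsecond rate is board-specific and TB_TICKS_PER_US is a hypothetical name:

#define TB_TICKS_PER_US 25 /* hypothetical: timebase ticks per microsecond */

static unsigned long measure_us(void)
{
  unsigned long long start;
  unsigned long long elapsed;

  start = PPC_Get_timebase_register();
  /* ... code under measurement goes here ... */
  elapsed = PPC_Get_timebase_register() - start;
  return (unsigned long) (elapsed / TB_TICKS_PER_US);
}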
#define _CPU_MSR_GET( _msr_value ) \
  do { \
    _msr_value = 0; \
    asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
  } while (0)

#define _CPU_MSR_SET( _msr_value ) \
  { asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }

#define _CPU_ISR_Disable( _isr_cookie ) \
  { register unsigned int _disable_mask = MSR_EE; \
    _isr_cookie = 0; \
    asm volatile ( \
      "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
      "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
      "0" ((_isr_cookie)), "1" ((_disable_mask)) \
    ); \
  }

/*
 * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 * This indicates the end of an RTEMS critical section. The parameter
 * _isr_cookie is not modified.
 */
#define _CPU_ISR_Enable( _isr_cookie ) \
  { \
    asm volatile ( "mtmsr %0" : \
      "=r" ((_isr_cookie)) : \
      "0" ((_isr_cookie))); \
  }

/*
 * This temporarily restores interrupts to the level in _isr_cookie before
 * immediately disabling them again. This is used to divide long RTEMS
 * critical sections into two or more parts. The parameter _isr_cookie is
 * not modified.
 *
 * NOTE: The version being used is not very optimized but it does
 * not trip a problem in gcc where the disable mask does not
 * get loaded. Check this for future (post 10/97) gcc versions.
 */
#define _CPU_ISR_Flash( _isr_cookie ) \
  { register unsigned int _disable_mask = MSR_EE; \
    asm volatile ( \
      "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
      "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
      "0" ((_isr_cookie)), "1" ((_disable_mask)) \
    ); \
  }
/* end of ISR handler macros */
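A minimal usage sketch of the cookie protocol above (not code from this commit): _CPU_ISR_Disable saves the current MSR in the cookie while clearing MSR_EE, and _CPU_ISR_Enable restores it, so a section entered with interrupts already off never re-enables them.

void update_shared_counter(volatile unsigned int *counter)
{
  unsigned int level;

  _CPU_ISR_Disable( level );  /* level captures the previous MSR */
  (*counter)++;               /* keep the critical section short */
  _CPU_ISR_Enable( level );   /* restore the saved MSR, MSR_EE included */
}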
/*
* Simple spin delay in microsecond units for device drivers.
* This is very dependent on the clock speed of the target.
*/
#define CPU_Get_timebase_low( _value ) \
  asm volatile( "mftb %0" : "=r" (_value) )

#define delay( _microseconds ) \
  do { \
    unsigned32 start, ticks, now; \
    CPU_Get_timebase_low( start ); \
    ticks = (_microseconds) * rtems_cpu_configuration_get_clicks_per_usec(); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < ticks); \
  } while (0)

#define delay_in_bus_cycles( _cycles ) \
  do { \
    unsigned32 start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)

#define PPC_Set_decrementer( _clicks ) \
  do { \
    asm volatile( "mtdec %0" : : "r" ((_clicks)) ); \
  } while (0)
#ifdef __cplusplus
}
#endif
#endif /* __PPC_CPU_H */

cpuIdent.c

@@ -1,53 +0,0 @@
/*
* cpuIdent.c -- Cpu identification code
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*
*/
#include <libcpu/spr.h>
/*
* Generate inline code to read Processor Version Register
*/
SPR_RO(PVR)
ppc_cpu_id_t current_ppc_cpu = PPC_UNKNOWN;
ppc_cpu_revision_t current_ppc_revision = 0xff;
ppc_cpu_id_t get_ppc_cpu_type()
{
  unsigned int pvr = (_read_PVR() >> 16);

  current_ppc_cpu = (ppc_cpu_id_t) pvr;
  switch (pvr) {
    case PPC_601:
    case PPC_603:
    case PPC_604:
    case PPC_603e:
    case PPC_603ev:
    case PPC_750:
    case PPC_604e:
    case PPC_604r:
    case PPC_620:
    case PPC_860:
      current_ppc_cpu = (ppc_cpu_id_t) pvr;
      return current_ppc_cpu;
    default:
      printk("Unknown PVR value. Please add it to <libcpu/powerpc/shared/cpu.h>\n");
      return PPC_UNKNOWN;
  }
}

ppc_cpu_revision_t get_ppc_cpu_revision()
{
  ppc_cpu_revision_t rev = (ppc_cpu_revision_t) (_read_PVR() & 0xffff);

  current_ppc_revision = rev;
  return rev;
}
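A typical boot-time use of these helpers might look like the following sketch (not code from this commit; the function name is hypothetical):

void report_cpu(void)
{
  ppc_cpu_id_t       cpu = get_ppc_cpu_type();
  ppc_cpu_revision_t rev = get_ppc_cpu_revision();

  switch (cpu) {
    case PPC_750:
      printk("PowerPC 750, revision 0x%x\n", rev);
      break;
    default:
      printk("PVR type 0x%x, revision 0x%x\n", cpu, rev);
      break;
  }
}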

io.h

@@ -1,130 +0,0 @@
/*
* io.h
*
* This file contains inline implementations of functions to
* deal with I/O.
*
* It is a stripped-down version of the Linux PPC file...
*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#ifndef _LIBCPU_IO_H_
#define _LIBCPU_IO_H_
#define PREP_ISA_IO_BASE 0x80000000
#define PREP_ISA_MEM_BASE 0xc0000000
#define PREP_PCI_DRAM_OFFSET 0x80000000
#define _IO_BASE PREP_ISA_IO_BASE
#define _ISA_MEM_BASE PREP_ISA_MEM_BASE
#define PCI_DRAM_OFFSET PREP_PCI_DRAM_OFFSET
#ifndef ASM
#define inb(port) in_8((unsigned char *)((port)+_IO_BASE))
#define outb(val, port) out_8((unsigned char *)((port)+_IO_BASE), (val))
#define inw(port) in_le16((unsigned short *)((port)+_IO_BASE))
#define outw(val, port) out_le16((unsigned short *)((port)+_IO_BASE), (val))
#define inl(port) in_le32((unsigned *)((port)+_IO_BASE))
#define outl(val, port) out_le32((unsigned *)((port)+_IO_BASE), (val))
/*
* Enforce In-order Execution of I/O:
* Acts as a barrier to ensure all previous I/O accesses have
* completed before any further ones are issued.
*/
extern inline void eieio(void)
{
  __asm__ __volatile__ ("eieio");
}
/* Enforce in-order execution of data I/O.
* No distinction between read/write on PPC; use eieio for all three.
*/
#define iobarrier_rw() eieio()
#define iobarrier_r() eieio()
#define iobarrier_w() eieio()
/*
* 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
*/
extern inline int in_8(volatile unsigned char *addr)
{
  int ret;

  __asm__ __volatile__("lbz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
  return ret;
}

extern inline void out_8(volatile unsigned char *addr, int val)
{
  __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline int in_le16(volatile unsigned short *addr)
{
  int ret;

  __asm__ __volatile__("lhbrx %0,0,%1; eieio" : "=r" (ret) :
                       "r" (addr), "m" (*addr));
  return ret;
}

extern inline int in_be16(volatile unsigned short *addr)
{
  int ret;

  __asm__ __volatile__("lhz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
  return ret;
}

extern inline void out_le16(volatile unsigned short *addr, int val)
{
  __asm__ __volatile__("sthbrx %1,0,%2; eieio" : "=m" (*addr) :
                       "r" (val), "r" (addr));
}

extern inline void out_be16(volatile unsigned short *addr, int val)
{
  __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline unsigned in_le32(volatile unsigned *addr)
{
  unsigned ret;

  __asm__ __volatile__("lwbrx %0,0,%1; eieio" : "=r" (ret) :
                       "r" (addr), "m" (*addr));
  return ret;
}

extern inline unsigned in_be32(volatile unsigned *addr)
{
  unsigned ret;

  __asm__ __volatile__("lwz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
  return ret;
}

extern inline void out_le32(volatile unsigned *addr, int val)
{
  __asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
                       "r" (val), "r" (addr));
}

extern inline void out_be32(volatile unsigned *addr, int val)
{
  __asm__ __volatile__("stw%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}
#endif /* ASM */
#endif /* _LIBCPU_IO_H_ */
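A usage sketch for these accessors (not code from this commit): inb() adds _IO_BASE itself, so callers pass bare ISA port numbers. The 16550 UART registers at 0x3F8/0x3FD serve purely as an example device.

#include <libcpu/io.h>

unsigned char poll_uart_byte(void)
{
  /* spin until the line status register reports received data */
  while ((inb(0x3FD) & 0x01) == 0)
    iobarrier_r();        /* keep the polling loads ordered */
  return inb(0x3F8);      /* then read the data register */
}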

mmu.h

@@ -1,306 +0,0 @@
/*
* mmu.h
*
* PowerPC memory management structures
*
* It is a stripped-down version of the Linux PPC file...
*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#ifndef _PPC_MMU_H_
#define _PPC_MMU_H_
#ifndef ASM
/* Hardware Page Table Entry */
typedef struct _PTE {
  unsigned long v:1;      /* Entry is valid */
  unsigned long vsid:24;  /* Virtual segment identifier */
  unsigned long h:1;      /* Hash algorithm indicator */
  unsigned long api:6;    /* Abbreviated page index */
  unsigned long rpn:20;   /* Real (physical) page number */
  unsigned long :3;       /* Unused */
  unsigned long r:1;      /* Referenced */
  unsigned long c:1;      /* Changed */
  unsigned long w:1;      /* Write-thru cache mode */
  unsigned long i:1;      /* Cache inhibited */
  unsigned long m:1;      /* Memory coherence */
  unsigned long g:1;      /* Guarded */
  unsigned long :1;       /* Unused */
  unsigned long pp:2;     /* Page protection */
} PTE;
/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
/* Segment Register */
typedef struct _SEGREG {
  unsigned long t:1;      /* Normal or I/O type */
  unsigned long ks:1;     /* Supervisor 'key' (normally 0) */
  unsigned long kp:1;     /* User 'key' (normally 1) */
  unsigned long n:1;      /* No-execute */
  unsigned long :4;       /* Unused */
  unsigned long vsid:24;  /* Virtual Segment Identifier */
} SEGREG;
/* Block Address Translation (BAT) Registers */
typedef struct _P601_BATU { /* Upper part of BAT for 601 processor */
  unsigned long bepi:15;  /* Effective page index (virtual address) */
  unsigned long :8;       /* Unused */
  unsigned long w:1;
  unsigned long i:1;      /* Cache inhibit */
  unsigned long m:1;      /* Memory coherence */
  unsigned long ks:1;     /* Supervisor key (normally 0) */
  unsigned long kp:1;     /* User key (normally 1) */
  unsigned long pp:2;     /* Page access protections */
} P601_BATU;

typedef struct _BATU {    /* Upper part of BAT (all except 601) */
  unsigned long bepi:15;  /* Effective page index (virtual address) */
  unsigned long :4;       /* Unused */
  unsigned long bl:11;    /* Block size mask */
  unsigned long vs:1;     /* Supervisor valid */
  unsigned long vp:1;     /* User valid */
} BATU;

typedef struct _P601_BATL { /* Lower part of BAT for 601 processor */
  unsigned long brpn:15;  /* Real page index (physical address) */
  unsigned long :10;      /* Unused */
  unsigned long v:1;      /* Valid bit */
  unsigned long bl:6;     /* Block size mask */
} P601_BATL;

typedef struct _BATL {    /* Lower part of BAT (all except 601) */
  unsigned long brpn:15;  /* Real page index (physical address) */
  unsigned long :10;      /* Unused */
  unsigned long w:1;      /* Write-thru cache */
  unsigned long i:1;      /* Cache inhibit */
  unsigned long m:1;      /* Memory coherence */
  unsigned long g:1;      /* Guarded (MBZ in IBAT) */
  unsigned long :1;       /* Unused */
  unsigned long pp:2;     /* Page access protections */
} BATL;

typedef struct _BAT {
  BATU batu;              /* Upper register */
  BATL batl;              /* Lower register */
} BAT;

typedef struct _P601_BAT {
  P601_BATU batu;         /* Upper register */
  P601_BATL batl;         /* Lower register */
} P601_BAT;
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
#define BL_512K 0x003
#define BL_1M 0x007
#define BL_2M 0x00F
#define BL_4M 0x01F
#define BL_8M 0x03F
#define BL_16M 0x07F
#define BL_32M 0x0FF
#define BL_64M 0x1FF
#define BL_128M 0x3FF
#define BL_256M 0x7FF
/* BAT Access Protection */
#define BPP_XX 0x00 /* No access */
#define BPP_RX 0x01 /* Read only */
#define BPP_RW 0x02 /* Read/write */
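To illustrate how the upper and lower BAT words combine, here is a sketch (not code from this commit) of a cached, identity-mapped 16MB supervisor region at address 0; bepi/brpn hold the top 15 address bits, so a base address is shifted right by 17:

void map_16M_identity(void)
{
  BAT bat;

  bat.batu.bepi = 0x00000000 >> 17; /* effective block base (top 15 bits) */
  bat.batu.bl   = BL_16M;           /* block length mask */
  bat.batu.vs   = 1;                /* valid in supervisor mode */
  bat.batu.vp   = 0;                /* not valid in user mode */

  bat.batl.brpn = 0x00000000 >> 17; /* physical block base (identity map) */
  bat.batl.w = 0;  bat.batl.i = 0;  /* copy-back, cacheable */
  bat.batl.m = 0;  bat.batl.g = 0;  /* not coherent, not guarded */
  bat.batl.pp   = BPP_RW;           /* supervisor read/write */

  /* the two words would then be written to e.g. DBAT0U/DBAT0L via mtspr */
  (void) bat;
}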
/*
* Simulated two-level MMU. This structure is used by the kernel
* to keep track of MMU mappings and is used to update/maintain
* the hardware HASH table which is really a cache of mappings.
*
* The simulated structures mimic the hardware available on other
* platforms, notably the 80x86 and 680x0.
*/
typedef struct _pte {
  unsigned long page_num:20;
  unsigned long flags:12;   /* Page flags (some unused bits) */
} pte;
#define PD_SHIFT (10+12) /* Page directory */
#define PD_MASK 0x03FF
#define PT_SHIFT (12) /* Page Table */
#define PT_MASK 0x03FF
#define PG_SHIFT (12) /* Page Entry */
/* MMU context */
typedef struct _MMU_context {
  SEGREG segs[16];   /* Segment registers */
  pte **pmap;        /* Two-level page-map structure */
} MMU_context;
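A sketch of the two-level lookup these shifts and masks imply (not code from this commit): PD_SHIFT/PD_MASK select the first-level slot and PT_SHIFT/PT_MASK the second-level slot of a 10/10/12 address split.

pte *two_level_lookup(MMU_context *ctx, unsigned long va)
{
  unsigned long pd_index = (va >> PD_SHIFT) & PD_MASK; /* top 10 bits */
  unsigned long pt_index = (va >> PT_SHIFT) & PT_MASK; /* next 10 bits */
  pte *table = ctx->pmap[pd_index];                    /* first level */

  return table ? &table[pt_index] : 0;                 /* second level */
}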
/* Used to set up SDR1 register */
#define HASH_TABLE_SIZE_64K 0x00010000
#define HASH_TABLE_SIZE_128K 0x00020000
#define HASH_TABLE_SIZE_256K 0x00040000
#define HASH_TABLE_SIZE_512K 0x00080000
#define HASH_TABLE_SIZE_1M 0x00100000
#define HASH_TABLE_SIZE_2M 0x00200000
#define HASH_TABLE_SIZE_4M 0x00400000
#define HASH_TABLE_MASK_64K 0x000
#define HASH_TABLE_MASK_128K 0x001
#define HASH_TABLE_MASK_256K 0x003
#define HASH_TABLE_MASK_512K 0x007
#define HASH_TABLE_MASK_1M 0x00F
#define HASH_TABLE_MASK_2M 0x01F
#define HASH_TABLE_MASK_4M 0x03F
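For illustration (not code from this commit): SDR1 pairs the hash table's physical base with the matching size mask. A sketch for a 1MB table at a hypothetical, suitably aligned base, using the SPR_RW generator from spr.h in this same directory:

#include <libcpu/spr.h>

SPR_RW(SDR1)  /* generates _read_SDR1() and _write_SDR1() */

void install_hash_table(void)
{
  unsigned long base = 0x00100000;  /* hypothetical 1MB-aligned table base */

  _write_SDR1(base | HASH_TABLE_MASK_1M);
}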
/* invalidate a TLB entry */
extern inline void _tlbie(unsigned long va)
{
  asm volatile ("tlbie %0" : : "r" (va));
}
extern void _tlbia(void); /* invalidate all TLB entries */
#endif /* ASM */
/* Control/status registers for the MPC8xx.
* A write operation to these registers causes serialized access.
* During software tablewalk, the registers used perform mask/shift-add
* operations when written/read. A TLB entry is created when the Mx_RPN
* is written, and the contents of several registers are used to
* create the entry.
*/
#define MI_CTR 784 /* Instruction TLB control register */
#define MI_GPM 0x80000000 /* Set domain manager mode */
#define MI_PPM 0x40000000 /* Set subpage protection */
#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
#define MI_RESETVAL 0x00000000 /* Value of register at reset */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
#define MI_AP 786
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
* this register are used to create the TLB entry.
*/
#define MI_EPN 787
#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MI_EVALID 0x00000200 /* Entry is valid */
#define MI_ASIDMASK 0x0000000f /* ASID match value */
/* Reset value is undefined */
/* A "level 1" or "segment" or whatever you want to call it register.
* For the instruction TLB, it contains bits that get loaded into the
* TLB entry when the MI_RPN is written.
*/
#define MI_TWC 789
#define MI_APG 0x000001e0 /* Access protection group (0) */
#define MI_GUARDED 0x00000010 /* Guarded storage */
#define MI_PSMASK 0x0000000c /* Mask of page size bits */
#define MI_PS8MEG 0x0000000c /* 8M page size */
#define MI_PS512K 0x00000004 /* 512K page size */
#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
#define MI_SVALID 0x00000001 /* Segment entry is valid */
/* Reset value is undefined */
/* Real page number. Defined by the pte. Writing this register
* causes a TLB entry to be created for the instruction TLB, using
* additional information from the MI_EPN, and MI_TWC registers.
*/
#define MI_RPN 790
/* Define an RPN value for mapping kernel memory to large virtual
* pages for boot initialization. This has real page number of 0,
* large page size, shared page, cache enabled, and valid.
* Also mark all subpages valid and write access.
*/
#define MI_BOOTINIT 0x000001fd
#define MD_CTR 792 /* Data TLB control register */
#define MD_GPM 0x80000000 /* Set domain manager mode */
#define MD_PPM 0x40000000 /* Set subpage protection */
#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
#define MD_PPCS 0x02000000 /* Use MD_RPN prob/priv state */
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
#define MD_RESETVAL 0x04000000 /* Value of register at reset */
#define M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
#define MD_AP 794
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
/* The effective page number register. When read, contains the information
* about the last data TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
*/
#define MD_EPN 795
#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MD_EVALID 0x00000200 /* Entry is valid */
#define MD_ASIDMASK 0x0000000f /* ASID match value */
/* Reset value is undefined */
/* The pointer to the base address of the first level page table.
* During a software tablewalk, reading this register provides the address
* of the entry associated with MD_EPN.
*/
#define M_TWB 796
#define M_L1TB 0xfffff000 /* Level 1 table base address */
#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
/* Reset value is undefined */
/* A "level 1" or "segment" or whatever you want to call it register.
* For the data TLB, it contains bits that get loaded into the TLB entry
* when the MD_RPN is written. It also provides the hardware assist
* for finding the PTE address during software tablewalk.
*/
#define MD_TWC 797
#define MD_L2TB 0xfffff000 /* Level 2 table base address */
#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
#define MD_APG 0x000001e0 /* Access protection group (0) */
#define MD_GUARDED 0x00000010 /* Guarded storage */
#define MD_PSMASK 0x0000000c /* Mask of page size bits */
#define MD_PS8MEG 0x0000000c /* 8M page size */
#define MD_PS512K 0x00000004 /* 512K page size */
#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
#define MD_WT 0x00000002 /* Use writethrough page attribute */
#define MD_SVALID 0x00000001 /* Segment entry is valid */
/* Reset value is undefined */
/* Real page number. Defined by the pte. Writing this register
* causes a TLB entry to be created for the data TLB, using
* additional information from the MD_EPN, and MD_TWC registers.
*/
#define MD_RPN 798
/* This is a temporary storage register that could be used to save
* a processor working register during a tablewalk.
*/
#define M_TW 799
#endif /* _PPC_MMU_H_ */

page.h

@@ -1,68 +0,0 @@
/*
* page.h
*
* PowerPC memory management structures
*
* It is a stripped-down version of the Linux PPC file...
*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_OFFSET 0xc0000000
#ifndef ASM
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
/* align addr on a size boundary - adjust address up if needed -- Cort */
#define _ALIGN(addr,size) (((addr)+size-1)&(~(size-1)))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
/* map phys->virtual and virtual->phys for RAM pages */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define MAP_NR(addr) (((unsigned long)addr-PAGE_OFFSET) >> PAGE_SHIFT)
#define MAP_PAGE_RESERVED (1<<15)
extern unsigned long get_zero_page_fast(void);
#endif /* ASM */
#endif /* _PPC_PAGE_H */
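Worked examples of the arithmetic these macros perform, assuming the PAGE_OFFSET above (not part of the commit):

/* _ALIGN(0x1234, 0x1000)  == 0x2000      round up to a 4K multiple      */
/* PAGE_ALIGN(0xc0001234)  == 0xc0002000  round up to the next page      */
/* __pa(0xc0001234)        == 0x00001234  kernel virtual -> physical     */
/* __va(0x00001234)        == (void *) 0xc0001234  physical -> virtual   */
/* MAP_NR(0xc0002000)      == 2           page frame index of that page  */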

pgtable.h

@@ -1,146 +0,0 @@
/*
* pgtable.h
*
* PowerPC memory management structures
*
* It is a stripped-down version of the Linux PPC file...
*
* Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
* Canon Centre Recherche France.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*/
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H
/*
* The PowerPC MMU uses a hash table containing PTEs, together with
* a set of 16 segment registers (on 32-bit implementations), to define
* the virtual to physical address mapping.
*
* We use the hash table as an extended TLB, i.e. a cache of currently
* active mappings. We maintain a two-level page table tree, much like
* that used by the i386, for the sake of the Linux memory management code.
* Low-level assembler code in head.S (procedure hash_page) is responsible
* for extracting ptes from the tree and putting them into the hash table
* when necessary, and updating the accessed and modified bits in the
* page table tree.
*
* The PowerPC MPC8xx uses a TLB with hardware-assisted software tablewalk.
* We also use the two level tables, but we can put the real bits in them
* needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
* Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
* additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
* based upon user/super access. The TLB does not have accessed or write
* protect bits. We assume that if the TLB gets loaded with an entry it is
* accessed, and overload the changed bit for write protect. We use
* two bits in the software pte that are supposed to be set to zero in
* the TLB entry (24 and 25) for these indicators. Although the level 1
* descriptor contains the guarded and writethrough/copyback bits, we can
* set these at the page level since they get copied from the Mx_TWC
* register when the TLB entry is loaded. We will use bit 27 for guard, since
* that is where it exists in the MD_TWC, and bit 26 for writethrough.
* These will get masked from the level 2 descriptor at TLB load time, and
* copied to the MD_TWC before it gets loaded.
*/
/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* entries per page directory level: our page-table tree is two-level, so
* we don't really have any PMD directory.
*/
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 64MB value just means that there will be a 64MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leave a hole of 4kB between each vmalloced
* area for the same reason. ;)
*
* We no longer map larger than phys RAM with the BATs so we don't have
* to worry about the VMALLOC_OFFSET causing problems. We do have to worry
* about clashes between our early calls to ioremap() that start growing down
* from ioremap_base being run into the VM area allocations (growing upwards
* from VMALLOC_START). For this reason we have ioremap_bot to check when
* we actually run into our mappings setup in the early boot with the VM
* system. This really does become a problem for machines with good amounts
* of RAM. -- Cort
*/
#define VMALLOC_OFFSET (0x4000000) /* 64M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END ioremap_bot
/*
* Bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible.
*/
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_USER 0x002 /* matches one of the PP bits */
#define _PAGE_RW 0x004 /* software: user write access allowed */
#define _PAGE_GUARDED 0x008
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED 0
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
_PAGE_SHARED)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
_PAGE_NO_CACHE )
/*
* The PowerPC can only do execute protection on a segment (256MB) basis,
* not on a page basis. So we consider execute permission the same as read.
* Also, write permissions imply read permissions.
* This is the closest we can get..
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
#endif /* _PPC_PGTABLE_H */
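A sketch of composing a kernel PTE value from these bits with the page.h wrappers (not code from this commit; the physical address is a placeholder):

#include <libcpu/page.h>

pte_t make_kernel_pte(unsigned long phys)
{
  /* keep the page frame bits, then OR in the kernel protection flags */
  return __pte((phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL));
}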

spr.h

@@ -1,80 +0,0 @@
/*
* spr.h -- Access to special purpose registers.
*
* Copyright (C) 1998 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
* $Id$
*
*/
#ifndef _PPC_SPR_H
#define _PPC_SPR_H
#include <libcpu/cpu.h>
#define __MFSPR(reg, val) \
__asm__ __volatile__("mfspr %0,"#reg : "=r" (val))
#define __MTSPR(val, reg) \
__asm__ __volatile__("mtspr "#reg",%0" : : "r" (val))
#define SPR_RW(reg) \
static inline unsigned long _read_##reg(void) \
{ \
  unsigned long val; \
  __MFSPR(reg, val); \
  return val; \
} \
static inline void _write_##reg(unsigned long val) \
{ \
  __MTSPR(val, reg); \
  return; \
}

#define SPR_RO(reg) \
static inline unsigned long _read_##reg(void) \
{ \
  unsigned long val; \
  __MFSPR(reg, val); \
  return val; \
}
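Each invocation of these macros mints an accessor pair for one SPR; cpuIdent.c above does exactly this with SPR_RO(PVR). A writable example (not code from this commit), using the DEC number from cpu.h:

SPR_RW(DEC)  /* defines _read_DEC() and _write_DEC() */

static inline void arm_decrementer(unsigned long ticks)
{
  _write_DEC(ticks);  /* expands to: mtspr 22, ticks */
}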
static inline unsigned long _read_MSR(void)
{
  unsigned long val;

  asm volatile("mfmsr %0" : "=r" (val));
  return val;
}

static inline void _write_MSR(unsigned long val)
{
  asm volatile("mtmsr %0" : : "r" (val));
  return;
}

static inline unsigned long _read_SR(void *va)
{
  unsigned long val;

  asm volatile("mfsrin %0,%1" : "=r" (val) : "r" (va));
  return val;
}

static inline void _write_SR(unsigned long val, void *va)
{
  asm volatile("mtsrin %0,%1" : : "r" (val), "r" (va) : "memory");
  return;
}
#endif