2001-05-14 Till Straumann <strauman@slac.stanford.edu>

* rtems/powerpc/registers.h, rtems/score/ppc.h: Per PR213, add
	the following:
	    - support for the MPC7400 (AKA G4); there is no
	      AltiVec support yet, however.
	    - the cache flushing assembly code uses hardware-flush on the G4.
	      Also, a couple of hardcoded numerical values were replaced
	      by more readable symbolic constants.
	    - extended interrupt-disabled code section to enclose the entire
	      cache flush/invalidate procedure (as recommended by the book).
	      This is not (latency) critical as it is only used by
	      init code but prevents possible corruption.
	    - Trivial page table support has been added.
	      (1:1 effective-virtual-physical address mapping which is
	      useful only on CPUs which feature hardware TLB replacement,
	      e.g. >604.  This allows for write-protecting memory regions,
	      e.g. text/ro-data which makes catching corruptors a lot easier.
	      It also frees one DBAT/IBAT and gives more flexibility
	      for setting up address maps :-)
	    - setdbat() allows changing BAT0 also (since the BSP may use
	      a page table, BAT0 could be available...).
	    - asm_setdbatX() violated the SVR ABI by using
	      r20 as a scratch register; changed for r0
	    - according to the book, a context synchronizing instruction is
	      necessary prior to and after changing a DBAT -> isync added
This commit is contained in:
Joel Sherrill
2002-05-14 16:56:44 +00:00
parent 78f8c91747
commit 0d776cd247
8 changed files with 206 additions and 58 deletions

View File

@@ -1,3 +1,30 @@
2001-05-14 Till Straumann <strauman@slac.stanford.edu>
* rtems/powerpc/registers.h, rtems/score/ppc.h: Per PR213, add
the following:
- support for the MPC7400 (AKA G4); there is no
AltiVec support yet, however.
- the cache flushing assembly code uses hardware-flush on the G4.
Also, a couple of hardcoded numerical values were replaced
by more readable symbolic constants.
- extended interrupt-disabled code section to enclose the entire
cache flush/invalidate procedure (as recommended by the book).
This is not (latency) critical as it is only used by
init code but prevents possible corruption.
- Trivial page table support has been added.
(1:1 effective-virtual-physical address mapping which is
useful only on CPUs which feature hardware TLB replacement,
e.g. >604. This allows for write-protecting memory regions,
e.g. text/ro-data which makes catching corruptors a lot easier.
It also frees one DBAT/IBAT and gives more flexibility
for setting up address maps :-)
- setdbat() allows changing BAT0 also (since the BSP may use
a page table, BAT0 could be available...).
- asm_setdbatX() violated the SVR ABI by using
r20 as a scratch register; changed for r0
- according to the book, a context synchronizing instruction is
necessary prior to and after changing a DBAT -> isync added
2002-04-30 Ralf Corsepius <corsepiu@faw.uni-ulm.de> 2002-04-30 Ralf Corsepius <corsepiu@faw.uni-ulm.de>
* rtems/powerpc/cache.h: New file (extracted from * rtems/powerpc/cache.h: New file (extracted from

View File

@@ -28,6 +28,7 @@ RTEMS_CHECK_CUSTOM_BSP(RTEMS_BSP)
RTEMS_CHECK_BSP_CACHE(RTEMS_BSP) RTEMS_CHECK_BSP_CACHE(RTEMS_BSP)
AM_CONDITIONAL(shared, test "$RTEMS_CPU_MODEL" = "mpc750" \ AM_CONDITIONAL(shared, test "$RTEMS_CPU_MODEL" = "mpc750" \
|| test "$RTEMS_CPU_MODEL" = "mpc7400" \
|| test "$RTEMS_CPU_MODEL" = "ppc603e" \ || test "$RTEMS_CPU_MODEL" = "ppc603e" \
|| test "$RTEMS_CPU_MODEL" = "mpc604" \ || test "$RTEMS_CPU_MODEL" = "mpc604" \
|| test "$RTEMS_CPU_MODEL" = "mpc6xx" \ || test "$RTEMS_CPU_MODEL" = "mpc6xx" \
@@ -40,6 +41,7 @@ AM_CONDITIONAL(shared, test "$RTEMS_CPU_MODEL" = "mpc750" \
AM_CONDITIONAL(mpc505, test "$RTEMS_CPU_MODEL" = "mpc505") AM_CONDITIONAL(mpc505, test "$RTEMS_CPU_MODEL" = "mpc505")
AM_CONDITIONAL(mpc6xx, test "$RTEMS_CPU_MODEL" = "mpc6xx" \ AM_CONDITIONAL(mpc6xx, test "$RTEMS_CPU_MODEL" = "mpc6xx" \
|| test "$RTEMS_CPU_MODEL" = "mpc604" \ || test "$RTEMS_CPU_MODEL" = "mpc604" \
|| test "$RTEMS_CPU_MODEL" = "mpc7400" \
|| test "$RTEMS_CPU_MODEL" = "mpc750" ) || test "$RTEMS_CPU_MODEL" = "mpc750" )
AM_CONDITIONAL(mpc8xx, test "$RTEMS_CPU_MODEL" = "mpc8xx" \ AM_CONDITIONAL(mpc8xx, test "$RTEMS_CPU_MODEL" = "mpc8xx" \
|| test "$RTEMS_CPU_MODEL" = "mpc821" \ || test "$RTEMS_CPU_MODEL" = "mpc821" \

View File

@@ -113,6 +113,7 @@ int mpc604_vector_is_valid(rtems_vector vector)
int mpc60x_vector_is_valid(rtems_vector vector) int mpc60x_vector_is_valid(rtems_vector vector)
{ {
switch (current_ppc_cpu) { switch (current_ppc_cpu) {
case PPC_7400:
case PPC_750: case PPC_750:
if (!mpc750_vector_is_valid(vector)) { if (!mpc750_vector_is_valid(vector)) {
return 0; return 0;

View File

@@ -5,13 +5,13 @@
PGM = $(ARCH)/mmu.rel PGM = $(ARCH)/mmu.rel
C_FILES = bat.c C_FILES = bat.c pte121.c
S_FILES = mmuAsm.S S_FILES = mmuAsm.S
include_libcpudir = $(includedir)/libcpu include_libcpudir = $(includedir)/libcpu
include_libcpu_HEADERS = bat.h include_libcpu_HEADERS = bat.h pte121.h
mmu_rel_OBJECTS = $(C_FILES:%.c=$(ARCH)/%.o) $(S_FILES:%.S=$(ARCH)/%.o) mmu_rel_OBJECTS = $(C_FILES:%.c=$(ARCH)/%.o) $(S_FILES:%.S=$(ARCH)/%.o)
@@ -38,6 +38,6 @@ all-local: $(ARCH) $(PREINSTALL_FILES) $(mmu_rel_OBJECTS) $(PGM)
.PRECIOUS: $(PGM) .PRECIOUS: $(PGM)
EXTRA_DIST = bat.c bat.h mmuAsm.S EXTRA_DIST = bat.c bat.h mmuAsm.S pte121.c pte121.h
include $(top_srcdir)/../../../../../automake/local.am include $(top_srcdir)/../../../../../automake/local.am

View File

@@ -55,6 +55,7 @@ void setdbat(int bat_index, unsigned long virt, unsigned long phys,
bat_addrs[bat_index].limit = virt + ((bl + 1) << 17) - 1; bat_addrs[bat_index].limit = virt + ((bl + 1) << 17) - 1;
bat_addrs[bat_index].phys = phys; bat_addrs[bat_index].phys = phys;
switch (bat_index) { switch (bat_index) {
case 0 : asm_setdbat1(bat.word[0], bat.word[1]); break;
case 1 : asm_setdbat1(bat.word[0], bat.word[1]); break; case 1 : asm_setdbat1(bat.word[0], bat.word[1]); break;
case 2 : asm_setdbat2(bat.word[0], bat.word[1]); break; case 2 : asm_setdbat2(bat.word[0], bat.word[1]); break;
case 3 : asm_setdbat3(bat.word[0], bat.word[1]); break; case 3 : asm_setdbat3(bat.word[0], bat.word[1]); break;

View File

@@ -12,55 +12,115 @@
* found in found in the file LICENSE in this distribution or at * found in found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html. * http://www.OARcorp.com/rtems/license.html.
* *
* T. Straumann - 11/2001: added support for 7400 (no AltiVec yet)
*/ */
#include <asm.h> #include <asm.h>
#include <rtems/score/cpu.h> #include <rtems/score/cpu.h>
#include <libcpu/io.h> #include <libcpu/io.h>
/* Unfortunately, the CPU types defined in cpu.h are
* an 'enum' type and hence not available :-(
*/
#define PPC_601 0x1
#define PPC_603 0x3
#define PPC_604 0x4
#define PPC_603e 0x6
#define PPC_603ev 0x7
#define PPC_750 0x8
#define PPC_604e 0x9
#define PPC_604r 0xA
#define PPC_7400 0xC
#define PPC_620 0x16
#define PPC_860 0x50
#define PPC_821 PPC_860
#define PPC_8260 0x81
/* ALTIVEC instructions (not recognized by off-the shelf gcc yet) */
#define DSSALL .long 0x7e00066c /* DSSALL altivec instruction opcode */
/* A couple of defines to make the code more readable */
#define CACHE_LINE_SIZE 32
#ifndef MSSCR0
#define MSSCR0 1014
#else
#warning MSSCR0 seems to be known, update __FILE__
#endif
#define DL1HWF (1<<(31-8))
#define L2HWF (1<<(31-20))
/* /*
* Each setdbat routine start by invalidating the DBAT as some * Each setdbat routine start by invalidating the DBAT as some
* proc (604e) request the valid bit set to 0 before accepting * proc (604e) request the valid bit set to 0 before accepting
* to write in BAT * to write in BAT
*/ */
.globl asm_setdbat0
.type asm_setdbat0,@function
asm_setdbat0:
li r0,0
sync
isync
mtspr DBAT0U,r0
mtspr DBAT0L,r0
sync
isync
mtspr DBAT0L, r4
mtspr DBAT0U, r3
sync
isync
blr
.globl asm_setdbat1 .globl asm_setdbat1
.type asm_setdbat1,@function .type asm_setdbat1,@function
asm_setdbat1: asm_setdbat1:
li r20,0 li r0,0
SYNC sync
mtspr DBAT1U,r20 isync
mtspr DBAT1L,r20 mtspr DBAT1U,r0
SYNC mtspr DBAT1L,r0
sync
isync
mtspr DBAT1L, r4 mtspr DBAT1L, r4
mtspr DBAT1U, r3 mtspr DBAT1U, r3
SYNC sync
isync
blr blr
.globl asm_setdbat2 .globl asm_setdbat2
.type asm_setdbat2,@function .type asm_setdbat2,@function
asm_setdbat2: asm_setdbat2:
li r20,0 li r0,0
SYNC sync
mtspr DBAT2U,r20 isync
mtspr DBAT2L,r20 mtspr DBAT2U,r0
SYNC mtspr DBAT2L,r0
sync
isync
mtspr DBAT2L, r4 mtspr DBAT2L, r4
mtspr DBAT2U, r3 mtspr DBAT2U, r3
SYNC sync
isync
blr blr
.globl asm_setdbat3 .globl asm_setdbat3
.type asm_setdbat3,@function .type asm_setdbat3,@function
asm_setdbat3: asm_setdbat3:
li r20,0 li r0,0
SYNC sync
mtspr DBAT3U,r20 isync
mtspr DBAT3L,r20 mtspr DBAT3U,r0
SYNC mtspr DBAT3L,r0
sync
isync
mtspr DBAT3L, r4 mtspr DBAT3L, r4
mtspr DBAT3U, r3 mtspr DBAT3U, r3
SYNC sync
isync
blr blr
.globl L1_caches_enables .globl L1_caches_enables
@@ -72,7 +132,7 @@ L1_caches_enables:
*/ */
mfspr r9,PVR mfspr r9,PVR
rlwinm r9,r9,16,16,31 rlwinm r9,r9,16,16,31
cmpi 0,r9,1 cmpi 0,r9,PPC_601
beq 4f /* not needed for 601 */ beq 4f /* not needed for 601 */
mfspr r11,HID0 mfspr r11,HID0
andi. r0,r11,HID0_DCE andi. r0,r11,HID0_DCE
@@ -87,12 +147,19 @@ L1_caches_enables:
mtspr HID0,r11 /* enable caches */ mtspr HID0,r11 /* enable caches */
sync sync
isync isync
cmpi 0,r9,4 /* check for 604 */ cmpi 0,r9,PPC_604 /* check for 604 */
cmpi 1,r9,9 /* or 604e */ cmpi 1,r9,PPC_604e /* or 604e */
cmpi 2,r9,10 /* or mach5 */ cmpi 2,r9,PPC_604r /* or mach5 */
cror 2,2,6 cror 2,2,6
cror 2,2,10 cror 2,2,10
cmpi 1,r9,PPC_750 /* or 750 */
cror 2,2,6
cmpi 1,r9,PPC_7400 /* or 7400 */
bne 3f
ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 */
3: cror 2,2,6
bne 4f bne 4f
/* on 7400 SIED is actually SGE (store gathering enable) */
ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */ ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
bne 2,5f bne 2,5f
ori r11,r11,HID0_BTCD ori r11,r11,HID0_BTCD
@@ -103,13 +170,17 @@ L1_caches_enables:
.globl get_L2CR .globl get_L2CR
.type get_L2CR, @function .type get_L2CR, @function
get_L2CR: get_L2CR:
/* Make sure this is a 750 chip */ /* Make sure this is a > 750 chip */
mfspr r3,PVR mfspr r3,PVR
rlwinm r3,r3,16,16,31 rlwinm r3,r3,16,16,31
cmplwi r3,0x0008 cmplwi r3,PPC_750 /* it's a 750 */
beq 1f
cmplwi r3,PPC_7400 /* it's a 7400 */
beq 1f
li r3,0 li r3,0
bnelr blr
1:
/* Return the L2CR contents */ /* Return the L2CR contents */
mfspr r3,L2CR mfspr r3,L2CR
blr blr
@@ -146,10 +217,12 @@ set_L2CR:
*the L2 cache instead of to main memory. *the L2 cache instead of to main memory.
*/ */
/* Make sure this is a 750 chip */ /* Make sure this is a > 750 chip */
mfspr r4,PVR mfspr r0,PVR
rlwinm r4,r4,16,16,31 rlwinm r0,r0,16,16,31
cmplwi r4,0x0008 cmplwi r0,PPC_750
beq thisIs750
cmplwi r0,PPC_7400
beq thisIs750 beq thisIs750
li r3,-1 li r3,-1
blr blr
@@ -161,53 +234,74 @@ thisIs750:
/* See if we want to perform a global inval this time. */ /* See if we want to perform a global inval this time. */
rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */ rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */ rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */ rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
rlwinm r3,r3,0,1,31 /* Turn off the enable bit */ rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
or r3,r3,r4 /* Keep the enable bit the same as it was for now. */ or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */ mfmsr r7 /* shut off interrupts around critical flush/invalidate sections */
rlwinm r4,r7,0,17,15 /* Turn off EE bit - an external exception while we are flushing
the cache is fatal (comment this line and see!) */
mtmsr r4
bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
cmplwi r0,PPC_7400 /* > 7400 ? */
bne disableCache /* use traditional method */
/* On the 7400, they recommend using the hardware flush feature */
DSSALL /* stop all data streams */
sync
/* we wouldn't have to flush L1, but for sake of consistency with the other code we do it anyway */
mfspr r4, MSSCR0
oris r4, r4, DL1HWF@h
mtspr MSSCR0, r4
sync
/* L1 flushed */
mfspr r4, L2CR
ori r4, r4, L2HWF
mtspr L2CR, r4
sync
/* L2 flushed */
b flushDone
disableCache: disableCache:
/* Disable the cache. First, we turn off data relocation. */ /* Disable the cache. First, we turn off data relocation. */
mfmsr r7 rlwinm r4,r4,0,28,26 /* Turn off DR bit */
rlwinm r4,r7,0,28,26 /* Turn off DR bit */
rlwinm r4,r4,0,17,15 /* Turn off EE bit - an external exception while we are flushing
the cache is fatal (comment this line and see!) */
sync
mtmsr r4 mtmsr r4
sync isync /* make sure memory accesses have completed */
/* /*
Now, read the first 2MB of memory to put new data in the cache. Now, read the first 2MB of memory to put new data in the cache.
(Actually we only need the size of the L2 cache plus (Actually we only need the size of the L2 cache plus
the size of the L1 cache, but 2MB will cover everything just to be safe). the size of the L1 cache, but 2MB will cover everything just to be safe).
*/ */
lis r4,0x0001 lis r4,0x0001
mtctr r4 mtctr r4
li r4,0 li r4,0
loadLoop: loadLoop:
lwzx r0,r0,r4 lwzx r0,r0,r4
addi r4,r4,0x0020 /* Go to start of next cache line */ addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
bdnz loadLoop bdnz loadLoop
/* Now, flush the first 2MB of memory */ /* Now, flush the first 2MB of memory */
lis r4,0x0001 lis r4,0x0001
mtctr r4 mtctr r4
li r4,0 li r4,0
sync sync
flushLoop: flushLoop:
dcbf r0,r4 dcbf r0,r4
addi r4,r4,0x0020 /* Go to start of next cache line */ addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
bdnz flushLoop bdnz flushLoop
sync
rlwinm r4,r7,0,17,15 /* still mask EE but reenable data relocation */
mtmsr r4
isync
flushDone:
/* Turn off the L2CR enable bit. */ /* Turn off the L2CR enable bit. */
rlwinm r3,r3,0,1,31 rlwinm r3,r3,0,1,31
/* Reenable data relocation. */
sync
mtmsr r7
sync
dontDisableCache: dontDisableCache:
/* Set up the L2CR configuration bits */ /* Set up the L2CR configuration bits */
sync sync
@@ -219,10 +313,10 @@ dontDisableCache:
/* Perform a global invalidation */ /* Perform a global invalidation */
oris r3,r3,0x0020 oris r3,r3,0x0020
sync sync
mtspr 1017,r3 mtspr L2CR,r3
sync sync
invalCompleteLoop: /* Wait for the invalidation to complete */ invalCompleteLoop: /* Wait for the invalidation to complete */
mfspr r3,1017 mfspr r3,L2CR
rlwinm. r4,r3,0,31,31 rlwinm. r4,r3,0,31,31
bne invalCompleteLoop bne invalCompleteLoop
@@ -232,6 +326,8 @@ invalCompleteLoop: /* Wait for the invalidation to complete */
sync sync
noInval: noInval:
/* re-enable interrupts, i.e. restore original MSR */
mtmsr r7 /* (no sync needed) */
/* See if we need to enable the cache */ /* See if we need to enable the cache */
cmplwi r5,0 cmplwi r5,0
beqlr beqlr

View File

@@ -26,10 +26,29 @@ SPR_RO(PVR)
ppc_cpu_id_t current_ppc_cpu = PPC_UNKNOWN; ppc_cpu_id_t current_ppc_cpu = PPC_UNKNOWN;
ppc_cpu_revision_t current_ppc_revision = 0xff; ppc_cpu_revision_t current_ppc_revision = 0xff;
char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu)
{
switch (cpu) {
case PPC_601: return "MPC601";
case PPC_603: return "MPC603";
case PPC_603ev: return "MPC603ev";
case PPC_604: return "MPC604";
case PPC_750: return "MPC750";
case PPC_7400: return "MPC7400";
case PPC_604e: return "MPC604e";
case PPC_604r: return "MPC604r";
case PPC_620: return "MPC620";
case PPC_860: return "MPC860";
case PPC_8260: return "MPC8260";
default:
printk("Unknown CPU value of 0x%x. Please add it to <libcpu/powerpc/shared/cpu.h>\n", cpu );
}
return "UNKNOWN";
}
ppc_cpu_id_t get_ppc_cpu_type() ppc_cpu_id_t get_ppc_cpu_type()
{ {
unsigned int pvr = (_read_PVR() >> 16); unsigned int pvr = (_read_PVR() >> 16);
current_ppc_cpu = (ppc_cpu_id_t) pvr; current_ppc_cpu = (ppc_cpu_id_t) pvr;
switch (pvr) { switch (pvr) {
case PPC_601: case PPC_601:
@@ -37,19 +56,19 @@ ppc_cpu_id_t get_ppc_cpu_type()
case PPC_603ev: case PPC_603ev:
case PPC_604: case PPC_604:
case PPC_750: case PPC_750:
case PPC_7400:
case PPC_604e: case PPC_604e:
case PPC_604r: case PPC_604r:
case PPC_620: case PPC_620:
case PPC_860: case PPC_860:
case PPC_8260: case PPC_8260:
current_ppc_cpu = (ppc_cpu_id_t) pvr;
return current_ppc_cpu; return current_ppc_cpu;
default: default:
printk("Unknown PVR value of 0x%x. Please add it to <libcpu/powerpc/shared/cpu.h>\n", pvr ); printk("Unknown PVR value of 0x%x. Please add it to <libcpu/powerpc/shared/cpu.h>\n", pvr );
return PPC_UNKNOWN; return PPC_UNKNOWN;
} }
} }
ppc_cpu_revision_t get_ppc_cpu_revision() ppc_cpu_revision_t get_ppc_cpu_revision()
{ {
ppc_cpu_revision_t rev = (ppc_cpu_revision_t) (_read_PVR() & 0xffff); ppc_cpu_revision_t rev = (ppc_cpu_revision_t) (_read_PVR() & 0xffff);

View File

@@ -27,6 +27,7 @@ typedef enum
PPC_750 = 0x8, PPC_750 = 0x8,
PPC_604e = 0x9, PPC_604e = 0x9,
PPC_604r = 0xA, PPC_604r = 0xA,
PPC_7400 = 0xC,
PPC_620 = 0x16, PPC_620 = 0x16,
PPC_860 = 0x50, PPC_860 = 0x50,
PPC_821 = PPC_860, PPC_821 = PPC_860,
@@ -38,6 +39,7 @@ typedef unsigned short ppc_cpu_revision_t;
extern ppc_cpu_id_t get_ppc_cpu_type (); extern ppc_cpu_id_t get_ppc_cpu_type ();
extern ppc_cpu_id_t current_ppc_cpu; extern ppc_cpu_id_t current_ppc_cpu;
extern char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu);
extern ppc_cpu_revision_t get_ppc_cpu_revision (); extern ppc_cpu_revision_t get_ppc_cpu_revision ();
extern ppc_cpu_revision_t current_ppc_revision; extern ppc_cpu_revision_t current_ppc_revision;
#endif /* ASM */ #endif /* ASM */