bsps/powerpc: Move bootloader to bsps

This bootloader is only used by the motorola_powerpc BSP.

This patch is a part of the BSP source reorganization.

Update #3285.
Sebastian Huber
2018-04-24 07:06:36 +02:00
parent 0180acf23f
commit 03e1d8378e
22 changed files with 16 additions and 15 deletions


@@ -54,7 +54,7 @@ librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/rtc/todcfg.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/clock/p_clock.c
# console
-librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/motorola_powerpc/dev/polled_io.c
+librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/motorola_powerpc/console/polled_io.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/console/uart.c
librtemsbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/console/console.c


@@ -5,16 +5,17 @@ AUTOMAKE_OPTIONS = no-exeext
noinst_PROGRAMS = bootloader
## IMPORTANT: head.S must be first, T. Straumann 12/17/2001
-bootloader_SOURCES = ../../../powerpc/shared/bootloader/head.S \
-	../../../powerpc/shared/bootloader/exception.S \
-	../../../powerpc/shared/bootloader/em86real.S \
-	../../../powerpc/shared/bootloader/misc.c \
-	../../../powerpc/shared/bootloader/pci.c \
-	../../../powerpc/shared/bootloader/zlib.c \
-	../../../powerpc/shared/bootloader/mm.c \
-	../../../powerpc/shared/bootloader/em86.c \
-	../../../../../../../bsps/powerpc/motorola_powerpc/dev/polled_io.c \
-	../../../powerpc/shared/bootloader/lib.c
+bootloader_SOURCES =
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/head.S
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/exception.S
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/em86real.S
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/misc.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/pci.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/zlib.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/mm.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/em86.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/console/polled_io.c
+bootloader_SOURCES += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/lib.c
# ----
# Remove references to EABI when compiling bootloader
@@ -43,7 +44,7 @@ bootloader.$(OBJEXT): bootloader$(EXEEXT)
cp $< $@
project_lib_DATA = bootloader.$(OBJEXT)
-project_lib_DATA += ../../shared/bootloader/ppcboot.lds
+project_lib_DATA += ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/ppcboot.lds
all-local: $(TMPINSTALL_FILES)
@@ -58,7 +59,7 @@ $(PROJECT_LIB)/bootloader.$(OBJEXT): bootloader.$(OBJEXT) $(PROJECT_LIB)/$(dirst
$(INSTALL_DATA) $< $(PROJECT_LIB)/bootloader.$(OBJEXT)
TMPINSTALL_FILES += $(PROJECT_LIB)/bootloader.$(OBJEXT)
-$(PROJECT_LIB)/ppcboot.lds: ../../shared/bootloader/ppcboot.lds $(PROJECT_LIB)/$(dirstamp)
+$(PROJECT_LIB)/ppcboot.lds: ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/ppcboot.lds $(PROJECT_LIB)/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_LIB)/ppcboot.lds
TMPINSTALL_FILES += $(PROJECT_LIB)/ppcboot.lds


@@ -12,10 +12,10 @@ project_lib_DATA += qemu_fakerom.bin
## IMPORTANT: head.S must be first, T. Straumann 12/17/2001
-qemu_fakerom.$(OBJEXT): ../../../powerpc/shared/bootloader/qemu_fakerom.S
+qemu_fakerom.$(OBJEXT): ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/qemu_fakerom.S
$(CPPASCOMPILE) -c -o $@ $^
-qemu_fakeres.$(OBJEXT): ../../../powerpc/shared/bootloader/qemu_fakeres.c
+qemu_fakeres.$(OBJEXT): ../../../../../../../bsps/powerpc/motorola_powerpc/bootloader/qemu_fakeres.c
$(CC) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o $@ $^
qemu_fakerom.elf: qemu_fakerom.$(OBJEXT) qemu_fakeres.$(OBJEXT)


@@ -1,45 +0,0 @@
The code in this directory has been taken WITH PERMISSION from
Gabriel Paubert, paubert@iram.es. The main reason for having
a separate bootloader for PReP compliant firmware is that the
initial code is relocated by the firmware to an unknown address
(actually 0x5000 on the Motorola MCP750), and, like Gabriel, I
think having relocatable bootloader code is a must.
So building a binary executable that can be booted from hard
disk or via network boot goes like this:
- build an RTEMS executable,
- put it as a data section in the bootloader binary,
- relink the loader (see the make-exe macros for details).
I would like to thank Gabriel for his support and his code.
The original code can be found in the form of a patch to the official
Linux kernel (I insist: not the vger ppc or iMac ppc kernels!!) at:
<ftp://vlab1.iram.es/pub/linux-2.2/>
After applying the patch, the code is located in a new directory
called prepboot.
(NB: use ftp, not Netscape...)
Note that the actual code differs quite a bit, since Gabriel chose
to use a CHRP compliant mapping instead of a PReP mapping to save
BATs. I had no time to upgrade the code to his new version, although
I agree it should be done...
I have also split the original code to get a more modular
design, enabling code reuse between the loader and the RTEMS
initialization (e.g. printk, ...).
Eric Valette (valette@crf.canon.fr)
**************************************************
2003/5/7, Greg Menke, gregory.menke@gsfc.nasa.gov
Reworked the PCI bus 0 initialization a little and added support for
configuring an arbitrary number of other busses and their respective
bridges. Also added support for configuring I/O ranges below 0x10000,
which I think is reasonable given this is a PowerPC BSP.


@@ -1,268 +0,0 @@
/*
* bootldr.h -- Include file for bootloader.
*/
/*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifndef _PPC_BOOTLDR_H
#define _PPC_BOOTLDR_H
#ifndef ASM
#include <stdint.h>
#include <bsp/residual.h>
#include <bsp/consoleIo.h>
#include "pci.h"
#define abs __builtin_abs
#define PTE_REFD 0x100
#define PTE_CHNG (0x80|PTE_REFD) /* Modified implies referenced */
#define PTE_WTHR 0x040
#define PTE_CINH 0x020
#define PTE_COHER 0x010
#define PTE_GUAR 0x008
#define PTE_RO 0x003
#define PTE_RW 0x002
#define PTE_RAM (PTE_CHNG|PTE_COHER|PTE_RW)
#define PTE_ROM (PTE_REFD|PTE_RO)
#define PTE_IO (PTE_CHNG|PTE_CINH|PTE_GUAR|PTE_RW)
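These composites are what vmap() (declared below) expects OR'ed into the low
bits of the physical address; a hedged illustration, mirroring init_v86() in
em86.c (the variable names here are illustrative, not from this file):

    /* map scratch RAM cacheable, and a device window cache-inhibited
     * and guarded: */
    vmap(vbase, (u_long)ram | PTE_RAM, 0xa0000);
    vmap(vbase + 0xa0000, (u_long)(isa_mem_base + 0xa0000) | PTE_IO, 0x20000);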
typedef struct {} opaque;
/* The context passed during MMU interrupts. */
typedef struct _ctxt {
u_long lr, ctr;
u_int cr, xer;
u_long nip, msr;
u_long regs[32];
} ctxt;
/* The main structure, which is pointed to permanently by r13. Things
* are not separated very well between parts because that would cause
* too much code bloat for such a simple program as the bootloader.
* The code is designed to be compiled with the -mrelocatable option and
* tries to minimize the number of relocations/fixups and the number of
* functions that have to access the .got2 sections (this increases the
* size of the prologue in every function).
*/
typedef struct _boot_data {
RESIDUAL *residual;
void *load_address;
void *of_entry;
void *r6, *r7, *r8, *r9, *r10;
u_long cache_lsize;
void *image; /* Where to copy ourselves */
void *stack;
void *mover; /* where to copy codemove to avoid overlays */
u_long o_msr, o_hid0, o_r31;
opaque * mm_private;
const struct pci_bootloader_config_access_functions* pci_functions;
opaque * pci_private;
struct pci_dev * pci_devices;
opaque * v86_private;
char cmd_line[256];
} boot_data;
register boot_data *bd __asm__("r13");
static inline int
pcibios_read_config_byte(u_char bus, u_char dev_fn,
u_char where, uint8_t *val) {
return bd->pci_functions->read_config_byte(bus, dev_fn, where, val);
}
static inline int
pcibios_read_config_word(u_char bus, u_char dev_fn,
u_char where, uint16_t *val) {
return bd->pci_functions->read_config_word(bus, dev_fn, where, val);
}
static inline int
pcibios_read_config_dword(u_char bus, u_char dev_fn,
u_char where, uint32_t *val) {
return bd->pci_functions->read_config_dword(bus, dev_fn, where, val);
}
static inline int
pcibios_write_config_byte(u_char bus, u_char dev_fn,
u_char where, uint8_t val) {
return bd->pci_functions->write_config_byte(bus, dev_fn, where, val);
}
static inline int
pcibios_write_config_word(u_char bus, u_char dev_fn,
u_char where, uint16_t val) {
return bd->pci_functions->write_config_word(bus, dev_fn, where, val);
}
static inline int
pcibios_write_config_dword(u_char bus, u_char dev_fn,
u_char where, uint32_t val) {
return bd->pci_functions->write_config_dword(bus, dev_fn, where, val);
}
static inline int
pci_bootloader_read_config_byte(struct pci_dev *dev, u_char where, uint8_t *val) {
return bd->pci_functions->read_config_byte(dev->bus->number,
dev->devfn,
where, val);
}
static inline int
pci_bootloader_read_config_word(struct pci_dev *dev, u_char where, uint16_t *val) {
return bd->pci_functions->read_config_word(dev->bus->number,
dev->devfn,
where, val);
}
static inline int
pci_bootloader_read_config_dword(struct pci_dev *dev, u_char where, uint32_t *val) {
return bd->pci_functions->read_config_dword(dev->bus->number,
dev->devfn,
where, val);
}
static inline int
pci_bootloader_write_config_byte(struct pci_dev *dev, u_char where, uint8_t val) {
return bd->pci_functions->write_config_byte(dev->bus->number,
dev->devfn,
where, val);
}
static inline int
pci_bootloader_write_config_word(struct pci_dev *dev, u_char where, uint16_t val) {
return bd->pci_functions->write_config_word(dev->bus->number,
dev->devfn,
where, val);
}
static inline int
pci_bootloader_write_config_dword(struct pci_dev *dev, u_char where, uint32_t val) {
return bd->pci_functions->write_config_dword(dev->bus->number,
dev->devfn,
where, val);
}
/* codemove is like memmove, but it also gets the cache line size
* as a 4th parameter to synchronize the caches. If this last parameter is
* zero, it performs more or less like memmove. No copy is performed if
* source and destination addresses are equal; however, the caches
* are still synchronized. Note that the size is always rounded up to
* the next multiple of 4.
*/
extern void * codemove(void *, const void *, size_t, unsigned long);
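A hedged usage sketch (32 matches the cache_lsize default that misc.c places
in boot_data; a zero line size skips the cache flush; pointer names are
illustrative):

    codemove(dest, src, image_size, 32); /* copy, then sync dcache/icache */
    codemove(buf, buf, len, 32);         /* equal pointers: no copy, but the
                                          * caches are still synchronized */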
/* The physical memory allocator allows memory to be aligned on
* powers of 2 given by the lower order bits of flags.
* By default it allocates from higher addresses towards lower ones;
* setting PA_LOW reverses this behaviour.
*/
#define palloc(size) __palloc(size,0)
#define isa_io_base (bd->io_base)
void * __palloc(u_long, int);
void pfree(void *);
#define PA_LOW 0x100
#define PA_PERM 0x200 /* Not freeable by pfree */
#define PA_SUBALLOC 0x400 /* Allocate for suballocation by salloc */
#define PA_ALIGN_MASK 0x1f
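Reading the comment above literally, the low five bits of flags
(PA_ALIGN_MASK) carry the log2 of the requested alignment; a sketch under
that reading (sizes illustrative):

    void *a = palloc(0x4000);                 /* high memory, default alignment */
    void *b = __palloc(0x20000, PA_LOW | 12); /* low memory, 2^12 = 4 KiB aligned */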
void * valloc(u_long size);
void vfree(void *);
int vmap(void *, u_long, u_long);
void vunmap(void *);
void * salloc(u_long size);
void sfree(void *);
void pci_init(void);
void * memset(void *p, int c, size_t n);
void gunzip(void *, int, unsigned char *, int *);
void print_all_maps(const char *);
void print_hash_table(void);
void MMUon(void);
void MMUoff(void);
void hang(const char *, u_long, ctxt *) __attribute__((noreturn));
int init_v86(void);
void cleanup_v86_mess(void);
void em86_main(struct pci_dev *);
int find_max_mem(struct pci_dev *);
/*
* Prototypes for calls from assembly and across files.
*/
typedef struct _x86 x86;
int em86_trap(x86 *p);
void decompress_kernel(int kernel_size, void * zimage_start, int len,
void * initrd_start, int initrd_len );
void boot_udelay(uint32_t _microseconds);
void setup_hw(void);
void _handler(int vec, ctxt *p);
int early_setup(u_long image_size);
void mm_init(u_long image_size);
#endif
#ifdef ASM
/* These definitions simplify the ugly declarations necessary for
* GOT definitions.
*/
#define GOT_ENTRY(NAME) .L_ ## NAME = . - .LCTOC1 ; .long NAME
#define GOT(NAME) .L_ ## NAME (r30)
#define START_GOT \
.section ".got2","aw"; \
.LCTOC1 = .+ 0x8000
#define END_GOT \
.text
#define GET_GOT \
bl 1f; \
.text 2; \
0: .long .LCTOC1-1f; \
.text ; \
1: mflr r30; \
lwz r0,0b-1b(r30); \
add r30,r0,r30
#define bd r13
#define cache_lsize 32 /* Offset into bd area */
#define image 36
#define stack 40
#define mover 44
#define o_msr 48
#define o_hid0 52
#define o_r31 56
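/* Cross-check against the C declaration of boot_data above: the eight
 * 4-byte members (residual, load_address, of_entry, r6..r10) occupy
 * offsets 0..28, putting cache_lsize at offset 32, with image, stack,
 * mover, o_msr, o_hid0 and o_r31 following at 4-byte steps, exactly as
 * defined here.
 */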
/* Stack offsets for saved registers on exceptions */
#define save_lr 8(r1)
#define save_ctr 12(r1)
#define save_cr 16(r1)
#define save_xer 20(r1)
#define save_nip 24(r1)
#define save_msr 28(r1)
#define save_r(n) 32+4*n(r1)
#endif
#endif


@@ -1,574 +0,0 @@
/*
* em86.c -- Video BIOS ROM interpreter for the bootloader.
*/
/*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
/*****************************************************************************
*
* Code to interpret Video BIOS ROM routines.
*
*
******************************************************************************/
/* These includes are for the development version only */
#include <sys/types.h>
#include "pci.h"
#include <libcpu/byteorder.h>
#ifdef __BOOT__
#include "bootldr.h"
#include <limits.h>
#include <rtems/bspIo.h>
#endif
/* Code options, put them on the compiler command line */
/* #define EIP_STATS */ /* EIP based profiling */
/* #undef EIP_STATS */
typedef union _reg_type1 {
unsigned e;
unsigned short x;
struct {
unsigned char l, h;
} lh;
} reg_type1;
typedef union _reg_type2 {
uint32_t e;
uint16_t x;
} reg_type2;
typedef struct _x86 {
reg_type1
_eax, _ecx, _edx, _ebx;
reg_type2
_esp, _ebp, _esi, _edi;
unsigned
es, cs, ss, ds, fs, gs, eip, eflags;
unsigned char
*esbase, *csbase, *ssbase, *dsbase, *fsbase, *gsbase;
volatile unsigned char *iobase;
unsigned char *ioperm;
unsigned
reason, nexteip, parm1, parm2, opcode, base;
unsigned *optable, opreg; /* no longer used! */
unsigned char* vbase;
unsigned instructions;
#ifdef __BOOT__
u_char * ram;
u_char * rom;
struct pci_dev * dev;
#else
unsigned filler[14]; /* Skip to next 64 byte boundary */
unsigned eipstats[32768][2];
#endif
} x86;
x86 v86_private __attribute__((aligned(32)));
/* Emulator is in another source file */
extern
void em86_enter(x86 * p);
#define EAX (p->_eax.e)
#define ECX (p->_ecx.e)
#define EDX (p->_edx.e)
#define EBX (p->_ebx.e)
#define ESP (p->_esp.e)
#define EBP (p->_ebp.e)
#define ESI (p->_esi.e)
#define EDI (p->_edi.e)
#define AX (p->_eax.x)
#define CX (p->_ecx.x)
#define DX (p->_edx.x)
#define BX (p->_ebx.x)
#define SP (p->_esp.x)
#define BP (p->_ebp.x)
#define SI (p->_esi.x)
#define DI (p->_edi.x)
#define AL (p->_eax.lh.l)
#define CL (p->_ecx.lh.l)
#define DL (p->_edx.lh.l)
#define BL (p->_ebx.lh.l)
#define AH (p->_eax.lh.h)
#define CH (p->_ecx.lh.h)
#define DH (p->_edx.lh.h)
#define BH (p->_ebx.lh.h)
/* Functions used for debugging */
#ifdef __BOOT__
#define printf printk
#endif
#ifdef DEBUG
static void dump86(x86 * p){
unsigned char *s = p->csbase + p->eip;
printf("cs:eip=%04x:%08x, eax=%08x, ecx=%08x, edx=%08x, ebx=%08x\n",
p->cs, p->eip, ld_le32(&EAX),
ld_le32(&ECX), ld_le32(&EDX), ld_le32(&EBX));
printf("ss:esp=%04x:%08x, ebp=%08x, esi=%08x, edi=%08x, efl=%08x\n",
p->ss, ld_le32(&ESP), ld_le32(&EBP),
ld_le32(&ESI), ld_le32(&EDI), p->eflags);
printf("nip=%08x, ds=%04x, es=%04x, fs=%04x, gs=%04x, total=%d\n",
p->nexteip, p->ds, p->es, p->fs, p->gs, p->instructions);
printf("code: %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x\n",
s[0], s[1], s[2], s[3], s[4], s[5],
s[6], s[7], s[8], s[9], s[10], s[11]);
#ifndef __BOOT__
printf("op1=%08x, op2=%08x, result=%08x, flags=%08x\n",
p->filler[11], p->filler[12], p->filler[13], p->filler[14]);
#endif
}
#else
#define dump86(x)
#endif
static int bios86pci(x86 * p) {
unsigned reg=ld_le16(&DI);
reg_type2 tmp;
if (AL>=8 && AL<=13 && reg>0xff) {
AH = PCIBIOS_BAD_REGISTER_NUMBER;
} else {
switch(AL) {
case 2: /* find_device */
/* Should be improved for BIOS able to handle
* multiple devices. We simply suppose the BIOS
* inits a single device, and return an error
* if it tries to find more...
*/
if (SI) {
AH=PCIBIOS_DEVICE_NOT_FOUND;
} else {
BH = p->dev->bus->number;
BL = p->dev->devfn;
AH = 0;
}
break;
/*
case 3: find_class not implemented for now.
*/
case 8: /* read_config_byte */
AH=pcibios_read_config_byte(BH, BL, reg, &CL);
break;
case 9: /* read_config_word */
AH=pcibios_read_config_word(BH, BL, reg, &tmp.x);
CX=ld_le16(&tmp.x);
break;
case 10: /* read_config_dword */
AH=pcibios_read_config_dword(BH, BL, reg, &tmp.e);
ECX=ld_le32(&tmp.e);
break;
case 11: /* write_config_byte */
AH=pcibios_write_config_byte(BH, BL, reg, CL);
break;
case 12: /* write_config_word */
AH=pcibios_write_config_word(BH, BL, reg, ld_le16(&CX));
break;
case 13: /* write_config_dword */
AH=pcibios_write_config_dword(
BH, BL, reg, ld_le32((uint32_t *)&ECX));
break;
default:
printf("Unimplemented or illegal PCI service call #%d!\n",
AL);
return 1;
}
}
p->eip = p->nexteip;
/* Set/clear carry according to result */
if (AH) p->eflags |= 1; else p->eflags &=~1;
return 0;
}
static void push2(x86 *p, unsigned value) {
unsigned char * sbase= p->ssbase;
unsigned newsp = (ld_le16(&SP)-2)&0xffff;
st_le16(&SP,newsp);
st_le16((unsigned short *)(sbase+newsp), value);
}
static unsigned pop2(x86 *p) {
unsigned char * sbase=p->ssbase;
unsigned oldsp = ld_le16(&SP);
st_le16(&SP,oldsp+2);
return ld_le16((unsigned short *)(sbase+oldsp));
}
static int int10h(x86 * p) { /* Process BIOS video interrupt */
unsigned vector;
vector=ld_le32((uint32_t *)p->vbase+0x10);
if (((vector&0xffff0000)>>16)==0xc000) {
push2(p, p->eflags);
push2(p, p->cs);
push2(p, p->nexteip);
p->cs=vector>>16;
p->csbase=p->vbase + (p->cs<<4);
p->eip=vector&0xffff;
#if 1
p->eflags&=0xfcff; /* Clear AC/TF/IF */
#else
p->eflags = (p->eflags&0xfcff)|0x100; /* Set TF for debugging */
#endif
/* p->eflags|=0x100; uncomment to force a trap */
return(0);
} else {
switch(AH) {
case 0x12:
switch(BL){
case 0x32:
p->eip=p->nexteip;
return(0);
break;
default:
break;
}
default:
break;
}
printf("unhandled soft interrupt 0x10: vector=%x\n", vector);
return(1);
}
}
static int process_softint(x86 * p) {
#if 0
if (p->parm1!=0x10 || AH!=0x0e) {
printf("Soft interrupt\n");
dump86(p);
}
#endif
switch(p->parm1) {
case 0x10: /* BIOS video interrupt */
return int10h(p);
case 0x1a:
if(AH==0xb1) return bios86pci(p);
break;
default:
break;
}
dump86(p);
printf("Unhandled soft interrupt number 0x%04x, AX=0x%04x\n",
p->parm1, ld_le16(&AX));
return(1);
}
/* The only function called back by the emulator is em86_trap; all
instructions that may change the code segment are trapped here.
p->reason is one of the following codes. */
#define code_zerdiv 0
#define code_trap 1
#define code_int3 3
#define code_into 4
#define code_bound 5
#define code_ud 6
#define code_dna 7
#define code_iretw 256
#define code_iretl 257
#define code_lcallw 258
#define code_lcalll 259
#define code_ljmpw 260
#define code_ljmpl 261
#define code_lretw 262
#define code_lretl 263
#define code_softint 264
#define code_lock 265 /* Lock prefix */
/* Codes 1024 to 2047 are used for I/O port access instructions:
- The three LSB define the port size (1, 2 or 4)
- bit of weight 512 means out if set, in if clear
- bit of weight 256 means ins/outs if set, in/out if clear
- bit of weight 128 means use esi/edi if set, si/di if clear
(only used for ins/outs instructions, always clear for in/out)
*/
#define code_inb 1024+1
#define code_inw 1024+2
#define code_inl 1024+4
#define code_outb 1024+512+1
#define code_outw 1024+512+2
#define code_outl 1024+512+4
#define code_insb_a16 1024+256+1
#define code_insw_a16 1024+256+2
#define code_insl_a16 1024+256+4
#define code_outsb_a16 1024+512+256+1
#define code_outsw_a16 1024+512+256+2
#define code_outsl_a16 1024+512+256+4
#define code_insb_a32 1024+256+128+1
#define code_insw_a32 1024+256+128+2
#define code_insl_a32 1024+256+128+4
#define code_outsb_a32 1024+512+256+128+1
#define code_outsw_a32 1024+512+256+128+2
#define code_outsl_a32 1024+512+256+128+4
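In C terms, the bit layout above decodes roughly as follows (a sketch; the
local variable names are illustrative, not from this file):

    int size      = p->reason & 7;        /* port size: 1, 2 or 4 bytes */
    int is_output = (p->reason >> 9) & 1; /* 512: out/outs rather than in/ins */
    int is_string = (p->reason >> 8) & 1; /* 256: ins/outs rather than in/out */
    int addr32    = (p->reason >> 7) & 1; /* 128: esi/edi rather than si/di */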
int em86_trap(x86 *p) {
#ifndef __BOOT__
int i;
unsigned char command[80];
unsigned char *verb, *t;
unsigned short *fp;
static unsigned char def=0;
static unsigned char * bptaddr=NULL; /* Breakpoint address */
static unsigned char bptopc; /* Replaced breakpoint opcode */
unsigned char cmd;
unsigned tmp;
#endif
switch(p->reason) {
case code_int3:
#ifndef __BOOT__
if(p->csbase+p->eip == bptaddr) {
*bptaddr=bptopc;
bptaddr=NULL;
}
else printf("Unexpected ");
#endif
printf("Breakpoint Interrupt !\n");
/* Note that this fallthrough (no break;) is on purpose */
#ifdef __BOOT__
return 0;
#else
case code_trap:
dump86(p);
for(;;) {
printf("b(reakpoint, g(o, q(uit, s(tack, t(race ? [%c] ", def);
fgets(command,sizeof(command),stdin);
verb = strtok(command," \n");
if(verb) cmd=*verb; else cmd=def;
def=0;
switch(cmd) {
case 'b':
case 'B':
if(bptaddr) *bptaddr=bptopc;
t=strtok(0," \n");
i=sscanf(t,"%x",&tmp);
if(i==1) {
bptaddr=p->vbase + tmp;
bptopc=*bptaddr;
*bptaddr=0xcc;
} else bptaddr=NULL;
break;
case 'q':
case 'Q':
return 1;
break;
case 'g':
case 'G':
p->eflags &= ~0x100;
return 0;
break;
case 's':
case 'S': /* Print the 8 stack top words */
fp = (unsigned short *)(p->ssbase+ld_le16(&SP));
printf("Stack [%04x:%04x]: %04x %04x %04x %04x %04x %04x %04x %04x\n",
p->ss, ld_le16(&SP),
ld_le16(fp+0), ld_le16(fp+1), ld_le16(fp+2), ld_le16(fp+3),
ld_le16(fp+4), ld_le16(fp+5), ld_le16(fp+6), ld_le16(fp+7));
break;
case 't':
case 'T':
p->eflags |= 0x10100; /* Set the resume and trap flags */
def='t';
return 0;
break;
/* Should add some code to edit registers */
}
}
#endif
break;
case code_ud:
printf("Attempt to execute an unimplemented"
"or undefined opcode!\n");
dump86(p);
return(1); /* exit interpreter */
break;
case code_dna:
printf("Attempt to execute a floating point instruction!\n");
dump86(p);
return(1);
break;
case code_softint:
return process_softint(p);
break;
case code_iretw:
p->eip=pop2(p);
p->cs=pop2(p);
p->csbase=p->vbase + (p->cs<<4);
p->eflags= (p->eflags&0xfffe0000)|pop2(p);
/* p->eflags|= 0x100; */ /* Uncomment to trap after iretws */
return(0);
break;
#ifndef __BOOT__
case code_inb:
case code_inw:
case code_inl:
case code_insb_a16:
case code_insw_a16:
case code_insl_a16:
case code_insb_a32:
case code_insw_a32:
case code_insl_a32:
case code_outb:
case code_outw:
case code_outl:
case code_outsb_a16:
case code_outsw_a16:
case code_outsl_a16:
case code_outsb_a32:
case code_outsw_a32:
case code_outsl_a32:
/* For now we simply enable I/O to the ports and continue */
for(i=p->parm1; i<p->parm1+(p->reason&7); i++) {
p->ioperm[i/8] &= ~(1<<i%8);
}
printf("Access to ports %04x-%04x enabled.\n",
p->parm1, p->parm1+(p->reason&7)-1);
return(0);
#endif
case code_lretw:
/* Check for the exit eyecatcher */
if ( *(u_int *)(p->ssbase+ld_le16(&SP)) == UINT_MAX) return 1;
/* No break on purpose */
default:
dump86(p);
printf("em86_trap called with unhandled reason code !\n");
return(1);
}
}
void cleanup_v86_mess(void) {
x86 *p = (x86 *) bd->v86_private;
/* This automatically removes the mappings ! */
vfree(p->vbase);
p->vbase = 0;
pfree(p->ram);
p->ram = 0;
sfree(p->ioperm);
p->ioperm=0;
}
int init_v86(void) {
x86 *p = (x86 *) bd->v86_private;
/* p->vbase is non null when the v86 is properly set-up */
if (p->vbase) return 0;
/* Set everything to 0 */
memset(p, 0, sizeof(*p));
p->ioperm = salloc(65536/8+1);
p->ram = palloc(0xa0000);
p->iobase = ptr_mem_map->io_base;
if (!p->ram || !p->ioperm) return 1;
/* The ioperm array must have an additional byte at the end! */
p->ioperm[65536/8] = 0xff;
p->vbase = valloc(0x110000);
if (!p->vbase) return 1;
/* These calls should never fail. */
vmap(p->vbase, (u_long)p->ram|PTE_RAM, 0xa0000);
vmap(p->vbase+0x100000, (u_long)p->ram|PTE_RAM, 0x10000);
vmap(p->vbase+0xa0000,
((u_long)ptr_mem_map->isa_mem_base+0xa0000)|PTE_IO, 0x20000);
return 0;
}
void em86_main(struct pci_dev *dev){
x86 *p = (x86 *) bd->v86_private;
u_short signature;
u_char length;
volatile u_int *src;
u_int *dst, left;
uint32_t saved_rom;
#if defined(MONITOR_IO) && !defined(__BOOT__)
#define IOMASK 0xff
#else
#define IOMASK 0
#endif
#ifndef __BOOT__
int i;
/* Allow or disable access to all ports */
for(i=0; i<65536/8; i++) p->ioperm[i]=IOMASK;
p->ioperm[i] = 0xff; /* Last unused byte must have this value */
#endif
p->dev = dev;
memset(p->vbase, 0, 0xa0000);
/* Set up a few registers */
p->cs = 0xc000; p->csbase = p->vbase + 0xc0000;
p->ss = 0x1000; p->ssbase = p->vbase + 0x10000;
p->eflags=0x200;
st_le16(&SP,0xfffc); p->eip=3;
p->dsbase = p->esbase = p->fsbase = p->gsbase = p->vbase;
/* Follow the PCI BIOS specification */
AH=dev->bus->number;
AL=dev->devfn;
/* All other registers are irrelevant except ES:DI which
* should point to a PnP installation check block. This
* is not yet implemented due to lack of references. */
/* Store a return address of 0xffff:0xffff as eyecatcher */
*(u_int *)(p->ssbase+ld_le16(&SP)) = UINT_MAX;
/* Interrupt for BIOS EGA services is 0xf000:0xf065 (int 0x10) */
st_le32((uint32_t *)p->vbase + 0x10, 0xf000f065);
/* Enable the ROM, read it and disable it immediately */
pci_bootloader_read_config_dword(dev, PCI_ROM_ADDRESS, &saved_rom);
pci_bootloader_write_config_dword(dev, PCI_ROM_ADDRESS, 0x000c0001);
/* Check that there is an Intel ROM. Should we also check that
* the first instruction is a jump (0xe9 or 0xeb) ?
*/
signature = *(u_short *)(ptr_mem_map->isa_mem_base+0xc0000);
if (signature!=0x55aa) {
printf("bad signature: %04x.\n", signature);
return;
}
/* Allocate memory and copy the video rom to vbase+0xc0000; */
length = ptr_mem_map->isa_mem_base[0xc0002];
p->rom = palloc(length*512);
if (!p->rom) return;
for(dst=(u_int *) p->rom,
src=(volatile u_int *)(ptr_mem_map->isa_mem_base+0xc0000),
left = length*512/sizeof(u_int);
left--;
*dst++=*src++);
/* Disable the ROM and map the copy in virtual address space, note
* that the ROM has to be mapped as RAM since some BIOSes (at least
* Cirrus) perform write accesses to their own ROM. The reason seems
* to be that they check that they must execute from shadow RAM
* because accessing the ROM prevents accessing the video RAM
* according to comments in linux/arch/alpha/kernel/bios32.c.
*/
pci_bootloader_write_config_dword(dev, PCI_ROM_ADDRESS, saved_rom);
vmap(p->vbase+0xc0000, (u_long)p->rom|PTE_RAM, length*512);
/* Now actually emulate the ROM init routine */
em86_enter(p);
/* Free the acquired resources */
vunmap(p->vbase+0xc0000);
pfree(p->rom);
}

File diff suppressed because it is too large


@@ -1,471 +0,0 @@
/*
* exception.S -- Exception handlers for early boot.
*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
/* This is an improved version of the TLB interrupt handling code from
* the 603e users manual (603eUM.pdf) downloaded from the WWW. All the
* visible bugs have been removed. Note that many have survived in the errata
* to the 603 user manual (603UMer.pdf).
*
* This code also pays particular attention to optimization, takes into
* account the differences between 603 and 603e, single/multiple processor
* systems and tries to order instructions for dual dispatch in many places.
*
* The optimization has been performed along two lines:
* 1) to minimize the number of instruction cache lines needed for the most
* common execution paths (the ones that do not result in an exception).
* 2) then to order the code to maximize the number of dual issue and
* completion opportunities without increasing the number of cache lines
* used in the same cases.
*
* The last goal of this code is to fit inside the address range
* assigned to the interrupt vectors: 192 instructions with fixed
* entry points every 64 instructions.
*
* Some typos have also been corrected and the Power l (lowercase L)
* instructions replaced by lwz without comment.
*
* I have attempted to describe the reasons of the order and of the choice
* of the instructions but the comments may be hard to understand without
* the processor manual.
*
* Note that the fact that the TLBs are reloaded by software in theory
* allows tremendous flexibility; for example, by changing a few lines of
* code we could avoid setting the reference bit of a PTE whose page could
* actually not be accessed because of a protection violation. However,
* this would significantly slow down most TLB reload operations, and
* this is the reason why we try never to make checks which would be
* redundant with hardware and usually indicate a bug in a program.
*
* There are some inconsistencies in the documentation concerning the
* settings of SRR1 bit 15. All recent documentation now says that it is set
* for stores and cleared for loads. Anyway this handler never uses this bit.
*
* A final remark, the rfi instruction seems to implicitly clear the
* MSR<14> (tgpr)bit. The documentation claims that this bit is restored
* from SRR1 by rfi, but the corresponding bit in SRR1 is the LRU way bit.
* Anyway, the only exception which can occur while TGPR is set is a machine
* check which would indicate an unrecoverable problem. Recent documentation
* now says in some place that rfi clears MSR<14>.
*
* TLB software load for 602/603/603e/603ev:
* Specific Instructions:
* tlbld - write the dtlb with the pte in rpa reg
* tlbli - write the itlb with the pte in rpa reg
* Specific SPRs:
* dmiss - address of dstream miss
* imiss - address of istream miss
* hash1 - address primary hash PTEG address
* hash2 - returns secondary hash PTEG address
* iCmp - returns the primary istream compare value
* dCmp - returns the primary dstream compare value
* rpa - the second word of pte used by tlblx
* Other specific resources:
* cr0 saved in 4 high order bits of SRR1,
* SRR1 bit 14 [WAY] selects TLB set to load from LRU algorithm
* gprs r0..r3 shadowed by the setting of MSR bit 14 [TGPR]
* other bits in SRR1 (unused by this handler but see earlier comments)
*
* There are three basic flows corresponding to three vectors:
* 0x1000: Instruction TLB miss,
* 0x1100: Data TLB miss on load,
* 0x1200: Data TLB miss on store or not dirty page
*/
/* define the following if code does not have to run on basic 603 */
/* #define USE_KEY_BIT */
/* define the following for safe multiprocessing */
/* #define MULTIPROCESSING */
/* define the following for mixed endian */
/* #define CHECK_MIXED_ENDIAN */
/* define the following if entries always have the reference bit set */
#define ASSUME_REF_SET
/* Some OS kernels may want to keep a single copy of the dirty bit in a per
* page table. In this case writable pages are always write-protected as long
* as they are clean, and the dirty bit set actually means that the page
* is writable.
*/
#define DIRTY_MEANS_WRITABLE
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include "bootldr.h"
/*
* Instruction TLB miss flow
* Entry at 0x1000 with the following:
* srr0 -> address of instruction that missed
* srr1 -> 0:3=cr0, 13=1 (instruction), 14=lru way, 16:31=saved MSR
* msr<tgpr> -> 1
* iMiss -> ea that missed
* iCmp -> the compare value for the va that missed
* hash1 -> pointer to first hash pteg
* hash2 -> pointer to second hash pteg
*
* Register usage:
* r0 is limit address during search / scratch after
* r1 is pte data / error code for ISI exception when search fails
* r2 is pointer to pte
* r3 is compare value during search / scratch after
*/
/* Binutils or assembler bug? Declaring the section executable and writable
* generates an error message on the @fixup entries.
*/
.section .exception,"aw"
# .org 0x1000 # instruction TLB miss entry point
.globl tlb_handlers
tlb_handlers:
.type tlb_handlers,@function
#define ISIVec tlb_handlers-0x1000+0x400
#define DSIVec tlb_handlers-0x1000+0x300
mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
mfspr r3,ICMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
cmpw r0,r2 # but we gain from starting cache load
lwzu r1,8(r2) # earlier and using slots between load
bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,IMISS # get miss address during load delay
#ifdef ASSUME_REF_SET
andi. r3,r1,8 # check for guarded memory
bne- 5f
mtspr PPC_RPA,r1
mfsrr1 r3
tlbli r0
#else
/* This is basically the original code from the manual. */
# andi. r3,r1,8 # check for guarded memory
# bne- 5f
# andi. r3,r1,0x100 # check R bit ahead to help folding
/* However there is a better solution: these last three instructions can be
replaced by the following which should cause less pipeline stalls because
both tests are combined and there is a single CR rename buffer */
extlwi r3,r1,6,23 # Keep only RCWIMG in 6 most significant bits.
rlwinm. r3,r3,5,0,27 # Keep only G (in sign) and R and test.
blt- 5f # Negative means guarded, zero R not set.
mfsrr1 r3 # get saved cr0 bits now to dual issue
ori r1,r1,0x100
mtspr PPC_RPA,r1
tlbli r0
/* Do not update PTE if R bit already set, this will save one cache line
writeback at a later time, and avoid even more bus traffic in
multiprocessing systems, when several processors access the same PTEGs.
We also hope that the reference bit will be already set. */
bne+ 3f
#ifdef MULTIPROCESSING
srwi r1,r1,8 # get byte 7 of pte
stb r1,+6(r2) # update page table
#else
sth r1,+6(r2) # update page table
#endif
#endif
3: mtcrf 0x80,r3 # restore CR0
rfi # return to executing program
/* The preceding code is 20 to 25 instructions long, which occupies
3 or 4 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
lis r1,0x4000 # set up error code in case next branch taken
bne- 6f # speculatively issue the following
mfspr r2,HASH2 # get the second pointer
ori r3,r3,0x0040 # change the compare value
lwz r1,0(r2) # load first entry
b 0b # and go back to main loop
/* We are now at 27 to 32 instructions, using 3 or 4 cache lines for all
cases in which the TLB is successfully loaded. */
/* Guarded memory protection violation: synthesize an ISI exception. */
5: lis r1,0x1000 # set srr1<3>=1 to flag guard violation
/* Entry Not Found branches here with r1 correctly set. */
6: mfsrr1 r3
mfmsr r0
insrwi r1,r3,16,16 # build srr1 for ISI exception
mtsrr1 r1 # set srr1
/* It seems few people have realized rlwinm can be used to clear a bit or
a field of contiguous bits in a register by setting mask_begin>mask_end. */
rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
mtcrf 0x80, r3 # restore CR0
mtmsr r0 # flip back to the native gprs
isync # Required from 602 doc!
b ISIVec # go to instruction access exception
/* Up to now there are 37 to 42 instructions so at least 20 could be
inserted for complex cases or for statistics recording. */
/*
Data TLB miss on load flow
Entry at 0x1100 with the following:
srr0 -> address of instruction that caused the miss
srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=0, 16:31=saved MSR
msr<tgpr> -> 1
dMiss -> ea that missed
dCmp -> the compare value for the va that missed
hash1 -> pointer to first hash pteg
hash2 -> pointer to second hash pteg
Register usage:
r0 is limit address during search / scratch after
r1 is pte data / error code for DSI exception when search fails
r2 is pointer to pte
r3 is compare value during search / scratch after
*/
.org tlb_handlers+0x100
mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
mfspr r3,DCMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
cmpw r0,r2 # but we gain from starting cache load
lwzu r1,8(r2) # earlier and using slots between load
bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,DMISS # get miss address during load delay
#ifdef ASSUME_REF_SET
mtspr PPC_RPA,r1
mfsrr1 r3
tlbld r0
#else
andi. r3,r1,0x100 # check R bit ahead to help folding
mfsrr1 r3 # get saved cr0 bits now to dual issue
ori r1,r1,0x100
mtspr PPC_RPA,r1
tlbld r0
/* Do not update PTE if R bit already set, this will save one cache line
writeback at a later time, and avoid even more bus traffic in
multiprocessing systems, when several processors access the same PTEGs.
We also hope that the reference bit will be already set. */
bne+ 3f
#ifdef MULTIPROCESSING
srwi r1,r1,8 # get byte 7 of pte
stb r1,+6(r2) # update page table
#else
sth r1,+6(r2) # update page table
#endif
#endif
3: mtcrf 0x80,r3 # restore CR0
rfi # return to executing program
/* The preceding code is 18 to 23 instructions long, which occupies
3 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
lis r1,0x4000 # set up error code in case next branch taken
bne- 9f # speculatively issue the following
mfspr r2,HASH2 # get the second pointer
ori r3,r3,0x0040 # change the compare value
lwz r1,0(r2) # load first entry asap
b 0b # and go back to main loop
/* We are now at 25 to 30 instructions, using 3 or 4 cache lines for all
cases in which the TLB is successfully loaded. */
/*
Data TLB miss on store or not dirty page flow
Entry at 0x1200 with the following:
srr0 -> address of instruction that caused the miss
srr1 -> 0:3=cr0, 13=0 (data), 14=lru way, 15=1, 16:31=saved MSR
msr<tgpr> -> 1
dMiss -> ea that missed
dCmp -> the compare value for the va that missed
hash1 -> pointer to first hash pteg
hash2 -> pointer to second hash pteg
Register usage:
r0 is limit address during search / scratch after
r1 is pte data / error code for DSI exception when search fails
r2 is pointer to pte
r3 is compare value during search / scratch after
*/
.org tlb_handlers+0x200
mfspr r2,HASH1
lwz r1,0(r2) # Start memory access as soon as possible
mfspr r3,DCMP # to load the cache.
0: la r0,48(r2) # Use explicit loop to avoid using ctr
1: cmpw r1,r3 # In theory the loop is somewhat slower
beq- 2f # than documentation example
cmpw r0,r2 # but we gain from starting cache load
lwzu r1,8(r2) # earlier and using slots between load
bne+ 1b # and comparison for other purposes.
cmpw r1,r3
bne- 4f # Secondary hash check
2: lwz r1,4(r2) # Found: load second word of PTE
mfspr r0,DMISS # get miss address during load delay
/* We could simply set the C bit and then rely on hardware to flag protection
violations. This raises the problem that a page which actually has not been
modified may be marked as dirty, violating the OEA model for guaranteed
bit settings (table 5-8 of 603eUM.pdf). This can have harmful consequences
on operating system memory management routines, and play havoc with copy on
write schemes. So the protection check is ABSOLUTELY necessary. */
andi. r3,r1,0x80 # check C bit
beq- 5f # if (C==0) go to check protection
3: mfsrr1 r3 # get the saved cr0 bits
mtspr PPC_RPA,r1 # set the pte
tlbld r0 # load the dtlb
mtcrf 0x80,r3 # restore CR0
rfi # return to executing program
/* The preceding code is 20 instructions long, which occupy
3 cache lines. */
4: andi. r0,r3,0x0040 # see if we have done second hash
lis r1,0x4200 # set up error code in case next branch taken
bne- 9f # speculatively issue the following
mfspr r2,HASH2 # get the second pointer
ori r3,r3,0x0040 # change the compare value
lwz r1,0(r2) # load first entry asap
b 0b # and go back to main loop
/* We are now at 27 instructions, using 3 or 4 cache lines for all
cases in which the TLB C bit is already set. */
#ifdef DIRTY_MEANS_WRITABLE
5: lis r1,0x0A00 # protection violation on store
#else
/*
Entry found and C==0: check protection before setting C:
Register usage:
r0 is dMiss register
r1 is PTE entry (to be copied to RPA if success)
r2 is pointer to pte
r3 is trashed
For the 603e, the key bit in SRR1 helps to decide whether there is a
protection violation. However the way the check is done in the manual is
not very efficient. The code shown here works as well for 603 and 603e and
is much more efficient for the 603 and comparable to the manual example
for 603e. This code however has quite a bad structure due to the fact it
has been reordered to speed up the most common cases.
*/
/* The first of the following two instructions could be replaced by
andi. r3,r1,3 but it would compete with cmplwi for cr0 resource. */
5: clrlwi r3,r1,30 # Extract two low order bits
cmplwi r3,2 # Test for PP=10
bne- 7f # assume fallthrough is more frequent
6: ori r1,r1,0x180 # set referenced and changed bit
sth r1,6(r2) # update page table
b 3b # and finish loading TLB
/* We are now at 33 instructions, using 5 cache lines. */
7: bgt- 8f # if PP=11 then DSI protection exception
/* This code only works if key bit is present (602/603e/603ev) */
#ifdef USE_KEY_BIT
mfsrr1 r3 # get the KEY bit and test it
andis. r3,r3,0x0008
beq 6b # default prediction taken, truly better ?
#else
/* This code is for all 602 and 603 family models: */
mfsrr1 r3 # Here the trick is to use the MSR PR bit as a
mfsrin r0,r0 # shift count for an rlwnm. instruction which
extrwi r3,r3,1,17 # extracts and tests the correct key bit from
rlwnm. r3,r0,r3,1,1 # the segment register. RISC they said...
mfspr r0,DMISS # Restore fault address to r0
beq 6b # if 0 load tlb else protection fault
#endif
/* We are now at 40 instructions, (37 if using key bit), using 5 cache
lines in all cases in which the C bit is successfully set */
8: lis r1,0x0A00 # protection violation on store
#endif /* DIRTY_MEANS_WRITABLE */
/* PTE entry not found branch here with DSISR code in r1 */
9: mfsrr1 r3
mtdsisr r1
clrlwi r2,r3,16 # set up srr1 for DSI exception
mfmsr r0
/* I have some doubts about the usefulness of the xori instruction in
mixed or pure little-endian environment. The address is in the same
doubleword, hence in the same protection domain and performing an exclusive
or with 7 is only valid for byte accesses. */
#ifdef CHECK_MIXED_ENDIAN
andi. r1,r2,1 # test LE bit ahead to help folding
#endif
mtsrr1 r2
rlwinm r0,r0,0,15,13 # clear the msr<tgpr> bit
mfspr r1,DMISS # get miss address
#ifdef CHECK_MIXED_ENDIAN
beq 1f # if little endian then:
xori r1,r1,0x07 # de-mung the data address
1:
#endif
mtdar r1 # put in dar
mtcrf 0x80,r3 # restore CR0
mtmsr r0 # flip back to the native gprs
isync # required from 602 manual
b DSIVec # branch to DSI exception
/* We are now between 50 and 56 instructions. Close to the limit
but should be sufficient in case bugs are found. */
/* Altogether the three handlers occupy 128 instructions in the worst
case, 64 instructions could still be added (non contiguously). */
.org tlb_handlers+0x300
.globl _handler_glue
_handler_glue:
/* Entry code for exceptions: DSI (0x300), ISI(0x400), alignment(0x600) and
* traps(0x700). In theory it is not necessary to save and restore r13 and all
* higher numbered registers, but it is done because it allowed calling the
* firmware (PPCBug) for debugging in the very first stages when writing the
* bootloader.
*/
stwu r1,-160(r1)
stw r0,save_r(0)
mflr r0
stmw r2,save_r(2)
bl 0f
0: mfctr r4
stw r0,save_lr
mflr r9 /* Interrupt vector + few instructions */
la r10,160(r1)
stw r4,save_ctr
mfcr r5
lwz r8,2f-0b(r9)
mfxer r6
stw r5,save_cr
mtctr r8
stw r6,save_xer
mfsrr0 r7
stw r10,save_r(1)
mfsrr1 r8
stw r7,save_nip
la r4,8(r1)
lwz r13,1f-0b(r9)
rlwinm r3,r9,24,0x3f /* Interrupt vector >> 8 */
stw r8,save_msr
bctrl
lwz r7,save_msr
lwz r6,save_nip
mtsrr1 r7
lwz r5,save_xer
mtsrr0 r6
lwz r4,save_ctr
mtxer r5
lwz r3,save_lr
mtctr r4
lwz r0,save_cr
mtlr r3
lmw r2,save_r(2)
mtcr r0
lwz r0,save_r(0)
la r1,160(r1)
rfi
1: .long (__bd)@fixup
2: .long (_handler)@fixup
.section .fixup,"aw"
.align 2
.long 1b, 2b
.previous


@@ -1,466 +0,0 @@
/*
* head.S -- Bootloader Entry point
*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include "bootldr.h"
#include <bspopts.h>
#define TEST_PPCBUG_CALLS
#undef TEST_PPCBUG_CALLS
#define FRAME_SIZE 32
#define LOCK_CACHES (HID0_DLOCK | HID0_ILOCK)
#define INVL_CACHES (HID0_DCI | HID0_ICFI)
#define ENBL_CACHES (HID0_DCE | HID0_ICE)
#ifndef qemu
#define USE_PPCBUG
#endif
#define PRINT_CHAR(c) \
addi r20,r3,0 ; \
li r3,c ; \
li r10,0x20 ; \
sc ; \
addi r3,r20,0 ; \
li r10,0x26 ; \
sc
#define MONITOR_ENTER \
mfmsr r10 ; \
ori r10,r10,MSR_IP ; \
mtmsr r10 ; \
li r10,0x63 ; \
sc
START_GOT
GOT_ENTRY(_GOT2_TABLE_)
GOT_ENTRY(_FIXUP_TABLE_)
GOT_ENTRY(.bss)
GOT_ENTRY(codemove)
GOT_ENTRY(0)
GOT_ENTRY(__bd)
GOT_ENTRY(moved)
GOT_ENTRY(_binary_rtems_gz_start)
GOT_ENTRY(_binary_initrd_gz_start)
GOT_ENTRY(_binary_initrd_gz_end)
#ifdef TEST_PPCBUG_CALLS
GOT_ENTRY(banner_start)
GOT_ENTRY(banner_end)
#endif
#ifdef USE_PPCBUG
GOT_ENTRY(nioc_reset_packet)
#endif
END_GOT
.globl start
.type start,@function
/* Point the stack into the PreP partition header in the x86 reserved
* code area, so that simple C routines can be called.
*/
start:
#if defined(USE_PPCBUG) && defined(DEBUG) && defined(REENTER_MONITOR)
MONITOR_ENTER
#endif
bl 1f
1: mflr r1
li r0,0
stwu r0,start-1b-0x400+0x1b0-FRAME_SIZE(r1)
stmw r26,FRAME_SIZE-24(r1)
GET_GOT
mfmsr r28 /* Turn off interrupts */
ori r0,r28,MSR_EE
xori r0,r0,MSR_EE
mtmsr r0
/* Enable the caches, from now on cr2.eq set means processor is 601 */
mfpvr r0
mfspr r29,HID0
srwi r0,r0,16
cmplwi cr2,r0,1
beq 2,2f
/*
* commented out, 11/7/2002, gregm. This instruction sequence seems to
* be pathological on the 603e.
*
#ifndef USE_PPCBUG
ori r0,r29,ENBL_CACHES|INVL_CACHES|LOCK_CACHES
xori r0,r0,INVL_CACHES|LOCK_CACHES
sync
isync
mtspr HID0,r0
#endif
*/
2: bl reloc
/* save all the parameters and the original msr/hid0/r31 */
lwz bd,GOT(__bd)
stw r3,0(bd)
stw r4,4(bd)
stw r5,8(bd)
stw r6,12(bd)
stw r7,16(bd)
stw r8,20(bd)
stw r9,24(bd)
stw r10,28(bd)
stw r28,o_msr(bd)
stw r29,o_hid0(bd)
stw r31,o_r31(bd)
#ifdef USE_PPCBUG
/* Stop the network interface - otherwise, memory can get
* corrupted by the IF DMAing data into its old buffers or
* by writing descriptors...
*/
lwz r3,GOT(nioc_reset_packet)
li r10, 0x1d /* .NETCTRL */
sc
#endif
/* Call the routine to fill boot_data structure from residual data.
* And to find where the code has to be moved.
*/
lis r3,__size@sectoff@ha
addi r3,r3,__size@sectoff@l
bl early_setup
/* Now we need to relocate ourselves to where we are told to. First put a
* copy of the codemove routine at some place in memory
* (which may be where the 0x41 partition was loaded, so size is critical).
*/
lwz r4,GOT(codemove)
li r5,_size_codemove
lwz r3,mover(bd)
lwz r6,cache_lsize(bd)
bl codemove
mtctr r3 # Where the temporary codemove is.
lwz r3,image(bd)
lis r5,_edata@sectoff@ha
lwz r4,GOT(0) # Our own address
addi r5,r5,_edata@sectoff@l
lwz r6,cache_lsize(bd)
lwz r8,GOT(moved)
sub r7,r3,r4 # Difference to adjust pointers.
add r8,r8,r7
add r30,r30,r7
add bd,bd,r7
/* Call the copy routine but return to the new area. */
mtlr r8 # for the return address
bctr # returns to the moved instruction
/* Establish the new top stack frame. */
moved: lwz r1,stack(bd)
li r0,0
stwu r0,-16(r1)
/* relocate again */
bl reloc
/* Clear all of BSS */
lwz r10,GOT(.bss)
li r0,__bss_words@sectoff@l
subi r10,r10,4
cmpwi r0,0
mtctr r0
li r0,0
beq 4f
3: stwu r0,4(r10)
bdnz 3b
/* Final memory initialization. First switch to unmapped mode
* in case the FW had set the MMU on, and flush the TLB to avoid
* stale entries from interfering. No I/O access is allowed
* during this time!
*/
4:
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('M')
#endif
bl MMUoff
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('B')
#endif
bl flush_tlb
/* Some firmware versions leave stale values in the BATs, it's time
* to invalidate them to avoid interference with our own mappings.
* But the 601 valid bit is in the BATL (IBAT only) and others are in
* the [ID]BATU. Bloat, bloat.. fortunately thrown away later.
*/
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('T')
#endif
li r3,0
beq cr2,5f
mtdbatu 0,r3
mtdbatu 1,r3
mtdbatu 2,r3
mtdbatu 3,r3
5: mtibatu 0,r3
mtibatl 0,r3
mtibatu 1,r3
mtibatl 1,r3
mtibatu 2,r3
mtibatl 2,r3
mtibatu 3,r3
mtibatl 3,r3
lis r3,__size@sectoff@ha
addi r3,r3,__size@sectoff@l
sync # We are going to touch SDR1 !
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('i')
#endif
bl mm_init
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('M')
#endif
bl MMUon
/* Now we are mapped and can perform I/O if we want */
#ifdef TEST_PPCBUG_CALLS
/* Experience seems to show that PPCBug can only be called with the
* data cache disabled and with MMU disabled. Bummer.
*/
li r10,0x22 # .OUTLN
lwz r3,GOT(banner_start)
lwz r4,GOT(banner_end)
sc
#endif
#if defined(USE_PPCBUG) && defined(DEBUG)
PRINT_CHAR('H')
#endif
bl setup_hw
lwz r4,GOT(_binary_rtems_gz_start)
lis r5,_rtems_gz_size@sectoff@ha
lwz r6,GOT(_binary_initrd_gz_start)
lis r3,_rtems_size@sectoff@ha
lwz r7,GOT(_binary_initrd_gz_end)
addi r5,r5,_rtems_gz_size@sectoff@l
addi r3,r3,_rtems_size@sectoff@l
sub r7,r7,r6
bl decompress_kernel
/* Back here we are unmapped and we start the kernel, passing up to eight
* parameters just in case, only r3 to r7 used for now. Flush the tlb so
* that the loaded image starts in a clean state.
*/
bl flush_tlb
lwz r3,0(bd)
lwz r4,4(bd)
lwz r5,8(bd)
lwz r6,12(bd)
lwz r7,16(bd)
lwz r8,20(bd)
lwz r9,24(bd)
lwz r10,28(bd)
lwz r30,0(0)
mtctr r30
/*
* Linux code again
*
lis r30,0xdeadc0de@ha
addi r30,r30,0xdeadc0de@l
stw r30,0(0)
li r30,0
*/
dcbst 0,r30 /* Make sure it's in memory ! */
/* We just flash invalidate and disable the dcache, unless it's a 601,
* critical areas have been flushed and we don't care about the stack
* and other scratch areas.
*/
beq cr2,1f
mfspr r0,HID0
ori r0,r0,HID0_DCI|HID0_DCE
sync
mtspr HID0,r0
xori r0,r0,HID0_DCI|HID0_DCE
mtspr HID0,r0
/* Provisional return to FW, works for PPCBug */
#if 0 && defined(REENTER_MONITOR)
MONITOR_ENTER
#else
1: bctr
#endif
/* relocation function, r30 must point to got2+0x8000 */
reloc:
/* Adjust got2 pointers, no need to check for 0, this code already puts
* a few entries in the table.
*/
li r0,__got2_entries@sectoff@l
la r12,GOT(_GOT2_TABLE_)
lwz r11,GOT(_GOT2_TABLE_)
mtctr r0
sub r11,r12,r11
addi r12,r12,-4
1: lwzu r0,4(r12)
add r0,r0,r11
stw r0,0(r12)
bdnz 1b
/* Now adjust the fixups and the pointers to the fixups in case we need
* to move ourselves again.
*/
2: li r0,__fixup_entries@sectoff@l
lwz r12,GOT(_FIXUP_TABLE_)
cmpwi r0,0
mtctr r0
addi r12,r12,-4
beqlr
3: lwzu r10,4(r12)
lwzux r0,r10,r11
add r0,r0,r11
stw r10,0(r12)
stw r0,0(r10)
bdnz 3b
blr
/* Set the MMU on and off: code is always mapped 1:1 and does not need MMU,
* but it does not cost so much to map it also and it catches calls through
* NULL function pointers.
*/
.globl MMUon
.type MMUon,@function
MMUon: blr
nop
/*
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR|MSR_IP
mflr r11
xori r0,r0,MSR_IP
mtsrr0 r11
mtsrr1 r0
rfi
*/
.globl MMUoff
.type MMUoff,@function
MMUoff: blr
nop
/*
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR|MSR_IP
mflr r11
xori r0,r0,MSR_IR|MSR_DR
mtsrr0 r11
mtsrr1 r0
rfi
*/
/* Due to the PPC architecture (and according to the specifications), a
* series of tlbie which goes through a whole 256 MB segment always flushes
* the whole TLB. This is obviously overkill and slow, but who cares?
* It takes about 1 ms on a 200 MHz 603e and works even if residual data
* get the number of TLB entries wrong.
*/
flush_tlb:
lis r11,0x1000
1: addic. r11,r11,-0x1000
tlbie r11
bnl 1b
/* tlbsync is not implemented on 601, so use sync which seems to be a superset
* of tlbsync in all cases, and do not bother with CPU dependent code
*/
sync
blr
.globl codemove
codemove:
.type codemove,@function
/* r3 dest, r4 src, r5 length in bytes, r6 cachelinesize */
cmplw cr1,r3,r4
addi r0,r5,3
srwi. r0,r0,2
beq cr1,4f /* In place copy is not necessary */
beq 7f /* Protect against 0 count */
mtctr r0
bge cr1,2f
la r8,-4(r4)
la r7,-4(r3)
1: lwzu r0,4(r8)
stwu r0,4(r7)
bdnz 1b
b 4f
2: slwi r0,r0,2
add r8,r4,r0
add r7,r3,r0
3: lwzu r0,-4(r8)
stwu r0,-4(r7)
bdnz 3b
/* Now flush the cache: note that we must start from a cache aligned
* address. Otherwise we might miss one cache line.
*/
4: cmpwi r6,0
add r5,r3,r5
beq 7f /* Always flush prefetch queue in any case */
subi r0,r6,1
andc r3,r3,r0
mr r4,r3
5: cmplw r4,r5
dcbst 0,r4
add r4,r4,r6
blt 5b
sync /* Wait for all dcbst to complete on bus */
mr r4,r3
6: cmplw r4,r5
icbi 0,r4
add r4,r4,r6
blt 6b
7: sync /* Wait for all icbi to complete on bus */
isync
blr
.size codemove,.-codemove
_size_codemove=.-codemove
.section ".data" # .rodata
.align 4
#ifdef USE_PPCBUG
/* A control 'packet' for the .NETCTRL PPCBug syscall to
* reset a network interface. Let's hope they used the
* first one for booting!! (CLUN/DLUN == 0/0)
* Must be 4-byte aligned...
*/
nioc_reset_packet:
.byte 0 /* Controller LUN */
.byte 0 /* Device LUN */
.word 0 /* status return */
.long 5 /* Command (5=RESET) */
.long 0 /* Mem. Addr. for real data (unused for reset) */
.long 0 /* Number of bytes */
.long 0 /* Status/Control Flags (unused for reset) */
#endif
#ifdef TEST_PPCBUG_CALLS
banner_start:
.ascii "This message was printed by PPCBug with MMU enabled"
banner_end:
#endif


@@ -1,62 +0,0 @@
/* lib.c
*
* This file contains the implementation of functions that are unresolved
* in the bootloader. Unfortunately it must not use any object code
* from newlib or RTEMS because they are not compiled with the right options!!!
*
* You've been warned!!!
*/
/*
* Copyright (C) 1998, 1999 valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
/*
* Provide our own prototypes to avoid warnings and the risk of getting
* inlining conflicts with the normal header files.
*/
void* memset(void *p, int c, unsigned int n);
void* memcpy(void *dst, const void * src, unsigned int n);
char* strcat(char * dest, const char * src);
int strlen(const char* string);
void* memset(void *p, int c, unsigned int n)
{
char *q =p;
for(; n>0; --n) *q++=c;
return p;
}
void* memcpy(void *dst, const void * src, unsigned int n)
{
unsigned char *d=dst;
const unsigned char *s=src;
while(n-- > 0) *d++=*s++;
return dst;
}
char* strcat(char * dest, const char * src)
{
char *tmp = dest;
while (*dest)
dest++;
while ((*dest++ = *src++) != '\0')
;
return tmp;
}
int strlen(const char* string)
{
register int i = 0;
while (string[i] != '\0')
++i;
return i;
}


@@ -1,545 +0,0 @@
/*
* misc.c -- Miscellaneous bootloader code
*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems/system.h>
#include <sys/types.h>
#include <string.h>
#include "bootldr.h"
#include <libcpu/spr.h>
#include "zlib.h"
#include <libcpu/byteorder.h>
#include <rtems/bspIo.h>
#include <bsp.h>
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
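A quick sanity check of the macro, assuming 4 KiB pages (PAGE_SIZE 0x1000,
PAGE_MASK 0xfffff000):

    /* PAGE_ALIGN(0x12345) == 0x13000, PAGE_ALIGN(0x13000) == 0x13000 */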
SPR_RO(PPC_PVR)
struct inode;
struct wait_queue;
struct buffer_head;
typedef struct { int counter; } atomic_t;
typedef struct page {
/* these must be first (free area handling) */
struct page *next;
struct page *prev;
struct inode *inode;
unsigned long offset;
struct page *next_hash;
atomic_t count;
unsigned long flags; /* atomic flags, some possibly updated asynchronously */
struct wait_queue *wait;
struct page **pprev_hash;
struct buffer_head * buffers;
} mem_map_t;
extern opaque mm_private, pci_private, v86_private, console_private;
#define CONSOLE_ON_SERIAL "console=ttyS0"
extern struct console_io vacuum_console_functions;
extern opaque log_console_setup, serial_console_setup, vga_console_setup;
boot_data __bd = {0, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0,
&mm_private,
NULL,
&pci_private,
NULL,
&v86_private,
"root=/dev/hdc1"
};
static void exit(void) __attribute__((noreturn));
static void exit(void) {
printk("\nOnly way out is to press the reset button!\n");
asm volatile("": : :"memory");
while(1);
}
void hang(const char *s, u_long x, ctxt *p) {
u_long *r1;
#ifdef DEBUG
print_all_maps("\nMemory mappings at exception time:\n");
#endif
printk("%s %lx NIP: %p LR: %p\n"
"Callback trace (stack:return address)\n",
s, x, (void *) p->nip, (void *) p->lr);
asm volatile("lwz %0,0(1); lwz %0,0(%0); lwz %0,0(%0)": "=b" (r1));
while(r1) {
printk(" %p:%p\n", r1, (void *) r1[1]);
r1 = (u_long *) *r1;
}
exit();
};
static void *zalloc(void *x, unsigned items, unsigned size)
{
void *p = salloc(items*size);
if (!p) {
printk("oops... not enough memory for gunzip\n");
}
return p;
}
static void zfree(void *x, void *addr, unsigned nb)
{
sfree(addr);
}
#define HEAD_CRC 2
#define EXTRA_FIELD 4
#define ORIG_NAME 8
#define COMMENT 0x10
#define RESERVED 0xe0
#define DEFLATED 8
void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
{
z_stream s;
int r, i, flags;
/* skip header */
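/* (For reference: the 10 bytes skipped here are the fixed gzip member
* header - magic 0x1f 0x8b, the compression method, the flag byte
* tested below, a 4-byte mtime, and the XFL/OS bytes; the optional
* EXTRA/NAME/COMMENT/CRC fields follow. Note that the magic bytes
* themselves are never verified.)
*/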
i = 10;
flags = src[3];
if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
printk("bad gzipped data\n");
exit();
}
if ((flags & EXTRA_FIELD) != 0)
i = 12 + src[10] + (src[11] << 8);
if ((flags & ORIG_NAME) != 0)
while (src[i++] != 0)
;
if ((flags & COMMENT) != 0)
while (src[i++] != 0)
;
if ((flags & HEAD_CRC) != 0)
i += 2;
if (i >= *lenp) {
printk("gunzip: ran out of data in header\n");
exit();
}
s.zalloc = zalloc;
s.zfree = zfree;
r = inflateInit2(&s, -MAX_WBITS);
if (r != Z_OK) {
printk("inflateInit2 returned %d\n", r);
exit();
}
s.next_in = src + i;
s.avail_in = *lenp - i;
s.next_out = dst;
s.avail_out = dstlen;
r = inflate(&s, Z_FINISH);
if (r != Z_OK && r != Z_STREAM_END) {
printk("inflate returned %d\n", r);
exit();
}
*lenp = s.next_out - (unsigned char *) dst;
inflateEnd(&s);
}
void decompress_kernel(int kernel_size, void * zimage_start, int len,
void * initrd_start, int initrd_len ) {
u_char *parea;
RESIDUAL* rescopy;
int zimage_size= len;
/* That's a mess, we have to copy the residual data twice just in
* case it happens to be in the low memory area where the kernel
* is going to be unpacked. Later we have to copy it back to
* lower addresses because only the lowest part of memory is mapped
* during boot.
*/
parea=__palloc(kernel_size, PA_LOW);
if(!parea) {
printk("Not enough memory to uncompress the kernel.");
exit();
}
rescopy=salloc(sizeof(RESIDUAL));
/* Let us hope that the residual data is aligned on a word boundary */
*rescopy = *bd->residual;
bd->residual = (void *)PAGE_ALIGN(kernel_size);
/* Note that this clears the bss as a side effect, so some code
* with ugly special case for SMP could be removed from the kernel!
*/
memset(parea, 0, kernel_size);
printk("\nUncompressing the kernel...\n");
gunzip(parea, kernel_size, zimage_start, &zimage_size);
bd->of_entry = 0;
bd->load_address = 0;
bd->r6 = (char *)bd->residual+PAGE_ALIGN(sizeof(RESIDUAL));
bd->r7 = bd->r6+strlen(bd->cmd_line);
if ( initrd_len ) {
/* We have to leave some room for the hash table and for the
* whole array of struct page. The hash table would be better
* located at the end of memory if possible. With some bridges
* DMA from the last pages of memory is slower because
* prefetching from PCI has to be disabled to avoid accessing
* non existing memory. So it is the ideal place to put the
* hash table.
*/
unsigned tmp = rescopy->TotalMemory;
/* It's equivalent to tmp & (-tmp), but using the negation
* operator on unsigned variables looks so ugly.
*/
if ((tmp & (~tmp+1)) != tmp) tmp <<= 1; /* Next power of 2 */
tmp /= 256; /* Size of hash table */
if (tmp> (2<<20)) tmp=2<<20;
tmp = tmp*2 + 0x40000; /* Alignment can double size + 256 kB */
tmp += (rescopy->TotalMemory / PAGE_SIZE)
* sizeof(struct page);
bd->load_address = (void *)PAGE_ALIGN((int)bd->r7 + tmp);
bd->of_entry = (char *)bd->load_address+initrd_len;
}
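/* (Illustrative arithmetic, not from the original source: with 64 MB
* of RAM, tmp is already a power of two, so the hash table estimate is
* 64 MB / 256 = 256 kB; doubling for alignment and adding 0x40000
* gives 768 kB, and 16384 pages times the 40-byte struct page add
* another 640 kB, so the initrd is placed roughly 1.4 MB above r7.)
*/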
#ifdef DEBUG
printk("Kernel at 0x%p, size=0x%x\n", NULL, kernel_size);
printk("Initrd at 0x%p, size=0x%x\n",bd->load_address, initrd_len);
printk("Residual data at 0x%p\n", bd->residual);
printk("Command line at 0x%p\n",bd->r6);
#endif
printk("done\nNow booting...\n");
MMUoff(); /* We need to access address 0 ! */
codemove(0, parea, kernel_size, bd->cache_lsize);
codemove(bd->residual, rescopy, sizeof(RESIDUAL), bd->cache_lsize);
codemove(bd->r6, bd->cmd_line, sizeof(bd->cmd_line), bd->cache_lsize);
/* codemove checks for 0 length */
codemove(bd->load_address, initrd_start, initrd_len, bd->cache_lsize);
}
static int ticks_per_ms=0;
/*
* This is based on rtems_bsp_delay from libcpu
*/
void
boot_udelay(uint32_t _microseconds)
{
uint32_t start, ticks, now;
ticks = _microseconds * ticks_per_ms / 1000;
CPU_Get_timebase_low( start );
do {
CPU_Get_timebase_low( now );
} while (now - start < ticks);
}
void
setup_hw(void)
{
char *cp, ch;
register RESIDUAL * res;
/* PPC_DEVICE * nvram; */
struct pci_dev *default_vga;
int timer, err;
u_short default_vga_cmd;
res=bd->residual;
default_vga=NULL;
default_vga_cmd = 0;
#define vpd res->VitalProductData
if (_read_PPC_PVR()>>16 != 1) {
if ( res && vpd.ProcessorBusHz ) {
ticks_per_ms = vpd.ProcessorBusHz/
(vpd.TimeBaseDivisor ? vpd.TimeBaseDivisor : 4000);
} else {
ticks_per_ms = 16500; /* assume 66 MHz on bus */
}
}
select_console(CONSOLE_LOG);
/* We check that the keyboard is present and immediately
* select the serial console if not.
*/
#if defined(BSP_KBD_IOBASE)
err = kbdreset();
if (err) select_console(CONSOLE_SERIAL);
#else
err = 1;
select_console(CONSOLE_SERIAL);
#endif
printk("\nModel: %s\nSerial: %s\n"
"Processor/Bus frequencies (Hz): %ld/%ld\n"
"Time Base Divisor: %ld\n"
"Memory Size: %lx\n"
"Residual: %lx (length %lu)\n",
vpd.PrintableModel,
vpd.Serial,
vpd.ProcessorHz,
vpd.ProcessorBusHz,
(vpd.TimeBaseDivisor ? vpd.TimeBaseDivisor : 4000),
res->TotalMemory,
(unsigned long)res,
res->ResidualLength);
/* This reconfigures all the PCI subsystem */
pci_init();
/* The Motorola NT firmware does not set the correct mem size */
if ( vpd.FirmwareSupplier == 0x10000 ) {
int memsize;
memsize = find_max_mem(bd->pci_devices);
if ( memsize != res->TotalMemory ) {
printk("Changed Memory size from %lx to %x\n",
res->TotalMemory, memsize);
res->TotalMemory = memsize;
res->GoodMemory = memsize;
}
}
#define ENABLE_VGA_USAGE
#undef ENABLE_VGA_USAGE
#ifdef ENABLE_VGA_USAGE
/* Find the primary VGA device, choosing the first one found
* if none is enabled. The basic loop structure has been copied
* from linux/drivers/char/bttv.c by Alan Cox.
*/
for (p = bd->pci_devices; p; p = p->next) {
u_short cmd;
if (p->class != PCI_CLASS_NOT_DEFINED_VGA &&
((p->class) >> 16 != PCI_BASE_CLASS_DISPLAY))
continue;
if (p->bus->number != 0) {
printk("VGA device not on bus 0 not initialized!\n");
continue;
}
/* Only one can be active in text mode, which for now will
* be assumed as equivalent to having I/O response enabled.
*/
pci_bootloader_read_config_word(p, PCI_COMMAND, &cmd);
if(cmd & PCI_COMMAND_IO || !default_vga) {
default_vga=p;
default_vga_cmd=cmd;
}
}
/* Disable the enabled VGA device, if any. */
if (default_vga)
pci_bootloader_write_config_word(default_vga, PCI_COMMAND,
default_vga_cmd&
~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
init_v86();
/* Same loop copied from bttv.c, this time doing the serious work */
for (p = bd->pci_devices; p; p = p->next) {
u_short cmd;
if (p->class != PCI_CLASS_NOT_DEFINED_VGA &&
((p->class) >> 16 != PCI_BASE_CLASS_DISPLAY))
continue;
if (p->bus->number != 0) continue;
pci_bootloader_read_config_word(p, PCI_COMMAND, &cmd);
pci_bootloader_write_config_word(p, PCI_COMMAND,
cmd|PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
printk("Calling the emulator.\n");
em86_main(p);
pci_bootloader_write_config_word(p, PCI_COMMAND, cmd);
}
cleanup_v86_mess();
#endif
/* Reenable the primary VGA device */
if (default_vga) {
pci_bootloader_write_config_word(default_vga, PCI_COMMAND,
default_vga_cmd|
(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
if (err) {
printk("Keyboard error %d, using serial console!\n",
err);
} else {
select_console(CONSOLE_VGA);
}
} else if (!err) {
select_console(CONSOLE_SERIAL);
if (bd->cmd_line[0] == '\0') {
strcat(&bd->cmd_line[0], CONSOLE_ON_SERIAL);
}
else {
int s = strlen (bd->cmd_line);
bd->cmd_line[s] = ' '; /* separating blank before the appended option */
bd->cmd_line[s + 1] = '\0';
strcat(&bd->cmd_line[0], CONSOLE_ON_SERIAL);
}
}
#if 0
/* In the future we may use the NVRAM to store default
* kernel parameters.
*/
nvram=residual_find_device(~0UL, NULL, SystemPeripheral, NVRAM,
~0UL, 0);
if (nvram) {
PnP_TAG_PACKET * pkt;
switch (nvram->DevId.Interface) {
case IndirectNVRAM:
pkt=PnP_find_packet(res->DevicePnpHeap
+nvram->AllocatedOffset,
)
}
}
#endif
printk("\nRTEMS 4.x/PPC load: ");
timer = 0;
cp = bd->cmd_line+strlen(bd->cmd_line);
while (timer++ < 5*1000) {
if (debug_tstc()) {
while ((ch = debug_getc()) != '\n' && ch != '\r') {
if (ch == '\b' || ch == 0177) {
if (cp != bd->cmd_line) {
cp--;
printk("\b \b");
}
} else {
*cp++ = ch;
debug_putc(ch);
}
}
break; /* Exit 'timer' loop */
}
boot_udelay(1000); /* 1 msec */
}
*cp = 0;
}
/* Functions to deal with the residual data */
static int same_DevID(unsigned short vendor,
unsigned short Number,
unsigned char * str)
{
static unsigned const char hexdigit[]="0123456789ABCDEF";
if (strlen((char*)str)!=7) return 0;
if ( ( ((vendor>>10)&0x1f)+'A'-1 == str[0]) &&
( ((vendor>>5)&0x1f)+'A'-1 == str[1]) &&
( (vendor&0x1f)+'A'-1 == str[2]) &&
(hexdigit[(Number>>12)&0x0f] == str[3]) &&
(hexdigit[(Number>>8)&0x0f] == str[4]) &&
(hexdigit[(Number>>4)&0x0f] == str[5]) &&
(hexdigit[Number&0x0f] == str[6]) ) return 1;
return 0;
}
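/* A hypothetical inverse of same_DevID() above (not in the original
* source), shown to document the encoding: the vendor is three 5-bit
* letters biased by 'A'-1, the device number four hex digits, so
* 0x35F4/0x0001 encodes "MOT0001".
*/
static void format_DevID(unsigned short vendor,
unsigned short Number,
unsigned char str[8])
{
static unsigned const char hexdigit[]="0123456789ABCDEF";
str[0] = ((vendor>>10)&0x1f)+'A'-1;
str[1] = ((vendor>>5)&0x1f)+'A'-1;
str[2] = (vendor&0x1f)+'A'-1;
str[3] = hexdigit[(Number>>12)&0x0f];
str[4] = hexdigit[(Number>>8)&0x0f];
str[5] = hexdigit[(Number>>4)&0x0f];
str[6] = hexdigit[Number&0x0f];
str[7] = '\0';
}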
PPC_DEVICE *residual_find_device(unsigned long BusMask,
unsigned char * DevID,
int BaseType,
int SubType,
int Interface,
int n)
{
int i;
RESIDUAL *res = bd->residual;
if ( !res || !res->ResidualLength ) return NULL;
for (i=0; i<res->ActualNumDevices; i++) {
#define Dev res->Devices[i].DeviceId
if ( (Dev.BusId&BusMask) &&
(BaseType==-1 || Dev.BaseType==BaseType) &&
(SubType==-1 || Dev.SubType==SubType) &&
(Interface==-1 || Dev.Interface==Interface) &&
(DevID==NULL || same_DevID((Dev.DevId>>16)&0xffff,
Dev.DevId&0xffff, DevID)) &&
!(n--) ) return res->Devices+i;
#undef Dev
}
return 0;
}
PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
unsigned packet_tag,
int n)
{
unsigned mask, masked_tag, size;
if(!p) return 0;
if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
masked_tag = packet_tag&mask;
for(; *p != END_TAG; p+=size) {
if ((*p & mask) == masked_tag && !(n--))
return (PnP_TAG_PACKET *) p;
if (tag_type(*p))
size=ld_le16((unsigned short *)(p+1))+3;
else
size=tag_small_count(*p)+1;
}
return 0; /* not found */
}
PnP_TAG_PACKET *PnP_find_small_vendor_packet(unsigned char *p,
unsigned packet_type,
int n)
{
int next=0;
while (p) {
p = (unsigned char *) PnP_find_packet(p, 0x70, next);
if (p && p[1]==packet_type && !(n--))
return (PnP_TAG_PACKET *) p;
next = 1;
};
return 0; /* not found */
}
PnP_TAG_PACKET *PnP_find_large_vendor_packet(unsigned char *p,
unsigned packet_type,
int n)
{
int next=0;
while (p) {
p = (unsigned char *) PnP_find_packet(p, 0x84, next);
if (p && p[3]==packet_type && !(n--))
return (PnP_TAG_PACKET *) p;
next = 1;
};
return 0; /* not found */
}
/* Find out the amount of installed memory. For MPC105 and IBM 660 this
* can be done by finding the bank with the highest memory ending address
*/
int
find_max_mem( struct pci_dev *dev )
{
u_char banks,tmp;
int i, top, max;
max = 0;
for ( ; dev; dev = dev->next) {
if ( ((dev->vendor == PCI_VENDOR_ID_MOTOROLA) &&
(dev->device == PCI_DEVICE_ID_MOTOROLA_MPC105)) ||
((dev->vendor == PCI_VENDOR_ID_IBM) &&
(dev->device == 0x0037/*IBM 660 Bridge*/)) ) {
pci_bootloader_read_config_byte(dev, 0xa0, &banks);
for (i = 0; i < 8; i++) {
if ( banks & (1<<i) ) {
pci_bootloader_read_config_byte(dev, 0x90+i, &tmp);
top = tmp;
pci_bootloader_read_config_byte(dev, 0x98+i, &tmp);
top |= (tmp&3)<<8;
if ( top > max ) max = top;
}
}
if ( max ) return ((max+1)<<20);
else return(0);
}
}
return(0);
}
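/* (As read above: host-bridge registers 0x90-0x97 hold each bank's
* ending address in MB, 0x98-0x9f two extra high bits, and 0xa0 the
* bank-enable mask, hence the (max+1)<<20 to convert megabytes back
* to bytes. This description is inferred from the code, not from the
* MPC105/IBM 660 manuals.)
*/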

View File

@@ -1,996 +0,0 @@
/*
* mm.c -- Crude memory management for early boot.
*
* Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
*
* Modified to compile in RTEMS development environment
* by Eric Valette
*
* Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
/* This code is a crude memory manager for early boot for LinuxPPC.
* As such, it does not try to perform many optimizations depending
* on the processor, it only uses features which are common to
* all processors (no BATs...).
*
* On PreP platforms (the only ones on which it works for now),
* it maps 1:1 all RAM/ROM and I/O space as claimed by the
* residual data. The holes between these areas can be virtually
* remapped to any of these, since for some functions it is very handy
* to have virtually contiguous but physically discontiguous memory.
*
* Physical memory allocation is also very crude, since it's only
* designed to manage a small number of large chunks. For valloc/vfree
* and palloc/pfree, the unit of allocation is the 4kB page.
*
* The salloc/sfree has been added after tracing gunzip and seeing
* how it performed a very large number of small allocations.
* For these the unit of allocation is 8 bytes (the s stands for
* small or subpage). This memory is cleared when allocated.
*
*/
#include <rtems/bspIo.h>
#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <limits.h>
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
extern void (tlb_handlers)(void);
extern void (_handler_glue)(void);
/* We use our own kind of simple memory areas for the loader, but
* we want to avoid potential clashes with kernel includes.
* Here a map maps contiguous areas from base to end,
* the firstpte entry corresponds to physical address and has the low
* order bits set for caching and permission.
*/
typedef struct _map {
struct _map *next;
u_long base;
u_long end;
u_long firstpte;
} map;
/* The LSB of the firstpte entries on map lists other than mappings
* are constants which can be checked for debugging. All these constants
* have the bit of weight 4 set; this bit is zero in the mappings list
* entries. The firstpte&7 value is:
* - 0 or 1 should not happen
* - 2 for RW actual virtual->physical mappings
* - 3 for RO actual virtual->physical mappings
* - 6 for free areas to be suballocated by salloc
* - 7 for salloc'ated areas
* - 4 or 5 for all others, in this case firstpte & 63 is
* - 4 for unused maps (on the free list)
* - 12 for free physical memory
* - 13 for physical memory in use
* - 20 for free virtual address space
* - 21 for allocated virtual address space
* - 28 for physical memory space suballocated by salloc
* - 29 for physical memory that can't be freed
*/
#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7
#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29
SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(PPC_DAR);
/* We need a few statically allocated free maps to bootstrap the
* memory management */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
{free_maps+2, 0, 0, MAP_FREE},
{free_maps+3, 0, 0, MAP_FREE},
{NULL, 0, 0, MAP_FREE}};
struct _mm_private {
void *sdr1;
u_long hashmask;
map *freemaps; /* Pool of unused map structs */
map *mappings; /* Sorted list of virtual->physical mappings */
map *physavail; /* Unallocated physical address space */
map *physused; /* Allocated physical address space */
map *physperm; /* Permanently allocated physical space */
map *virtavail; /* Unallocated virtual address space */
map *virtused; /* Allocated virtual address space */
map *sallocfree; /* Free maps for salloc */
map *sallocused; /* Used maps for salloc */
map *sallocphys; /* Physical areas used by salloc */
u_int hashcnt; /* Used to cycle in PTEG when they overflow */
} mm_private = {hashmask: 0xffc0,
freemaps: free_maps+0};
/* A simplified hash table entry declaration */
typedef struct _hash_entry {
int key;
u_long rpn;
} hash_entry;
void print_maps(map *, const char *);
/* The handler used for all exceptions, although for now it is only
* designed to properly handle MMU interrupts and fill the hash table.
*/
void _handler(int vec, ctxt *p) {
map *area;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
u_long vaddr, cause;
if (vec==4 || vec==7) { /* ISI exceptions are different */
vaddr = p->nip;
cause = p->msr;
} else { /* Valid for DSI and alignment exceptions */
vaddr = _read_PPC_DAR();
cause = _read_DSISR();
}
if (vec==3 || vec==4) {
/* Panic if the fault is not PTE not found. */
if (!(cause & 0x40000000)) {
MMUon();
printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
hang("Memory protection violation at ", vaddr, p);
}
for(area=mm->mappings; area; area=area->next) {
if(area->base<=vaddr && vaddr<=area->end) break;
}
if (area) {
u_long hash, vsid, rpn;
hash_entry volatile *hte, *_hte1;
u_int i, alt=0, flushva;
vsid = _read_SR((void *)vaddr);
rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
hash = vsid<<6;
hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
hash &= mm->hashmask;
/* Find an empty entry in the PTEG, else
* replace a random one.
*/
hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
for (i=0; i<8; i++) {
if (hte[i].key>=0) goto found;
}
hash ^= mm->hashmask;
alt = 0x40; _hte1 = hte;
hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
for (i=0; i<8; i++) {
if (hte[i].key>=0) goto found;
}
alt = 0;
hte = _hte1;
/* Choose a victim entry and replace it. There might be
* better policies to choose the victim, but in a boot
* loader we want simplicity as long as it works.
*
* We would not need to invalidate the TLB entry since
* the mapping is still valid. But this would be a mess
* when unmapping so we make sure that the TLB is a
* subset of the hash table under all circumstances.
*/
i = mm->hashcnt;
mm->hashcnt = (mm->hashcnt+1)%8;
/* Note that the hash is already complemented here ! */
flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
if (hte[i].key&0x40) flushva^=0x3ff000;
flushva |= ((hte[i].key<<21)&0xf0000000)
| ((hte[i].key<<22)&0x0fc00000);
hte[i].key=0;
asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
found:
hte[i].rpn = rpn;
asm volatile("eieio": : );
hte[i].key = 0x80000000|(vsid<<7)|alt|
((vaddr>>22)&0x3f);
return;
} else {
MMUon();
printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
hang("\nInvalid memory access attempt at ", vaddr, p);
}
} else {
MMUon();
printk(
"\nPanic: vector=%d, dsisr=%lx, faultaddr =%lx, "
"msr=%lx opcode=%x\n", vec,
cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
if (vec == 7) {
unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
printk("Hexdecimal code at address %p = %x\n", ptr, *ptr);
}
hang("Program or alignment exception at ", vaddr, p);
}
}
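/* A minimal restatement (not part of the original file) of the primary
* hash computed above, isolated as a pure function: with hashmask ==
* 0xffc0 it selects one of 1024 PTEGs in a 64 kB hash table, each PTEG
* holding eight 8-byte hash_entry slots.
*/
static u_long pteg_offset(u_long vsid, u_long vaddr, u_long hashmask)
{
u_long hash = vsid<<6;
hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
return hash & hashmask; /* byte offset from the sdr1 base */
}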
/* Generic routines for map handling.
*/
static inline
void free_map(map *p) {
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
if (!p) return;
p->next=mm->freemaps;
mm->freemaps=p;
p->firstpte=MAP_FREE;
}
/* Sorted insertion in linked list */
static
int insert_map(map **head, map *p) {
map *q = *head;
if (!p) return 0;
if (q && (q->base < p->base)) {
for(;q->next && q->next->base<p->base; q = q->next);
if ((q->end >= p->base) ||
(q->next && p->end>=q->next->base)) {
free_map(p);
printk("Overlapping areas!\n");
return 1;
}
p->next = q->next;
q->next = p;
} else { /* Insert at head */
if (q && (p->end >= q->base)) {
free_map(p);
printk("Overlapping areas!\n");
return 1;
}
p->next = q;
*head = p;
}
return 0;
}
/* Removal from linked list */
static
map *remove_map(map **head, map *p) {
map *q = *head;
if (!p || !q) return NULL;
if (q==p) {
*head = q->next;
return p;
}
for(;q && q->next!=p; q=q->next);
if (q) {
q->next=p->next;
return p;
} else {
return NULL;
}
}
static
map *remove_map_at(map **head, void * vaddr) {
map *p, *q = *head;
if (!vaddr || !q) return NULL;
if (q->base==(u_long)vaddr) {
*head = q->next;
return q;
}
while (q->next && q->next->base != (u_long)vaddr) q=q->next;
p=q->next;
if (p) q->next=p->next;
return p;
}
static inline
map * alloc_map_page(void) {
map *from, *p;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
/* printk("Allocating new map page !"); */
/* Get the highest page */
for (from=mm->physavail; from && from->next; from=from->next);
if (!from) return NULL;
from->end -= PAGE_SIZE;
mm->freemaps = (map *) (from->end+1);
for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
p->next = p+1;
p->firstpte = MAP_FREE;
}
(p-1)->next=0;
/* Take the last one as pointer to self and insert
* the map into the permanent map list.
*/
p->firstpte = MAP_PERM_PHYS;
p->base=(u_long) mm->freemaps;
p->end = p->base+PAGE_SIZE-1;
insert_map(&mm->physperm, p);
if (from->end+1 == from->base)
free_map(remove_map(&mm->physavail, from));
return mm->freemaps;
}
static
map * alloc_map(void) {
map *p;
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
p = mm->freemaps;
if (!p) {
p=alloc_map_page();
}
if(p) mm->freemaps=p->next;
return p;
}
static
void coalesce_maps(map *p) {
while(p) {
if (p->next && (p->end+1 == p->next->base)) {
map *q=p->next;
p->end=q->end;
p->next=q->next;
free_map(q);
} else {
p = p->next;
}
}
}
/* These routines are used to find the free memory zones to avoid
* overlapping destructive copies when initializing.
* They work from the top because of the way we want to boot.
* In the following the term zone refers to the memory described
* by one or several contiguous so called segments in the
* residual data.
*/
#define STACK_PAGES 2
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
u_long i, newmin=0, size=0;
for(i=0; i<res->ActualNumMemSegs; i++) {
if (res->Segs[i].Usage & flags
&& res->Segs[i].BasePage<lowpage
&& res->Segs[i].BasePage>newmin) {
newmin=res->Segs[i].BasePage;
size=res->Segs[i].PageCount;
}
}
return newmin+size;
}
static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
u_long i;
int progress;
do {
progress=0;
for (i=0; i<res->ActualNumMemSegs; i++) {
if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
== highpage)
&& res->Segs[i].Usage & flags) {
highpage=res->Segs[i].BasePage;
progress=1;
}
}
} while(progress);
return highpage;
}
/* The Motorola NT firmware does not provide any setting in the residual
* data about memory segment usage. The following table provides enough
* info so that this bootloader can work.
*/
MEM_MAP seg_fix[] = {
{ 0x2000, 0xFFF00, 0x00100 },
{ 0x0020, 0x02000, 0x7E000 },
{ 0x0008, 0x00800, 0x00168 },
{ 0x0004, 0x00000, 0x00005 },
{ 0x0001, 0x006F1, 0x0010F },
{ 0x0002, 0x006AD, 0x00044 },
{ 0x0010, 0x00005, 0x006A8 },
{ 0x0010, 0x00968, 0x00698 },
{ 0x0800, 0xC0000, 0x3F000 },
{ 0x0600, 0xBF800, 0x00800 },
{ 0x0500, 0x81000, 0x3E800 },
{ 0x0480, 0x80800, 0x00800 },
{ 0x0440, 0x80000, 0x00800 } };
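/* (Each entry is { Usage, BasePage, PageCount } with 4 kB pages; the
* first row, for instance, covers the 1 MB firmware ROM at
* 0xFFF00000-0xFFFFFFFF. The Usage bits are the segment usage flags
* tested by find_next_zone() above.)
*/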
/* The Motorola NT firmware does not set up all required info in the residual
* data. This routine changes some things so that the bootloader and
* Linux are happy.
*/
static void
fix_residual( RESIDUAL *res )
{
#if 0
PPC_DEVICE *hostbridge;
#endif
int i;
/* Missing memory segment information */
res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
for (i=0; i<res->ActualNumMemSegs; i++) {
res->Segs[i].Usage = seg_fix[i].Usage;
res->Segs[i].BasePage = seg_fix[i].BasePage;
res->Segs[i].PageCount = seg_fix[i].PageCount;
}
/* The following should be fixed in the current version of the
* kernel and of the bootloader.
*/
#if 0
/* PPCBug has this zero */
res->VitalProductData.CacheLineSize = 0;
/* Motorola NT firmware sets TimeBaseDivisor to 0 */
if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
res->VitalProductData.TimeBaseDivisor = 4000;
}
/* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
* sets "PCIBridgeDirect". This bootloader and linux works better if
* BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
*/
hostbridge=residual_find_device(PCIDEVICE, NULL,
BridgeController,
PCIBridge, -1, 0);
if (hostbridge) {
hostbridge->DeviceId.BusId = PROCESSORDEVICE;
hostbridge->DeviceId.Interface = PCIBridgeIndirect;
}
#endif
}
/* This routine is the first C code called with very little stack space!
* Its goal is to find where the boot image can be moved. This will
* be the highest address with enough room.
*/
int early_setup(u_long image_size) {
register RESIDUAL *res = bd->residual;
u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;
if ( residual_fw_is_qemu( res ) ) {
/* save command-line - QEMU firmware sets R6/R7 to
* commandline start/end (NON-PReP STD)
*/
int len = bd->r7 - bd->r6;
if ( len > 0 ) {
if ( len > sizeof(bd->cmd_line) - 1 )
len = sizeof(bd->cmd_line) - 1;
codemove(bd->cmd_line, bd->r6, len, bd->cache_lsize);
bd->cmd_line[len] = 0;
}
}
/* Fix residual if we are loaded by Motorola NT firmware */
if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
fix_residual( res );
/* FIXME: if OF we should do something different */
if( !bd->of_entry && res &&
res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
u_long lowpage=ULONG_MAX, highpage;
u_long imghigh=0, stkhigh=0;
/* Find the highest contiguous zone which is large enough,
consisting of free and BootImage sections. */
/* Find 3 free areas of memory, one for the main image, one
* for the stack (STACK_PAGES), and page one to put the map
* structures. They are allocated from the top of memory.
* In most cases the stack will be put just below the image.
*/
while((highpage =
find_next_zone(res, lowpage, BootImage|Free))) {
lowpage=find_zone_start(res, highpage, BootImage|Free);
if ((highpage-lowpage)>minpages &&
highpage>imghigh) {
imghigh=highpage;
highpage -=minpages;
}
if ((highpage-lowpage)>STACK_PAGES &&
highpage>stkhigh) {
stkhigh=highpage;
highpage-=STACK_PAGES;
}
}
bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
bd->stack=(void *) (stkhigh<<PAGE_SHIFT);
/* The code mover is put at the lowest possible place
* of free memory. If this corresponds to the loaded boot
* partition image it does not matter because it overrides
* the unused part of it (x86 code).
*/
bd->mover=(void *) (lowpage<<PAGE_SHIFT);
/* Let us flush the caches in all cases. After all it should
* not harm even on 601 and we don't care about performance.
* Right now it's easy since all processors have a line size
* of 32 bytes. Once again residual data has proved unreliable.
*/
bd->cache_lsize = 32;
}
/* For now we always assume that it is successful; we should
* handle the case of insufficient memory better.
*/
return 0;
}
void * valloc(u_long size) {
map *p, *q;
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
if (size==0) return NULL;
size=PAGE_ALIGN(size)-1;
for (p=mm->virtavail; p; p=p->next) {
if (p->base+size <= p->end) break;
}
if(!p) return NULL;
q=alloc_map();
q->base=p->base;
q->end=q->base+size;
q->firstpte=MAP_USED_VIRT;
insert_map(&mm->virtused, q);
if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
else p->base += size+1;
return (void *)q->base;
}
static
void vflush(map *virtmap) {
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
u_long i, limit=(mm->hashmask>>3)+8;
hash_entry volatile *p=(hash_entry *) mm->sdr1;
/* PTE handling is simple since the processor never updates
* the entries. Writable pages always have the C bit set and
* all valid entries have the R bit set. From the processor
* point of view the hash table is read only.
*/
for (i=0; i<limit; i++) {
if (p[i].key<0) {
u_long va;
va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
if (p[i].key&0x40) va^=0x3ff000;
va |= ((p[i].key<<21)&0xf0000000)
| ((p[i].key<<22)&0x0fc00000);
if (va>=virtmap->base && va<=virtmap->end) {
p[i].key=0;
asm volatile("sync; tlbie %0; sync" : :
"r" (va));
}
}
}
}
void vfree(void *vaddr) {
map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
/* Flush memory queues */
asm volatile("sync": : : "memory");
virtmap = remove_map_at(&mm->virtused, vaddr);
if (!virtmap) return;
/* Remove mappings corresponding to virtmap */
for (physmap=mm->mappings; physmap; ) {
map *nextmap=physmap->next;
if (physmap->base>=virtmap->base
&& physmap->base<virtmap->end) {
free_map(remove_map(&mm->mappings, physmap));
}
physmap=nextmap;
}
vflush(virtmap);
virtmap->firstpte= MAP_FREE_VIRT;
insert_map(&mm->virtavail, virtmap);
coalesce_maps(mm->virtavail);
}
void vunmap(void *vaddr) {
map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
/* Flush memory queues */
asm volatile("sync": : : "memory");
/* vaddr must be within one of the vm areas in use and
* then must correspond to one of the physical areas
*/
for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
if (virtmap->base<=(u_long)vaddr &&
virtmap->end>=(u_long)vaddr) break;
}
if (!virtmap) return;
physmap = remove_map_at(&mm->mappings, vaddr);
if(!physmap) return;
vflush(physmap);
free_map(physmap);
}
int vmap(void *vaddr, u_long p, u_long size) {
map *q;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
size=PAGE_ALIGN(size);
if(!size) return 1;
/* Check that the requested area fits in one vm image */
for (q=mm->virtused; q; q=q->next) {
if ((q->base <= (u_long)vaddr) &&
(q->end>=(u_long)vaddr+size -1)) break;
}
if (!q) return 1;
q= alloc_map();
if (!q) return 1;
q->base = (u_long)vaddr;
q->end = (u_long)vaddr+size-1;
q->firstpte = p;
return insert_map(&mm->mappings, q);
}
static
void create_identity_mappings(int type, int attr) {
u_long lowpage=ULONG_MAX, highpage;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
RESIDUAL * res=bd->residual;
while((highpage = find_next_zone(res, lowpage, type))) {
map *p;
lowpage=find_zone_start(res, highpage, type);
p=alloc_map();
/* Do not map page 0 to catch null pointers */
lowpage = lowpage ? lowpage : 1;
p->base=lowpage<<PAGE_SHIFT;
p->end=(highpage<<PAGE_SHIFT)-1;
p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
insert_map(&mm->mappings, p);
}
}
static inline
void add_free_map(u_long base, u_long end) {
map *q=NULL;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
if (base<end) q=alloc_map();
if (!q) return;
q->base=base;
q->end=end-1;
q->firstpte=MAP_FREE_VIRT;
insert_map(&mm->virtavail, q);
}
static inline
void create_free_vm(void) {
map *p;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
for(p=mm->mappings; p; p=p->next) {
add_free_map(vaddr, p->base);
vaddr=p->end+1;
}
/* Special end of memory case */
if (vaddr) add_free_map(vaddr,0);
}
/* Memory management initialization.
* Set up the mapping lists.
*/
static inline
void add_perm_map(u_long start, u_long size) {
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
map *p=alloc_map();
p->base = start;
p->end = start + size - 1;
p->firstpte = MAP_PERM_PHYS;
insert_map(& mm->physperm , p);
}
void mm_init(u_long image_size)
{
u_long lowpage=ULONG_MAX, highpage;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
RESIDUAL * res=bd->residual;
int i;
map *p;
/* The checks are simplified by the fact that the image
* and stack area are always allocated at the upper end
* of a free block.
*/
while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
lowpage=find_zone_start(res, highpage, BootImage|Free);
if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
== highpage) {
highpage=(u_long)(bd->image)>>PAGE_SHIFT;
add_perm_map((u_long)bd->image, image_size);
}
if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
highpage -= STACK_PAGES;
add_perm_map(highpage<<PAGE_SHIFT,
STACK_PAGES*PAGE_SIZE);
}
/* Protect the interrupt handlers that we need ! */
if (lowpage<2) lowpage=2;
/* Check for the special case of full area! */
if (highpage>lowpage) {
p = alloc_map();
p->base = lowpage<<PAGE_SHIFT;
p->end = (highpage<<PAGE_SHIFT)-1;
p->firstpte=MAP_FREE_PHYS;
insert_map(&mm->physavail, p);
}
}
/* Allocate the hash table */
mm->sdr1=__palloc(0x10000, PA_PERM|16);
_write_SDR1((u_long)mm->sdr1);
memset(mm->sdr1, 0, 0x10000);
mm->hashmask = 0xffc0;
/* Setup the segment registers as we want them */
for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
/* Create the maps for the physical memory; FirmwareCode does not
* seem to be necessary. ROM is mapped read-only to reduce the risk
* of reprogramming it because it's often Flash and some are
* amazingly easy to overwrite.
*/
create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
FirmwareStack, PTE_RAM);
create_identity_mappings(SystemROM, PTE_ROM);
create_identity_mappings(IOMemory|SystemIO|SystemRegs|
PCIAddr|PCIConfig|ISAAddr, PTE_IO);
create_free_vm();
/* Install our own MMU and trap handlers. */
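/* (0x300: data storage, 0x400: instruction storage, 0x600: alignment,
* 0x700: program check - matching the vec values 3, 4, 6 and 7 that
* _handler() distinguishes.)
*/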
codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}
void * salloc(u_long size) {
map *p, *q;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
if (size==0) return NULL;
size = (size+7)&~7;
for (p=mm->sallocfree; p; p=p->next) {
if (p->base+size <= p->end) break;
}
if(!p) {
void *m;
m = __palloc(size, PA_SUBALLOC);
p = alloc_map();
if (!m && !p) return NULL;
p->base = (u_long) m;
p->firstpte = MAP_FREE_SUBS;
p->end = (u_long)m+PAGE_ALIGN(size)-1;
insert_map(&mm->sallocfree, p);
coalesce_maps(mm->sallocfree);
coalesce_maps(mm->sallocphys);
};
q=alloc_map();
q->base=p->base;
q->end=q->base+size-1;
q->firstpte=MAP_USED_SUBS;
insert_map(&mm->sallocused, q);
if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
else p->base += size;
memset((void *)q->base, 0, size);
return (void *)q->base;
}
void sfree(void *p) {
map *q;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
q=remove_map_at(&mm->sallocused, p);
if (!q) return;
q->firstpte=MAP_FREE_SUBS;
insert_map(&mm->sallocfree, q);
coalesce_maps(mm->sallocfree);
}
/* first/last area fit, flags is a power of 2 indicating the required
* alignment. The algorithms are stupid because we expect very little
* fragmentation of the areas, if any. The unit of allocation is the page.
* The allocation is by default performed from higher addresses down,
* unless flags&PA_LOW is true.
*/
void * __palloc(u_long size, int flags)
{
u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
map *newmap, *frommap, *p, *splitmap=0;
map **queue;
u_long qflags;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
/* Asking for a size which is not a multiple of the alignment
is likely to be an error. */
if (size & mask) return NULL;
size = PAGE_ALIGN(size);
if(!size) return NULL;
if (flags&PA_SUBALLOC) {
queue = &mm->sallocphys;
qflags = MAP_SUBS_PHYS;
} else if (flags&PA_PERM) {
queue = &mm->physperm;
qflags = MAP_PERM_PHYS;
} else {
queue = &mm->physused;
qflags = MAP_USED_PHYS;
}
/* We need to allocate that one now so no two allocations may attempt
* to take the same memory simultaneously. Alloc_map_page does
* not call back here to avoid infinite recursion in alloc_map.
*/
if (mask&PAGE_MASK) {
splitmap=alloc_map();
if (!splitmap) return NULL;
}
for (p=mm->physavail, frommap=NULL; p; p=p->next) {
u_long high = p->end;
u_long limit = ((p->base+mask)&~mask) + size-1;
if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
frommap = p;
if (flags&PA_LOW) break;
}
}
if (!frommap) {
if (splitmap) free_map(splitmap);
return NULL;
}
newmap=alloc_map();
if (flags&PA_LOW) {
newmap->base = (frommap->base+mask)&~mask;
} else {
newmap->base = (frommap->end +1 - size) & ~mask;
}
newmap->end = newmap->base+size-1;
newmap->firstpte = qflags;
/* Add a fragment if we don't allocate until the end. */
if (splitmap) {
splitmap->base=newmap->base+size;
splitmap->end=frommap->end;
splitmap->firstpte= MAP_FREE_PHYS;
frommap->end=newmap->base-1;
} else if (flags & PA_LOW) {
frommap->base=newmap->base+size;
} else {
frommap->end=newmap->base-1;
}
/* Remove a fragment if it becomes empty. */
if (frommap->base == frommap->end+1) {
free_map(remove_map(&mm->physavail, frommap));
}
if (splitmap) {
if (splitmap->base == splitmap->end+1) {
free_map(remove_map(&mm->physavail, splitmap));
} else {
insert_map(&mm->physavail, splitmap);
}
}
insert_map(queue, newmap);
return (void *) newmap->base;
}
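/* A usage sketch; the wrapper function is hypothetical, but the two
* calls mirror real ones elsewhere in this bootloader: the low bits of
* flags give log2 of the required alignment, the PA_* bits select the
* queue and the allocation direction.
*/
static void palloc_examples(u_long image_size)
{
void *hash = __palloc(0x10000, PA_PERM|16); /* 64 kB, 64 kB-aligned, permanent */
void *parea = __palloc(image_size, PA_LOW); /* taken from low memory upwards */
(void) hash; (void) parea;
}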
void pfree(void * p) {
map *q;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
q=remove_map_at(&mm->physused, p);
if (!q) return;
q->firstpte=MAP_FREE_PHYS;
insert_map(&mm->physavail, q);
coalesce_maps(mm->physavail);
}
#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
map *p;
printk("%s",s);
for(p=chain; p; p=p->next) {
printk(" %08lx-%08lx: %08lx\n",
p->base, p->end, p->firstpte);
}
}
void print_all_maps(const char * s) {
u_long freemaps;
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
map *free;
printk("%s",s);
print_maps(mm->mappings, " Currently defined mappings:\n");
print_maps(mm->physavail, " Currently available physical areas:\n");
print_maps(mm->physused, " Currently used physical areas:\n");
print_maps(mm->virtavail, " Currently available virtual areas:\n");
print_maps(mm->virtused, " Currently used virtual areas:\n");
print_maps(mm->physperm, " Permanently used physical areas:\n");
print_maps(mm->sallocphys, " Physical memory used for salloc:\n");
print_maps(mm->sallocfree, " Memory available for salloc:\n");
print_maps(mm->sallocused, " Memory allocated through salloc:\n");
for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
printk(" %ld free maps.\n", freemaps);
}
void print_hash_table(void) {
struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
hash_entry *p=(hash_entry *) mm->sdr1;
u_int i, valid=0;
for (i=0; i<((mm->hashmask)>>3)+8; i++) {
if (p[i].key<0) valid++;
}
printk("%u valid hash entries on pass 1.\n", valid);
valid = 0;
for (i=0; i<((mm->hashmask)>>3)+8; i++) {
if (p[i].key<0) valid++;
}
printk("%u valid hash entries on pass 2.\n"
" vpn:rpn_attr, p/s, pteg.i\n", valid);
for (i=0; i<((mm->hashmask)>>3)+8; i++) {
if (p[i].key<0) {
u_int pteg=(i>>3);
u_long vpn;
vpn = (pteg^((p[i].key)>>7)) &0x3ff;
if (p[i].key&0x40) vpn^=0x3ff;
vpn |= ((p[i].key<<9)&0xffff0000)
| ((p[i].key<<10)&0xfc00);
printk("%08lx:%08lx, %s, %5d.%d\n",
vpn, p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
pteg, i%8);
}
}
}
#endif

File diff suppressed because it is too large

View File

@@ -1,95 +0,0 @@
/*
* PCI defines and function prototypes
* Copyright 1994, Drew Eckhardt
* Copyright 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
*
* PCI BIOS Specification
* PCI Local Bus Specification
* PCI to PCI Bridge Specification
* PCI System Design Guide
*/
#ifndef BOOTLOADER_PCI_H
#define BOOTLOADER_PCI_H
#include <rtems/pci.h>
/* Functions used to access pci configuration space */
struct pci_bootloader_config_access_functions {
int (*read_config_byte)(unsigned char, unsigned char,
unsigned char, uint8_t *);
int (*read_config_word)(unsigned char, unsigned char,
unsigned char, uint16_t *);
int (*read_config_dword)(unsigned char, unsigned char,
unsigned char, uint32_t *);
int (*write_config_byte)(unsigned char, unsigned char,
unsigned char, uint8_t);
int (*write_config_word)(unsigned char, unsigned char,
unsigned char, uint16_t);
int (*write_config_dword)(unsigned char, unsigned char,
unsigned char, uint32_t);
};
/*
* There is one pci_dev structure for each slot-number/function-number
* combination:
*/
struct pci_dev {
struct pci_bus *bus; /* bus this device is on */
struct pci_dev *sibling; /* next device on this bus */
struct pci_dev *next; /* chain of all devices */
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
unsigned int devfn; /* encoded device & function index */
unsigned short vendor;
unsigned short device;
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
unsigned int hdr_type; /* PCI header type */
unsigned int master : 1; /* set if device is master capable */
/*
* In theory, the irq level can be read from configuration
* space and all would be fine. However, old PCI chips don't
* support these registers and return 0 instead. For example,
* the Vision864-P rev 0 chip can use INTA, but returns 0 in
* the interrupt line and pin registers. pci_init()
* initializes this field with the value at PCI_INTERRUPT_LINE
* and it is the job of pcibios_fixup() to change it if
* necessary. The field must not be 0 unless the device
* cannot generate interrupts at all.
*/
unsigned int irq; /* irq generated by this device */
/* Base registers for this device, can be adjusted by
* pcibios_fixup() as necessary.
*/
unsigned long base_address[6];
unsigned long rom_address;
};
struct pci_bus {
struct pci_bus *parent; /* parent bus this bridge is on */
struct pci_bus *children; /* chain of P2P bridges on this bus */
struct pci_bus *next; /* chain of all PCI buses */
struct pci_dev *self; /* bridge device as seen by parent */
struct pci_dev *devices; /* devices behind this bridge */
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
unsigned char number; /* bus number */
unsigned char primary; /* number of primary bridge */
unsigned char secondary; /* number of secondary bridge */
unsigned char subordinate; /* max number of subordinate buses */
};
extern struct pci_bus pci_root; /* root bus */
extern struct pci_dev *pci_devices; /* list of all devices */
#endif /* BOOTLOADER_PCI_H */

View File

@@ -1,96 +0,0 @@
OUTPUT_ARCH(powerpc)
OUTPUT_FORMAT ("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
/* Do we need any of these for elf?
__DYNAMIC = 0; */
SECTIONS
{
.text :
{
/* We have to build the header by hand, painful since ppcboot
format support is very poor in binutils.
objdump -b ppcboot zImage --all-headers can be used to check. */
/* The following line can be added as a branch to use the same image
* for netboot as for PReP boots; the only problem is that objdump
* did not in this case recognize the format since it insisted
* on checking that the x86 code area held only zeroes.
*/
LONG(0x48000000+start);
. = 0x1be; BYTE(0x80); BYTE(0)
BYTE(2); BYTE(0); BYTE(0x41); BYTE(1);
BYTE(0x12); BYTE(0x4f); LONG(0);
BYTE(((_edata + 0x1ff)>>9)&0xff);
BYTE(((_edata + 0x1ff)>>17)&0xff);
BYTE(((_edata + 0x1ff)>>25)&0xff);
. = 0x1fe;
BYTE(0x55);
BYTE(0xaa);
BYTE(start&0xff);
BYTE((start>>8)&0xff);
BYTE((start>>16)&0xff);
BYTE((start>>24)&0xff);
BYTE(_edata&0xff);
BYTE((_edata>>8)&0xff);
BYTE((_edata>>16)&0xff);
BYTE((_edata>>24)&0xff);
BYTE(0); /* flags */
BYTE(0); /* os_id */
BYTE(0x4C); BYTE(0x69); BYTE(0x6e);
BYTE(0x75); BYTE(0x78); /* Partition name */
. = 0x400;
*(.text)
*(.sdata2)
*(.rodata)
*(.rodata*)
}
/* . = ALIGN(16); */
.image :
{
rtems.gz(*)
. = ALIGN(4);
*.gz(*)
}
/* Read-write section, merged into data segment: */
/* . = ALIGN(4096); */
.reloc :
{
*(.got)
_GOT2_TABLE_ = .;
*(.got2)
_FIXUP_TABLE_ = .;
*(.fixup)
}
__got2_entries = (_FIXUP_TABLE_ - _GOT2_TABLE_) >>2;
__fixup_entries = (. - _FIXUP_TABLE_)>>2;
.handlers :
{
*(.exception)
}
.data :
{
*(.data)
*(.data*)
*(.sdata)
. = ALIGN(4);
_edata = .;
}
PROVIDE(_binary_initrd_gz_start = 0);
PROVIDE(_binary_initrd_gz_end = 0);
_rtems_gz_size = _binary_rtems_gz_end - _binary_rtems_gz_start;
_rtems_size = __rtems_end - __rtems_start;
.bss :
{
*(.sbss)
*(.bss)
. = ALIGN(4);
}
__bss_words = SIZEOF(.bss)>>2;
__size = . ;
/DISCARD/ :
{
*(.comment)
}
}

View File

@@ -1,226 +0,0 @@
#include <bsp/residual.h>
#include <stdint.h>
/* Magic knowledge - qemu loads image here.
* However, we use the value from NVRAM if possible...
*/
#define KERNELBASE 0x01000000
/* When starting qemu make sure to give the correct
* amount of memory!
*
* NOTE: Code now supports reading the actual memory
* amount from NVRAM. The residual copy in RAM
* is fixed-up accordingly.
*/
#define MEM_MEGS 32
/* Mock up a minimal/fake residual; just enough to make the
* bootloader happy.
*/
struct fake_data {
unsigned long dat_len;
unsigned long res_off;
unsigned long cmd_off;
unsigned long cmd_len;
unsigned long img_adr;
RESIDUAL fake_residual;
char cmdline[1024];
} fake_data = {
dat_len: sizeof(fake_data),
res_off: (unsigned long) &fake_data.fake_residual
-(unsigned long) &fake_data,
cmd_off: (unsigned long) &fake_data.cmdline
-(unsigned long) &fake_data,
cmd_len: sizeof(fake_data.cmdline),
img_adr: KERNELBASE,
fake_residual:
{
ResidualLength: sizeof(RESIDUAL),
Version: 0,
Revision: 0,
EC: 0,
VitalProductData: {
FirmwareSupplier: QEMU,
ProcessorHz: 300000000, /* fantasy */
ProcessorBusHz: 100000000, /* qemu timebase clock */
TimeBaseDivisor:1*1000,
},
MaxNumCpus: 1,
ActualNumCpus: 1,
Cpus: {
{
CpuType: 0x00040103, /* FIXME: fill from PVR */
CpuNumber: 0,
CpuState: 0,
},
},
/* Memory */
TotalMemory: 1024*1024*MEM_MEGS,
GoodMemory: 1024*1024*MEM_MEGS,
ActualNumMemSegs: 13,
Segs: {
{ 0x2000, 0xFFF00, 0x00100 },
{ 0x0020, MEM_MEGS*0x100, 0x80000 - MEM_MEGS*0x100 },
{ 0x0008, 0x00800, 0x00168 },
{ 0x0004, 0x00000, 0x00005 },
{ 0x0001, 0x006F1, 0x0010F },
{ 0x0002, 0x006AD, 0x00044 },
{ 0x0010, 0x00005, 0x006A8 },
{ 0x0010, 0x00968, MEM_MEGS*0x100 - 0x00968 },
{ 0x0800, 0xC0000, 0x3F000 },
{ 0x0600, 0xBF800, 0x00800 },
{ 0x0500, 0x81000, 0x3E800 },
{ 0x0480, 0x80800, 0x00800 },
{ 0x0440, 0x80000, 0x00800 }
},
ActualNumMemories: 0,
Memories: {
{0},
},
/* Devices */
ActualNumDevices: 1,
Devices: {
{
DeviceId: {
BusId: PROCESSORDEVICE,
BaseType: BridgeController,
SubType: PCIBridge,
Interface: PCIBridgeIndirect,
},
}
},
DevicePnPHeap: {0}
},
/* This is overwritten by command line passed by qemu. */
cmdline: {
'-','-','n','e','2','k','-','i','r','q','=','9',
0,
}
};
/* Read one byte from NVRAM */
static inline uint8_t
nvram_rd(void)
{
uint8_t rval = *(volatile uint8_t*)0x80000077;
asm volatile("eieio");
return rval;
}
/* Set NVRAM address pointer */
static inline void
nvram_addr(uint16_t addr)
{
*(volatile uint8_t*)0x80000074 = (addr & 0xff);
asm volatile("eieio");
*(volatile uint8_t*)0x80000075 = ((addr>>8) & 0xff);
asm volatile("eieio");
}
/* Read a 32-bit (big-endian) word from NVRAM */
static uint32_t
nvram_rdl_be(uint16_t addr)
{
int i;
uint32_t rval = 0;
for ( i=0; i<sizeof(rval); i++ ) {
nvram_addr( addr + i );
rval = (rval<<8) | nvram_rd();
}
return rval;
}
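/* (NVRAM offsets used by res_copy() below - gathered from this file,
* not from a published layout:
* 0x0000 magic "QEMU"
* 0x0030 memory size in bytes
* 0x0038 kernel image load address
* 0x003c kernel image size
* 0x0040 command-line start address
* 0x0044 command-line length)
*/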
/* !!! NOTE !!!
*
* We use a special hack to propagate command-line info to the bootloader.
* This is NOT PreP compliant (but who cares).
* We set R6 and R7 to point to the start/end of the command line string
* and hacked the bootloader so it uses R6/R7 (provided that the firmware
* is detected as 'QEMU').
*
* (see bootloader/mm.c, bootloader/misc.c, bootloader/bootldr.h, -- boot_data.cmd_line[])
*/
uint32_t
res_copy(void)
{
struct fake_data *p;
uint32_t addr, cmdl, l, imga;
uint32_t mem_sz, pgs;
int i;
int have_nvram;
/* Make sure we have a valid NVRAM -- just check for 'QEMU' at the
* beginning
*/
have_nvram = ( (('Q'<<24) | ('E'<<16) | ('M'<< 8) | ('U'<< 0)) == nvram_rdl_be( 0x0000 ) );
if ( !have_nvram ) {
/* reading NVRAM failed - fall back to using the static residual copy;
* this means no support for variable memory size or 'true' command line.
*/
return (uint32_t)&fake_data;
}
/* Dilemma - we don't really know where to put the residual copy
* (original is in ROM and cannot be modified).
* We can't put it at the top of memory since the bootloader starts
* allocating memory from there, before saving the residual, that is.
* Too close to the final image might not work either because RTEMS
* zeroes its BSS *before* making its copies of the residual and commandline.
*
* For now we hope that appending to the kernel image works (and that
* the bootloader puts it somewhere safe).
*/
imga = nvram_rdl_be( 0x0038 );
addr = imga + nvram_rdl_be( 0x003c );
addr += 0x1f;
addr &= ~(0x1f);
p = (struct fake_data *)addr;
/* commandline + length from NVRAM */
cmdl = nvram_rdl_be( 0x0040 );
l = nvram_rdl_be( 0x0044 );
if ( l > 0 ) {
/* have a command-line; copy it into our local buffer */
if ( l > sizeof( p->cmdline ) - 1 ) {
l = sizeof( p->cmdline ) - 1;
}
/* original may overlap our buffer; must safely move around */
if ( p->cmdline < (char*)cmdl ) {
for ( i=0; i<l; i++ ) {
p->cmdline[i] = ((char*)cmdl)[i];
}
} else {
for ( i=l-1; i>=0; i-- ) {
p->cmdline[i] = ((char*)cmdl)[i];
}
}
}
p->cmdline[l] = 0;
/* Copy rest of residual */
for ( i=0; i<sizeof(p->fake_residual); i++ )
((char*)&p->fake_residual)[i] = ((char*)&fake_data.fake_residual)[i];
p->dat_len = fake_data.dat_len;
p->res_off = fake_data.res_off;
p->cmd_off = fake_data.cmd_off;
p->cmd_len = l+1;
p->img_adr = imga;
/* Fix up memory in residual from NVRAM settings */
mem_sz = nvram_rdl_be( 0x0030 );
pgs = mem_sz >> 12;
p->fake_residual.TotalMemory = mem_sz;
p->fake_residual.GoodMemory = mem_sz;
p->fake_residual.Segs[1].BasePage = pgs;
p->fake_residual.Segs[1].PageCount = 0x80000 - pgs;
p->fake_residual.Segs[7].PageCount = pgs - 0x00968;
return (uint32_t)p;
}

View File

@@ -1,217 +0,0 @@
/* A fake 'bios' which does nothing but move a kernel image
* to RAM address zero and then starts that...
*/
#include <bsp/residual.h>
#define LD_CACHE_LINE_SIZE 5
#define INIT_STACK (0x100 - 16) /* 16-byte/svr4 aligned */
/* These offsets must correspond to declaration in qemu_fakeres.c */
#define DAT_LEN 0
#define RES_OFF 4
#define CMD_OFF 8
#define CMD_LEN 12
#define IMG_ADR 16
/* Non-volatile registers */
#define OBASE 30
#define PCID 25
#define PCIA 26
#define PCI_MAX_DEV 32
#define BA_OPCODE(tgt) ((18<<(31-5)) | 2 | ((tgt) & 0x03fffffc))
.global fake_data
.global res_set_memsz
.global _start
_start:
lis 1, INIT_STACK@h
ori 1,1,INIT_STACK@l
/* qemu 0.14.1 has the wrong exception prefix for 74xx CPUs
* (bug 811683). Work around this by putting a stub at 0x00000X00
* which simply jumps to high memory. We only need the SC exception
* for now.
*/
lis 3, BA_OPCODE(0xfff00000)@h
ori 3, 3, BA_OPCODE(0xfff00000)@l
li 4, 0x0c00
add 3, 3, 4
stw 3, 0(4)
dcbf 0, 4
icbi 0, 4
bl pci_irq_set
/* copy residual to RAM and fix up;
* this routine returns a pointer to
* a 'fake_data' struct. If reading
* NVRAM failed then the return value
* points to a fall-back version in
* ROM...
*/
bl res_copy
/* fake_data pointer to R29 */
mr 29, 3
/* Load up R3..R5 with PreP mandated
* values (R3: residual, R4: kernel image,
* R5: OpenFirmware PTR (or NULL).
*/
/* load R3 with residual pointer */
lwz 3, RES_OFF(29)
add 3, 3, 29
/* load R4 with image address */
lwz 4, IMG_ADR(29)
/* load R5 with zero (OFW = NULL) */
li 5, 0
/* EXTENSION: R6 = cmdline start */
lwz 6, CMD_OFF(29)
add 6, 6, 29
/* EXTENSION: R7 = cmdline end */
lwz 7, CMD_LEN(29)
add 7, 7, 6
/* jump to image address */
mtctr 4
bctr
.org 0x100
b _start
.org 0x110
template:
mfsrr0 30
mfsrr1 31
1: b 1b
template_end:
.org 0xc00
b monitor
.org 0x4000
codemove: /* src/dst are cache-aligned */
addi 5,5,(1<<LD_CACHE_LINE_SIZE)-1
srwi 5,5,LD_CACHE_LINE_SIZE
addi 3,3,-4
addi 4,4,-4
1:
li 0, (1<<LD_CACHE_LINE_SIZE)
mtctr 0
2:
lwzu 0, 4(3)
stwu 0, 4(4)
bdnz 2b
dcbf 0,4
icbi 0,4
addic. 5,5,-1
bne 1b
blr
cpexc:
lis 3,template@h
ori 3,3,template@l
li 5,template_end-template
b codemove
monitor:
stwu 1,-16(1)
stw OBASE, 8(1)
lis OBASE, 0x80000000@h
cmplwi 10,0x63 /* enter_monitor -> RESET */
bne 10f
hwreset:
li 3,1
stb 3,0x92(OBASE)
1: b 1b
10: cmplwi 10,0x1d /* .NETCTRL -> ignore */
bne 10f
b ret_from_mon
10: b hwreset /* unknown -> RESET */
ret_from_mon:
lwz OBASE,8(1)
lwz 1,0(1)
rfi
rcb:
stwbrx 3, 0, PCIA
lbzx 3, 0, PCID
blr
wcb:
stwbrx 3, 0, PCIA
stbx 4, 0, PCID
blr
rcd:
stwbrx 3, 0, PCIA
lwbrx 3, 0, PCID
blr
/* fixup pci interrupt line register according to what
* qemu does: line = ((pin-1) + slot_no) & 1 ? 11 : 9;
*/
pci_irq_set:
/* set up stack frame */
stwu 1, -32(1)
mflr 0
stw 0, 32+4(1)
/* load counter with # of PCI devs */
li 0, PCI_MAX_DEV
mtctr 0
/* save non-volatile registers we use
* in stack frame
*/
stw 20, 8(1)
stw PCIA, 12(1)
stw PCID, 16(1)
/* load non-volatile registers with
* intended values.
*/
lis 20, 0x80000000@h /* key for slot # 0 */
lis PCIA, 0x80000cf8@h /* PCI config space address reg */
ori PCIA, PCIA, 0x80000cf8@l
addi PCID, PCIA, 4 /* PCI config space data reg */
/* loop over all slots and fix up PCI IRQ LINE */
1:
mr 3, 20
bl rcd
addi 3, 3, 1
cmplwi 3, 0 /* slot empty (= -1 + 1 = 0) ? */
beq 2f
addi 3, 20, 0x3d
bl rcb
cmplwi 3, 0
beq 2f
slwi 4, 3, 11
addi 3, 20, 0x3c
xor 4, 4, 3 /* bit 11 = slot # + irq_num [zero-based] + 1 */
andi. 4, 4, 0x0800
li 4, 11
beq 3f
li 4, 9
3:
bl wcb
2:
addi 20, 20, 0x0800 /* next slot */
bdnz 1b
/* restore and return */
lwz 20, 32+4(1)
mtlr 20
lwz PCID, 16(1)
lwz PCIA, 12(1)
lwz 20, 8(1)
lwz 1, 0(1)
blr
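/* A hypothetical C rendering of pci_irq_set (the config-space helpers
* are illustrative names, not real functions):
*
* for (slot = 0; slot < PCI_MAX_DEV; slot++) {
* if (cfg_read_dword(slot, 0x00) == 0xffffffff)
* continue; // slot empty
* pin = cfg_read_byte(slot, 0x3d); // PCI_INTERRUPT_PIN
* if (pin == 0)
* continue;
* line = (((pin - 1) + slot) & 1) ? 11 : 9;
* cfg_write_byte(slot, 0x3c, line); // PCI_INTERRUPT_LINE
* }
*/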
.section .romentry, "ax"
b _start

File diff suppressed because it is too large

View File

@@ -1,434 +0,0 @@
/*
* This file is derived from zlib.h and zconf.h from the zlib-0.95
* distribution by Jean-loup Gailly and Mark Adler, with some additions
* by Paul Mackerras to aid in implementing Deflate compression and
* decompression for PPP packets.
*/
/*
* ==FILEVERSION 960122==
*
* This marker is used by the Linux installation script to determine
* whether an up-to-date version of this file is already installed.
*/
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 0.95, Aug 16th, 1995.
Copyright (C) 1995 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Jean-loup Gailly Mark Adler
gzip@prep.ai.mit.edu madler@alumni.caltech.edu
*/
#ifndef _ZLIB_H
#define _ZLIB_H
#define local
#ifdef DEBUG_ZLIB
#include <bsp/consoleIo.h>
#define fprintf printk
#endif
/* #include "zconf.h" */ /* included directly here */
/* zconf.h -- configuration of the zlib compression library
* Copyright (C) 1995 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
/*
The library does not install any signal handler. It is recommended to
add at least a handler for SIGSEGV when decompressing; the library checks
the consistency of the input data whenever possible but may go nuts
for some forms of corrupted input.
*/
/*
* Compile with -DMAXSEG_64K if the alloc function cannot allocate more
* than 64k bytes at a time (needed on systems with 16-bit int).
* Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
* at addresses which are not a multiple of their size.
* Under DOS, -DFAR=far or -DFAR=__far may be needed.
*/
#ifndef STDC
# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
# define STDC
# endif
#endif
#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
# include <unix.h>
#endif
/* Maximum value for memLevel in deflateInit2 */
#ifndef MAX_MEM_LEVEL
# ifdef MAXSEG_64K
# define MAX_MEM_LEVEL 8
# else
# define MAX_MEM_LEVEL 9
# endif
#endif
#ifndef FAR
# define FAR
#endif
/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
#ifndef MAX_WBITS
# define MAX_WBITS 15 /* 32K LZ77 window */
#endif
/* The memory requirements for deflate are (in bytes):
1 << (windowBits+2) + 1 << (memLevel+9)
that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
plus a few kilobytes for small objects. For example, if you want to reduce
the default memory requirements from 256K to 128K, compile with
make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
Of course this will generally degrade compression (there's no free lunch).
The memory requirements for inflate are (in bytes) 1 << windowBits
that is, 32K for windowBits=15 (default value) plus a few kilobytes
for small objects.
*/
/* Type declarations */
#ifndef OF /* function prototypes */
# ifdef STDC
# define OF(args) args
# else
# define OF(args) ()
# endif
#endif
typedef unsigned char Byte; /* 8 bits */
typedef unsigned int uInt; /* 16 bits or more */
typedef unsigned long uLong; /* 32 bits or more */
typedef Byte FAR Bytef;
typedef char FAR charf;
typedef int FAR intf;
typedef uInt FAR uIntf;
typedef uLong FAR uLongf;
#ifdef STDC
typedef void FAR *voidpf;
typedef void *voidp;
#else
typedef Byte FAR *voidpf;
typedef Byte *voidp;
#endif
/* end of original zconf.h */
#define ZLIB_VERSION "0.95P"
/*
The 'zlib' compression library provides in-memory compression and
decompression functions, including integrity checks of the uncompressed
data. This version of the library supports only one compression method
(deflation) but other algorithms may be added later and will have the same
stream interface.
For compression the application must provide the output buffer and
may optionally provide the input buffer for optimization. For decompression,
the application must provide the input buffer and may optionally provide
the output buffer for optimization.
Compression can be done in a single step if the buffers are large
enough (for example if an input file is mmap'ed), or can be done by
repeated calls of the compression function. In the latter case, the
application must provide more input and/or consume the output
(providing more output space) before each call.
*/
typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
struct internal_state;
typedef struct z_stream_s {
Bytef *next_in; /* next input byte */
uInt avail_in; /* number of bytes available at next_in */
uLong total_in; /* total nb of input bytes read so far */
Bytef *next_out; /* next output byte should be put there */
uInt avail_out; /* remaining free space at next_out */
uLong total_out; /* total nb of bytes output so far */
char *msg; /* last error message, NULL if no error */
struct internal_state FAR *state; /* not visible by applications */
alloc_func zalloc; /* used to allocate the internal state */
free_func zfree; /* used to free the internal state */
voidp opaque; /* private data object passed to zalloc and zfree */
Byte data_type; /* best guess about the data type: ascii or binary */
} z_stream;
/*
The application must update next_in and avail_in when avail_in has
dropped to zero. It must update next_out and avail_out when avail_out
has dropped to zero. The application must initialize zalloc, zfree and
opaque before calling the init function. All other fields are set by the
compression library and must not be updated by the application.
The opaque value provided by the application will be passed as the first
parameter for calls of zalloc and zfree. This can be useful for custom
memory management. The compression library attaches no meaning to the
opaque value.
zalloc must return Z_NULL if there is not enough memory for the object.
On 16-bit systems, the functions zalloc and zfree must be able to allocate
exactly 65536 bytes, but will not be required to allocate more than this
if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
pointers returned by zalloc for objects of exactly 65536 bytes *must*
have their offset normalized to zero. The default allocation function
provided by this library ensures this (see zutil.c). To reduce memory
requirements and avoid any allocation of 64K objects, at the expense of
compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
The fields total_in and total_out can be used for statistics or
progress reports. After compression, total_in holds the total size of
the uncompressed data and may be saved for use in the decompressor
(particularly if the decompressor wants to decompress everything in
a single step).
*/
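/*
   Sketch of a custom allocator pair honoring the contract above; here a
   trivial bump allocator over a static arena (illustrative only: the
   names are hypothetical, alignment handling is omitted, and zfree is a
   no-op, which suits a freestanding environment such as a bootloader):

     static char arena[1 << 16];
     static uInt arena_used = 0;

     static voidpf my_alloc(voidpf opaque, uInt items, uInt size)
     {
         uInt n = items * size;
         if (arena_used + n > sizeof(arena))
             return (voidpf)Z_NULL;      // contract: Z_NULL on exhaustion
         arena_used += n;
         return (voidpf)(arena + arena_used - n);
     }

     static void my_free(voidpf opaque, voidpf address, uInt nbytes)
     {
         // bump allocator: individual blocks are never reclaimed
     }

     s.zalloc = my_alloc;
     s.zfree  = my_free;
     s.opaque = (voidp)Z_NULL;   // passed back as first argument; unused here
*/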
/* constants */
#define Z_NO_FLUSH 0
#define Z_PARTIAL_FLUSH 1
#define Z_FULL_FLUSH 2
#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
#define Z_FINISH 4
#define Z_PACKET_FLUSH 5
/* See deflate() below for the usage of these constants */
#define Z_OK 0
#define Z_STREAM_END 1
#define Z_ERRNO (-1)
#define Z_STREAM_ERROR (-2)
#define Z_DATA_ERROR (-3)
#define Z_MEM_ERROR (-4)
#define Z_BUF_ERROR (-5)
/* error codes for the compression/decompression functions */
#define Z_BEST_SPEED 1
#define Z_BEST_COMPRESSION 9
#define Z_DEFAULT_COMPRESSION (-1)
/* compression levels */
#define Z_FILTERED 1
#define Z_HUFFMAN_ONLY 2
#define Z_DEFAULT_STRATEGY 0
#define Z_BINARY 0
#define Z_ASCII 1
#define Z_UNKNOWN 2
/* Used to set the data_type field */
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
extern char *zlib_version;
/* The application can compare zlib_version and ZLIB_VERSION for consistency.
If the first character differs, the library code actually used is
not compatible with the zlib.h header file used by the application.
*/
/* basic functions */
extern int inflateInit OF((z_stream *strm));
/*
Initializes the internal stream state for decompression. The fields
zalloc and zfree must be initialized before by the caller. If zalloc and
zfree are set to Z_NULL, inflateInit updates them to use default allocation
functions.
inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory. msg is set to null if there is no error message.
inflateInit does not perform any decompression: this will be done by
inflate().
*/
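/*
   Minimal initialization sketch using the default allocators
   (illustrative only):

     z_stream s;
     s.zalloc = (alloc_func)Z_NULL;  // let inflateInit install defaults
     s.zfree  = (free_func)Z_NULL;
     s.opaque = (voidp)Z_NULL;
     if (inflateInit(&s) != Z_OK)
         ;  // out of memory; s.msg may describe the failure
*/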
extern int inflate OF((z_stream *strm, int flush));
/*
Performs one or both of the following actions:
- Decompress more input starting at next_in and update next_in and avail_in
accordingly. If not all input can be processed (because there is not
enough room in the output buffer), next_in is updated and processing
will resume at this point for the next call of inflate().
- Provide more output starting at next_out and update next_out and avail_out
accordingly. inflate() always provides as much output as possible
(until there is no more input data or no more space in the output buffer).
Before the call of inflate(), the application should ensure that at least
one of the actions is possible, by providing more input and/or consuming
more output, and updating the next_* and avail_* values accordingly.
The application can consume the uncompressed output when it wants, for
example when the output buffer is full (avail_out == 0), or after each
call of inflate().
If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
inflate flushes as much output as possible to the output buffer. The
flushing behavior of inflate is not specified for values of the flush
parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
current implementation actually flushes as much output as possible
anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
has been consumed, it is expecting to see the length field of a stored
block; if not, it returns Z_DATA_ERROR.
inflate() should normally be called until it returns Z_STREAM_END or an
error. However if all decompression is to be performed in a single step
(a single call of inflate), the parameter flush should be set to
Z_FINISH. In this case all pending input is processed and all pending
output is flushed; avail_out must be large enough to hold all the
uncompressed data. (The size of the uncompressed data may have been saved
by the compressor for this purpose.) The next operation on this stream must
be inflateEnd to deallocate the decompression state. The use of Z_FINISH
is never required, but can be used to inform inflate that a faster routine
may be used for the single inflate() call.
inflate() returns Z_OK if some progress has been made (more input
processed or more output produced), Z_STREAM_END if the end of the
compressed data has been reached and all uncompressed output has been
produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
the stream structure was inconsistent (for example if next_in or next_out
was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
progress is possible or if there was not enough room in the output buffer
when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
call inflateSync to look for a good compression block. */
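/*
   Typical decompression loop sketch (illustrative; fill_input() and
   flush_output() are hypothetical helpers that refill next_in/avail_in
   and consume the bytes produced at next_out):

     static Byte out_buf[4096];
     z_stream s;                 // initialized as shown for inflateInit above
     int err = Z_OK;
     while (err == Z_OK) {
         if (s.avail_in == 0)
             fill_input(&s);                       // provide more input
         s.next_out  = out_buf;                    // provide more output room
         s.avail_out = sizeof(out_buf);
         err = inflate(&s, Z_NO_FLUSH);
         flush_output(out_buf, sizeof(out_buf) - s.avail_out);
     }
     if (err == Z_STREAM_END)
         err = inflateEnd(&s);   // free the decompression state
*/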
extern int inflateEnd OF((z_stream *strm));
/*
All dynamically allocated data structures for this stream are freed.
This function discards any unprocessed input and does not flush any
pending output.
inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
was inconsistent. In the error case, msg may be set but then points to a
static string (which must not be deallocated).
*/
/* advanced functions */
extern int inflateInit2 OF((z_stream *strm,
                            int windowBits));
/*
This is another version of inflateInit with more compression options. The
fields next_out, zalloc and zfree must be initialized before by the caller.
The windowBits parameter is the base two logarithm of the maximum window
size (the size of the history buffer). It should be in the range 8..15 for
this version of the library (the value 16 will be allowed soon). The
default value is 15 if inflateInit is used instead. If a compressed stream
with a larger window size is given as input, inflate() will return with
the error code Z_DATA_ERROR instead of trying to allocate a larger window.
If next_out is not null, the library will use this buffer for the history
buffer; the buffer must either be large enough to hold the entire output
data, or have at least 1<<windowBits bytes. If next_out is null, the
library will allocate its own buffer (and leave next_out null). next_in
need not be provided here but must be provided by the application for the
next call of inflate().
If the history buffer is provided by the application, next_out must
never be changed by the application since the decompressor maintains
history information inside this buffer from call to call; the application
can only reset next_out to the beginning of the history buffer when
avail_out is zero and all output has been consumed.
inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
windowBits < 8). msg is set to null if there is no error message.
inflateInit2 does not perform any decompression: this will be done by
inflate().
*/
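/*
   Sketch of inflateInit2 with a caller-supplied history buffer
   (illustrative only; the buffer must hold at least 1<<windowBits bytes):

     static Byte hist[1 << 13];          // 8K window for windowBits = 13
     z_stream s;
     s.zalloc   = (alloc_func)Z_NULL;
     s.zfree    = (free_func)Z_NULL;
     s.opaque   = (voidp)Z_NULL;
     s.next_out = hist;                  // library keeps its history here
     if (inflateInit2(&s, 13) != Z_OK)
         ;  // invalid parameter or out of memory
*/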
extern int inflateSync OF((z_stream *strm));
/*
Skips invalid compressed data until the special marker (see deflate()
above) can be found, or until all available input is skipped. No output
is provided.
inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
if no more input was provided, Z_DATA_ERROR if no marker has been found,
or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
case, the application may save the current value of total_in, which
indicates where valid compressed data was found. In the error case, the
application may repeatedly call inflateSync, providing more input each time,
until success or end of the input data.
*/
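/*
   Recovery sketch after inflate() returns Z_DATA_ERROR (illustrative;
   fill_input() is a hypothetical helper returning nonzero once it has
   refilled next_in/avail_in):

     int err = inflateSync(&s);
     while (err == Z_BUF_ERROR && fill_input(&s))
         err = inflateSync(&s);          // retry with more input
     if (err == Z_OK)
         ;  // s.total_in now marks where valid compressed data resumed
*/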
extern int inflateReset OF((z_stream *strm));
/*
This function is equivalent to inflateEnd followed by inflateInit,
but does not free and reallocate all the internal decompression state.
The stream will keep attributes that may have been set by inflateInit2.
inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent (such as zalloc or state being NULL).
*/
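/*
   Reuse sketch: reset the stream between independent compressed buffers
   instead of paying for inflateEnd plus inflateInit (illustrative;
   second_buf/second_len are hypothetical):

     if (inflateReset(&s) == Z_OK) {
         s.next_in  = second_buf;        // start of the next stream
         s.avail_in = second_len;
     }
*/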
extern int inflateIncomp OF((z_stream *strm));
/*
This function adds the data at next_in (avail_in bytes) to the output
history without performing any output. There must be no pending output,
and the decompressor must be expecting to see the start of a block.
Calling this function is equivalent to decompressing a stored block
containing the data at next_in (except that the data is not output).
*/
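/*
   Sketch for mixed traffic where some packets travel uncompressed
   (illustrative; raw_pkt/raw_len are hypothetical): the raw bytes must
   still enter the history so later compressed packets can reference them.

     s.next_in  = raw_pkt;
     s.avail_in = raw_len;
     if (inflateIncomp(&s) != Z_OK)
         ;  // pending output, or stream not at a block boundary
*/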
/* checksum functions */
/*
This function is not related to compression but is exported
anyway because it might be useful in applications using the
compression library.
*/
extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
/*
Update a running Adler-32 checksum with the bytes buf[0..len-1] and
return the updated checksum. If buf is NULL, this function returns
the required initial value for the checksum.
An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
much faster. Usage example:
uLong adler = adler32(0L, Z_NULL, 0);
while (read_buffer(buffer, length) != EOF) {
adler = adler32(adler, buffer, length);
}
if (adler != original_adler) error();
*/
#ifndef _Z_UTIL_H
struct internal_state {int dummy;}; /* hack for buggy compilers */
#endif
#endif /* _ZLIB_H */