New network driver from Erik Ivanenko <erik.ivanenko@utoronto.ca>.

Joel Sherrill
1998-11-23 14:28:09 +00:00
parent 588b68976f
commit 20ad9e9d93
4 changed files with 3113 additions and 0 deletions


@@ -0,0 +1,268 @@
/* $Id$
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#define dma_outb(x,y) outport_byte(y,x)
#define dma_inb inport_byte
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
*    A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
*     |  ...  |   |  ... |   |  ... |
*     |  ...  |   |  ... |   |  ... |
*     |  ...  |   |  ... |   |  ... |
*    P7  ...  P0  A7 ... A0  A7 ... A0
*  |    Page    | Addr MSB | Addr LSB |   (DMA registers)
*
* Address mapping for channels 5-7:
*
*    A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
*     |  ...  |   \   \   ... \  \  \  ... \  \
*     |  ...  |    \   \   ... \  \  \  ... \  (not used)
*     |  ...  |     \   \   ... \  \  \  ... \
*    P7  ...  P1 (0)  A7 A6 ... A0  A7 A6 ... A0
*  |    Page    |  Addr MSB   | Addr LSB  |   (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
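/*
 * Worked example for channels 5-7: a transfer starting at physical address
 * 0x123456 uses word address 0x123456 >> 1 = 0x091A2B, so the page register
 * is loaded with 0x12 (A23..A17, bit 0 unused), the address MSB with 0x1A
 * and the address LSB with 0x2B; a 1000-word transfer loads a count of
 * 999 (0x03E7).
 */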
#define MAX_DMA_CHANNELS 8
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS 0x1000000
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
  if (dmanr <= 3) {
    dma_outb(dmanr, DMA1_MASK_REG);
  } else {
    dma_outb(dmanr & 3, DMA2_MASK_REG);
  }
}

static __inline__ void disable_dma(unsigned int dmanr)
{
  if (dmanr <= 3) {
    dma_outb(dmanr | 4, DMA1_MASK_REG);
  } else {
    dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
  }
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while interrupts are disabled! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
  if (dmanr <= 3) {
    dma_outb(0, DMA1_CLEAR_FF_REG);
  } else {
    dma_outb(0, DMA2_CLEAR_FF_REG);
  }
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
  if (dmanr <= 3) {
    dma_outb(mode | dmanr, DMA1_MODE_REG);
  } else {
    dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
  }
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
  switch (dmanr) {
    case 0:
      dma_outb(pagenr, DMA_PAGE_0);
      break;
    case 1:
      dma_outb(pagenr, DMA_PAGE_1);
      break;
    case 2:
      dma_outb(pagenr, DMA_PAGE_2);
      break;
    case 3:
      dma_outb(pagenr, DMA_PAGE_3);
      break;
    case 5:
      dma_outb(pagenr & 0xfe, DMA_PAGE_5);
      break;
    case 6:
      dma_outb(pagenr & 0xfe, DMA_PAGE_6);
      break;
    case 7:
      dma_outb(pagenr & 0xfe, DMA_PAGE_7);
      break;
  }
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
  set_dma_page(dmanr, a >> 16);
  if (dmanr <= 3) {
    dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
    dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
  } else {
    dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
    dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
  }
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
  count--;
  if (dmanr <= 3) {
    dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
    dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
  } else {
    dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
    dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
  }
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
  unsigned int io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
                                      : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
  /* using short to get 16-bit wrap around */
  unsigned short count, temp;

  dma_inb(io_port, count);
  count++;
  dma_inb(io_port, temp);
  count += temp << 8;

  return (dmanr <= 3) ? count : (count << 1);
}
#endif /* _ASM_DMA_H */
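
The helpers above follow the usual 8237 programming sequence: mask the channel, clear the flip-flop, then load mode, address and count before unmasking, all with interrupts disabled. A minimal sketch of that sequence follows; the channel number, the buffer, and the <bsp.h> include assumed to supply outport_byte()/inport_byte() are illustrative assumptions, not taken from this commit.

#include <rtems.h>
#include <bsp.h>   /* assumed to provide outport_byte()/inport_byte() */
#include "dma.h"   /* the header above; the actual include path may differ */

/* Start an I/O -> memory transfer of `count` bytes into the buffer at
 * physical address `phys` on channel 2 (an arbitrary example channel).
 * `phys` must lie below MAX_DMA_ADDRESS and must not cross a 64K boundary.
 */
static void start_dma_read(unsigned int phys, unsigned int count)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);    /* flip-flop bookkeeping needs this */
  disable_dma(2);
  clear_dma_ff(2);                   /* next writes are LSB, then MSB */
  set_dma_mode(2, DMA_MODE_READ);    /* device -> memory, single mode, increment */
  set_dma_addr(2, phys);             /* page register + 16-bit offset */
  set_dma_count(2, count);           /* the helper loads count - 1 */
  enable_dma(2);
  rtems_interrupt_enable(level);
}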


@@ -0,0 +1,13 @@
#ifndef NET_EXTERNS_H
#define NET_EXTERNS_H
/*
* External entry points
*/
extern int uti596_attach(struct rtems_bsdnet_ifconfig *);
extern int uti596dump(char * );
extern void uti596reset(void);
extern void uti596Diagnose(int);
#endif
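
For context, an attach entry point such as uti596_attach() is normally referenced from the application's RTEMS BSD networking configuration. The sketch below shows how that wiring typically looks; the interface name, the addresses and the exact rtems_bsdnet_ifconfig field order are assumptions about the stack of that era, not taken from this commit.

#include <stddef.h>
#include <rtems/rtems_bsdnet.h>

extern int uti596_attach(struct rtems_bsdnet_ifconfig *);

/* Hypothetical interface entry; only the leading fields are initialized. */
static struct rtems_bsdnet_ifconfig uti596_ifconfig = {
  "uti1",          /* interface name (assumed)      */
  uti596_attach,   /* attach routine declared above */
  NULL,            /* no further interfaces         */
  "192.168.1.2",   /* IP address (example)          */
  "255.255.255.0"  /* netmask (example)             */
};

struct rtems_bsdnet_config rtems_bsdnet_config = {
  &uti596_ifconfig, /* interface list */
  NULL              /* no BOOTP       */
};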

File diff suppressed because it is too large.


@@ -0,0 +1,276 @@
/* uti596.h: Contains the defines and structures used by the uti596 driver */
/*
 * EII: March 11: Created v. 0.0
 * Jan 12/98: Added STAT bits S11-S5 and max_colls.
 */
#ifndef UTI596_H
#define UTI596_H
#include <rtems/error.h>
#include <rtems/rtems_bsdnet.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
/* Ethernet statistics */
struct enet_statistics{
int rx_packets; /* total packets received */
int tx_packets; /* total packets transmitted */
int rx_errors; /* bad packets received */
int tx_errors; /* packet transmit problems */
int rx_dropped; /* no space in buffers */
int tx_dropped; /* no space available */
int tx_retries_exceeded; /* excessive retries */
int multicast; /* multicast packets received */
int collisions;
/* detailed rx_errors: */
int rx_length_errors;
int rx_over_errors; /* receiver ring buff overflow */
int rx_crc_errors; /* recved pkt with crc error */
int rx_frame_errors; /* recv'd frame alignment error */
int rx_fifo_errors; /* recv'r fifo overrun */
int rx_missed_errors; /* receiver missed packet */
/* detailed tx_errors */
int tx_aborted_errors;
int tx_carrier_errors;
int tx_fifo_errors;
int tx_heartbeat_errors;
int tx_window_errors;
};
enum commands {
CmdNOp = 0,
CmdSASetup = 1,
CmdConfigure = 2,
CmdMulticastList = 3,
CmdTx = 4,
CmdTDR = 5,
CmdDump = 6,
CmdDiagnose = 7
};
#define UTI596_MUTEX 1
#define CMD_EOL 0x8000 /* The last command of the list, stop. */
#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
#define CMD_FLEX 0x0008 /* Enable flexible memory model */
#define SCB_STAT_CX 0x8000 /* Cmd completes with 'I' bit set */
#define SCB_STAT_FR 0x4000 /* Frame Received */
#define SCB_STAT_CNA 0x2000 /* Cmd unit Not Active */
#define SCB_STAT_RNR 0x1000 /* Receiver Not Ready */
#define STAT_C 0x8000 /* Set to 1 after execution */
#define STAT_B 0x4000 /* 1 : Cmd being executed, 0 : Cmd done. */
#define STAT_OK 0x2000 /* 1: Command executed ok 0 : Error */
#define STAT_A 0x1000 /* command has been aborted */
#define STAT_S11 0x0800
#define STAT_S10 0x0400
#define STAT_S9 0x0200
#define STAT_S8 0x0100
#define STAT_S7 0x0080
#define STAT_S6 0x0040
#define STAT_S5 0x0020
#define STAT_MAX_COLLS 0x000F
#define RBD_STAT_P 0x4000 /* prefetch */
#define RBD_STAT_F 0x4000 /* used */
#define CUC_START 0x0100
#define CUC_RESUME 0x0200
#define CUC_SUSPEND 0x0300
#define CUC_ABORT 0x0400
#define RX_START 0x0010
#define RX_RESUME 0x0020
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
#define RU_SUSPENDED 0x0010
#define RU_NO_RESOURCES 0x0020
#define RU_READY 0x0040
#define IO_ADDR 0x360
#define PORT_ADDR IO_ADDR
#define CHAN_ATTN (PORT_ADDR + 4)
#define NIC_ADDR (PORT_ADDR + 8)
struct i596_cmd {
volatile unsigned short status;
volatile unsigned short command;
struct i596_cmd *next;
};
#define I596_NULL ( ( void * ) 0xffffffff)
#define UTI_596_END_OF_FRAME 0x8000
#define SIZE_MASK 0x3fff
/*
* Transmit buffer Descriptor
*/
struct i596_tbd {
unsigned short size;
unsigned short pad;
struct i596_tbd *next;
char *data;
};
/*
* Receive buffer Descriptor
*/
struct i596_rbd {
unsigned short count;
unsigned short offset;
struct i596_rbd *next;
char *data;
unsigned short size;
unsigned short pad;
};
/*
* Transmit Command Structure
*/
struct tx_cmd {
struct i596_cmd cmd;
struct i596_tbd *pTbd;
unsigned short size;
unsigned short pad;
} ;
/*
* Receive Frame Descriptor
*/
struct i596_rfd {
volatile unsigned short stat;
volatile unsigned short cmd;
struct i596_rfd *next;
struct i596_rbd *pRbd;
unsigned short count;
unsigned short size;
char data[1532];
} ;
struct i596_dump {
struct i596_cmd cmd;
char * pData;
};
struct i596_set_add {
struct i596_cmd cmd;
char data[8];
};
struct i596_configure {
struct i596_cmd cmd;
char data[16];
};
#define RX_RING_SIZE 8
/*
* System Control Block
*/
struct i596_scb {
volatile unsigned short status;
volatile unsigned short command;
struct i596_cmd *pCmd;
struct i596_rfd *pRfd;
volatile unsigned long crc_err;
volatile unsigned long align_err;
volatile unsigned long resource_err;
volatile unsigned long over_err;
volatile unsigned long rcvdt_err;
volatile unsigned long short_err;
volatile unsigned short t_on;
volatile unsigned short t_off;
};
/*
* Intermediate System Control Block
*/
struct i596_iscp {
volatile unsigned long stat;
struct i596_scb *scb;
} ;
/*
* System Control Parameters
*/
struct i596_scp {
unsigned long sysbus;
unsigned long pad;
struct i596_iscp *iscp;
} ;
struct uti596_softc {
struct arpcom arpcom;
rtems_irq_connect_data irqInfo;
struct i596_scp *pScp;
struct i596_iscp iscp;
struct i596_scb scb;
struct i596_set_add set_add;
struct i596_configure set_conf;
struct i596_cmd tdr;
unsigned long stat;
struct tx_cmd *pTxCmd;
struct i596_tbd *pTbd;
int ioAddr;
struct i596_rfd *pBeginRFA;
struct i596_rfd *pEndRFA;
struct i596_rfd *pLastUnkRFD;
struct i596_rbd *pLastUnkRBD;
struct i596_rfd *pEndSavedQueue;
struct i596_cmd *pCmdHead;
struct i596_cmd *pCmdTail; /* unneeded, as chaining not used, but implemented */
rtems_id rxDaemonTid;
rtems_id txDaemonTid;
struct enet_statistics stats;
int started;
unsigned long rxInterrupts;
unsigned long txInterrupts;
volatile int cmdOk;
int resetDone;
unsigned long txRawWait;
struct i596_rfd *pInboundFrameQueue;
short int rxBdCount;
short int txBdCount;
short int countRFD;
short int savedCount;
struct i596_rfd *pSavedRfdQueue;
rtems_name semaphore_name;
rtems_id semaphore_id;
char zeroes[64];
unsigned long rawsndcnt;
} ;
#endif
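
The pScp, iscp and scb members of uti596_softc mirror the 82596 start-up chain: the SCP points at the ISCP, the ISCP points at the SCB, and the SCB anchors the command list and the receive frame area. The sketch below is illustrative only; the zeroed sysbus field, the ISCP busy-flag handling and the assumption that pScp already points at suitably aligned, chip-reachable memory are general 82596 conventions rather than code from this commit.

#include "uti596.h"   /* the header above; the actual include path may differ */

/* Illustrative wiring of the shared 82596 structures inside the softc. */
static void uti596_link_structures(struct uti596_softc *sc)
{
  sc->pScp->sysbus = 0;              /* placeholder: bus-mode bits are board specific */
  sc->pScp->iscp   = &sc->iscp;      /* SCP -> ISCP */

  sc->iscp.stat    = 0x0001;         /* busy flag; the chip clears it after init */
  sc->iscp.scb     = &sc->scb;       /* ISCP -> SCB */

  sc->scb.command  = 0;
  sc->scb.pCmd     = (struct i596_cmd *) I596_NULL;  /* empty command list    */
  sc->scb.pRfd     = (struct i596_rfd *) I596_NULL;  /* RFA is attached later */
}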