author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39    /drivers/net/ethernet/i825xx
parent    Initial commit. (diff)

Adding upstream version 6.1.76.    (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/i825xx')
-rw-r--r--    drivers/net/ethernet/i825xx/82596.c         1537
-rw-r--r--    drivers/net/ethernet/i825xx/Kconfig           68
-rw-r--r--    drivers/net/ethernet/i825xx/Makefile          11
-rw-r--r--    drivers/net/ethernet/i825xx/ether1.c        1086
-rw-r--r--    drivers/net/ethernet/i825xx/ether1.h         277
-rw-r--r--    drivers/net/ethernet/i825xx/lasi_82596.c     240
-rw-r--r--    drivers/net/ethernet/i825xx/lib82596.c      1424
-rw-r--r--    drivers/net/ethernet/i825xx/sni_82596.c      191
-rw-r--r--    drivers/net/ethernet/i825xx/sun3_82586.c    1185
-rw-r--r--    drivers/net/ethernet/i825xx/sun3_82586.h     318
10 files changed, 6337 insertions, 0 deletions
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
new file mode 100644
index 000000000..3ee89ae49
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -0,0 +1,1537 @@
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain, when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+#include <linux/pgtable.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/cacheflush.h>
+
+static char version[] __initdata =
+ "82596.c $Revision: 1.5 $\n";
+
+#define DRV_NAME "82596"
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x,y) if (i596_debug & (x)) y
+
+
+#if IS_ENABLED(CONFIG_MVME16x_NET)
+#define ENABLE_MVME16x_NET
+#endif
+#if IS_ENABLED(CONFIG_BVME6000_NET)
+#define ENABLE_BVME6000_NET
+#endif
+
+#ifdef ENABLE_MVME16x_NET
+#include <asm/mvme16xhw.h>
+#endif
+#ifdef ENABLE_BVME6000_NET
+#include <asm/bvme6000hw.h>
+#endif
+
+/*
+ * Define various macros for Channel Attention, word swapping etc., dependent
+ * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
+ */
+
+#ifdef __mc68000__
+#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define ISCP_BUSY 0x00010000
+#else
+#error 82596.c: unknown architecture
+#endif
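+
+/*
+ * Illustrative note (not part of the original driver): each WSWAP macro is a
+ * 16-bit rotate, so a bus address such as 0x12345678 is handed to the i596
+ * as 0x56781234 -- exactly the quirk described in the header comment above.
+ * As a minimal sketch, assuming a hypothetical swap32() helper name:
+ *
+ *	static inline u32 swap32(u32 x)
+ *	{
+ *		return (x << 16) | (x >> 16);	// rotate the two 16-bit halves
+ *	}
+ *
+ *	swap32(0x12345678) == 0x56781234
+ */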
+
+/*
+ * These were the intel versions, left here for reference. There
+ * are currently no x86 users of this legacy i82596 chip.
+ */
+#if 0
+#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
+#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
+#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
+#define WSWAPscb(x) ((struct i596_scb *)((long)x))
+#define WSWAPcmd(x) ((struct i596_cmd *)((long)x))
+#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
+#define WSWAPchar(x) ((char *)((long)x))
+#define ISCP_BUSY 0x0001
+#endif
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
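+
+/*
+ * Usage sketch (editorial addition, for illustration only): init_i596_mem()
+ * below combines one of these codes with a bus address and hands the result
+ * to MPU_PORT(), which performs the word-swapped write described above, e.g.
+ *
+ *	MPU_PORT(dev, PORT_RESET, NULL);
+ *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
+ */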
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
+
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
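+
+/* Editorial note: i596_rx() below uses this threshold to choose between
+ * copying a short frame into a freshly allocated skb and handing the ring
+ * skb straight up the stack (replacing it with a new PKT_BUF_SZ buffer).
+ */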
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define I596_TOTAL_SIZE 17
+
+#define I596_NULL ((void *)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT (HZ/20)
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ unsigned long ca;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ char *data;
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *b_next; /* Address from i596 viewpoint */
+};
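+
+/*
+ * Linking sketch (editorial addition, mirroring i596_add_cmd() below): the
+ * CPU-side and chip-side lists are threaded in one step, with b_next always
+ * pointing at the status word of the next command rather than at v_next:
+ *
+ *	lp->cmd_tail->v_next = cmd;
+ *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
+ */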
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *b_next; /* Address from i596 viewpoint */
+ struct i596_rbd *rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+};
+
+struct i596_rbd {
+ unsigned short count;
+ unsigned short zero1;
+ struct i596_rbd *b_next;
+ unsigned char *b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ struct i596_rbd *b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+};
+
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
+
+struct i596_private {
+ volatile struct i596_scp scp;
+ volatile struct i596_iscp iscp;
+ volatile struct i596_scb scb;
+ struct sa_cmd sa_cmd;
+ struct cf_cmd cf_cmd;
+ struct tdr_cmd tdr_cmd;
+ struct mc_cmd mc_cmd;
+ unsigned long stat;
+ int last_restart __attribute__((aligned(4)));
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct i596_rfd rfds[RX_RING_SIZE];
+ struct i596_rbd rbds[RX_RING_SIZE];
+ struct tx_cmd tx_cmds[TX_RING_SIZE];
+ struct i596_tbd tbds[TX_RING_SIZE];
+ int next_tx_cmd;
+ spinlock_t lock;
+};
+
+static char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+#ifdef CONFIG_VME
+ 0xc0, /* don't save bad frames */
+#else
+ 0x80, /* don't save bad frames */
+#endif
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 25;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+
+static inline void CA(struct net_device *dev)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ ((struct i596_reg *) dev->base_addr)->ca = 1;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile u32 i;
+
+ i = *(volatile u32 *) (dev->base_addr);
+ }
+#endif
+}
+
+
+static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
+ p->porthi = ((c) | (u32) (x)) & 0xffff;
+ p->portlo = ((c) | (u32) (x)) >> 16;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ u32 v = (u32) (c) | (u32) (x);
+ v = ((u32) (v) << 16) | ((u32) (v) >> 16);
+ *(volatile u32 *) dev->base_addr = v;
+ udelay(1);
+ *(volatile u32 *) dev->base_addr = v;
+ }
+#endif
+}
+
+
+static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->iscp.stat)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->scb.command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
+{
+ volatile struct i596_cmd *c = cmd;
+
+ while (--delcnt && c->command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s.\n", dev->name, str);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
+ &lp->scp, lp->scp.sysbus, lp->scp.iscp);
+ printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
+ &lp->iscp, lp->iscp.stat, lp->iscp.scb);
+ printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %p, .rfd = %p\n",
+ &lp->scb, lp->scb.status, lp->scb.command,
+ lp->scb.cmd, lp->scb.rfd);
+ printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx,"
+ " over %lx, rcvdt %lx, short %lx\n",
+ lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
+ lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
+ cmd = lp->cmd_head;
+ while (cmd != I596_NULL) {
+ printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
+ cmd, cmd->status, cmd->command, cmd->b_next);
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk(KERN_ERR "rfd_head = %p\n", rfd);
+ do {
+ printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
+ " count %04x\n",
+ rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
+ rfd->count);
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk(KERN_ERR "rbd_head = %p\n", rbd);
+ do {
+ printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n",
+ rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+}
+
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+static irqreturn_t i596_error(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x28] = 1;
+ pcc2[0x2b] = 0x1d;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+ printk(KERN_ERR "%s: Error interrupt\n", dev->name);
+ i596_display_data(dev);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dev_kfree_skb(rbd->skb);
+ rbd->skb = NULL;
+ }
+}
+
+static inline int init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
+
+ if (skb == NULL) {
+ remove_rx_bufs(dev);
+ return -ENOMEM;
+ }
+
+ rbd->v_next = rbd+1;
+ rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
+ rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->data;
+ rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
+ rbd->size = PKT_BUF_SZ;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
+#endif
+ }
+ lp->rbd_head = lp->rbds;
+ rbd = lp->rbds + rx_ring_size - 1;
+ rbd->v_next = lp->rbds;
+ rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
+ rfd->cmd = CMD_FLEX;
+ }
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd = lp->rfds;
+ rfd->rbd = lp->rbd_head;
+ rfd->v_prev = lp->rfds + rx_ring_size - 1;
+ rfd = lp->rfds + rx_ring_size - 1;
+ rfd->v_next = lp->rfds;
+ rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+
+ return 0;
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ lp->rfds[i].rbd = I596_NULL;
+ lp->rfds[i].cmd = CMD_FLEX;
+ }
+ lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ lp->rbd_head = lp->rbds;
+ lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
+}
+
+
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ unsigned long flags;
+
+ MPU_PORT(dev, PORT_RESET, NULL);
+
+ udelay(100); /* Wait 100us - seems to help */
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints for now */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x48;
+ /* Following disables snooping. Snooping is not required
+ * as we make appropriate use of non-cached pages for
+ * shared data, and cache_push/cache_clear.
+ */
+ pcc2[0x2b] = 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+ /* change the scp address */
+
+ MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
+
+#endif
+
+ lp->last_cmd = jiffies;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x)
+ lp->scp.sysbus = 0x00000054;
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000)
+ lp->scp.sysbus = 0x0000004c;
+#endif
+
+ lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
+ lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
+ lp->iscp.stat = ISCP_BUSY;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = I596_NULL;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ lp->scb.t_on = 7 * 25;
+ lp->scb.t_off = 1 * 25;
+ }
+#endif
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+ CA(dev);
+
+ if (wait_istat(dev,lp,1000,"initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+ lp->scb.command = 0;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Enable ints, etc. now */
+ pcc2[0x2a] = 0x55; /* Edge sensitive */
+ pcc2[0x2b] = 0x15;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 3;
+ }
+#endif
+
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
+ memcpy(lp->cf_cmd.i596_config, init_setup, 14);
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
+ lp->sa_cmd.cmd.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->sa_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+ lp->tdr_cmd.cmd.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed;
+ }
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+ lp->scb.command = RX_START;
+ CA(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"RX_START not processed"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
+ return 0;
+
+failed:
+ printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
+ MPU_PORT(dev, PORT_RESET, NULL);
+ return -1;
+}
+
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = I596_NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr)
+ rbd = lp->rbd_head;
+ else {
+ printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = I596_NULL;
+ }
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
+ /* a good frame */
+ int pkt_len = rbd->count & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ /* Get fresh skbuff to replace filled one. */
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ rbd->v_data = newskb->data;
+ rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
+#endif
+ } else {
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ }
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ dev->stats.rx_dropped++;
+ } else {
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ skb_reserve(skb, 2);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->len = pkt_len;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(rbd->skb->data),
+ pkt_len);
+#endif
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes+=pkt_len;
+ }
+ }
+ else {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ dev->stats.rx_errors++;
+ if ((rfd->stat) & 0x0001)
+ dev->stats.collisions++;
+ if ((rfd->stat) & 0x0080)
+ dev->stats.rx_length_errors++;
+ if ((rfd->stat) & 0x0100)
+ dev->stats.rx_over_errors++;
+ if ((rfd->stat) & 0x0200)
+ dev->stats.rx_fifo_errors++;
+ if ((rfd->stat) & 0x0400)
+ dev->stats.rx_frame_errors++;
+ if ((rfd->stat) & 0x0800)
+ dev->stats.rx_crc_errors++;
+ if ((rfd->stat) & 0x1000)
+ dev->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != I596_NULL && (rbd->count & 0x4000)) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+ rfd->count = 0;
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = CMD_FLEX;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ rfd = lp->rfd_head;
+ }
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
+
+ return 0;
+}
+
+
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != I596_NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ dev_kfree_skb(skb);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ ptr->v_next = ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = ptr->b_next = I596_NULL;
+ }
+ }
+
+ wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
+ lp->scb.cmd = I596_NULL;
+}
+
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+ int ioaddr)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev,lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->v_next = cmd->b_next = I596_NULL;
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != I596_NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev,lp,100,"i596_add_cmd timed out");
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
+ lp->scb.command = CUC_START;
+ CA(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ int res = 0;
+
+ DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
+ res = -EAGAIN;
+ goto err_irq_dev;
+ }
+ }
+#endif
+ res = init_rx_bufs(dev);
+ if (res)
+ goto err_irq_56;
+
+ netif_start_queue(dev);
+
+ if (init_i596_mem(dev)) {
+ res = -EAGAIN;
+ goto err_queue;
+ }
+
+ return 0;
+
+err_queue:
+ netif_stop_queue(dev);
+ remove_rx_bufs(dev);
+err_irq_56:
+#ifdef ENABLE_MVME16x_NET
+ free_irq(0x56, dev);
+err_irq_dev:
+#endif
+ free_irq(dev->irq, dev);
+
+ return res;
+}
+
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ dev->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
+ lp->scb.command = CUC_START | RX_START;
+ CA (dev);
+ lp->last_restart = dev->stats.tx_packets;
+ }
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+
+ DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
+ dev->name, skb->len, skb->data));
+
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
+ dev->name);
+ dev->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX | CmdTx;
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = EOF | length;
+
+ tbd->data = WSWAPchar(virt_to_bus(skb->data));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys(skb->data), length);
+#endif
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
+ add, add + 6, add, add[12], add[13], str);
+}
+
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct net_device * __init i82596_probe(void)
+{
+ struct net_device *dev;
+ int i;
+ struct i596_private *lp;
+ char eth_addr[8];
+ static int probed;
+ int err;
+
+ if (probed)
+ return ERR_PTR(-ENODEV);
+ probed++;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
+ printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
+ err = -ENODEV;
+ goto out;
+ }
+ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
+ dev->base_addr = MVME_I596_BASE;
+ dev->irq = (unsigned) MVME16x_IRQ_I596;
+ goto found;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
+ unsigned char msr = rtc[3];
+ int i;
+
+ rtc[3] |= 0x80;
+ for (i = 0; i < 6; i++)
+ eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
+ rtc[3] = msr;
+ dev->base_addr = BVME_I596_BASE;
+ dev->irq = (unsigned) BVME_IRQ_I596;
+ goto found;
+ }
+#endif
+ err = -ENODEV;
+ goto out;
+
+found:
+ dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
+ if (!dev->mem_start) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+
+ for (i = 0; i < 6; i++)
+ DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
+ eth_hw_addr_set(dev, eth_addr);
+
+ DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
+
+ /* The 82596-specific entries in the device structure. */
+ dev->netdev_ops = &i596_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->ml_priv = (void *)(dev->mem_start);
+
+ lp = dev->ml_priv;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
+ "lp->scb at 0x%08lx\n",
+ dev->name, (unsigned long)lp,
+ sizeof(struct i596_private), (unsigned long)&lp->scb));
+ memset((void *) lp, 0, sizeof(struct i596_private));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
+ cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
+ kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
+#endif
+ lp->scb.command = 0;
+ lp->scb.cmd = I596_NULL;
+ lp->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+out2:
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+ kernel_set_cachemode((void *)(dev->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev->mem_start));
+out1:
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ short ioaddr;
+ unsigned short status, ack_cmd = 0;
+ int handled = 0;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
+ i596_error(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ }
+#endif
+ if (dev == NULL) {
+ printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = dev->ml_priv;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ status = lp->scb.status;
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, irq, status));
+
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ handled = 1;
+ if ((status & 0x8000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+
+ while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
+ lp->cmd_head->status, lp->cmd_head->command));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if ((ptr->status) & STAT_OK) {
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
+ } else {
+ dev->stats.tx_errors++;
+ if ((ptr->status) & 0x0020)
+ dev->stats.collisions++;
+ if (!((ptr->status) & 0x0040))
+ dev->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400)
+ dev->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800)
+ dev->stats.collisions++;
+ if ((ptr->status) & 0x1000)
+ dev->stats.tx_aborted_errors++;
+ }
+
+ dev_consume_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = ((struct tdr_cmd *)ptr)->status;
+
+ if (status & 0x8000) {
+ DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
+ } else {
+ if (status & 0x4000)
+ printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk(KERN_ERR "%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk(KERN_ERR "%s: Short circuit.\n", dev->name);
+
+ DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+ case CmdMulticastList:
+ /* Zap command so set_multicast_list() knows it is free */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = ptr->b_next = I596_NULL;
+ lp->last_cmd = jiffies;
+ }
+
+ ptr = lp->cmd_head;
+ while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
+ ptr->command &= 0x1fff;
+ ptr = ptr->v_next;
+ }
+
+ if ((lp->cmd_head != I596_NULL))
+ ack_cmd |= CUC_START;
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ ack_cmd |= RX_START;
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ lp->scb.command = ack_cmd;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ /* Ack the interrupt */
+
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x2a] |= 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+ CA(dev);
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"close1 timed out");
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ wait_cmd(dev,lp,100,"close2 timed out");
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT,i596_display_data(dev));
+ i596_cleanup_cmd(dev,lp);
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x40;
+ pcc2[0x2b] = 0x40; /* Set snooping bits now! */
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+#ifdef ENABLE_MVME16x_NET
+ free_irq(0x56, dev);
+#endif
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, netdev_mc_count(dev),
+ dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+ if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
+ return;
+
+ if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+ }
+
+ cnt = netdev_mc_count(dev);
+ if (cnt > MAX_MC_CNT)
+ {
+ cnt = MAX_MC_CNT;
+ printk(KERN_ERR "%s: Only %d multicast addresses supported",
+ dev->name, cnt);
+ }
+
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
+ return;
+ cmd = &lp->mc_cmd;
+ cmd->cmd.command = CmdMulticastList;
+ cmd->mc_cnt = cnt * ETH_ALEN;
+ cp = cmd->mc_addrs;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, ha->addr, ETH_ALEN);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
+ dev->name, cp));
+ cp += ETH_ALEN;
+ }
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
+
+static struct net_device *dev_82596;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "i82596 debug mask");
+
+static int __init i82596_init(void)
+{
+ if (debug >= 0)
+ i596_debug = debug;
+ dev_82596 = i82596_probe();
+ return PTR_ERR_OR_ZERO(dev_82596);
+}
+module_init(i82596_init);
+
+static void __exit i82596_cleanup(void)
+{
+ unregister_netdev(dev_82596);
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+
+ kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev_82596->mem_start));
+ free_netdev(dev_82596);
+}
+module_exit(i82596_cleanup);
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
new file mode 100644
index 000000000..3b5fab123
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel 82586/82593/82596 network device configuration
+#
+
+config NET_VENDOR_I825XX
+ bool "Intel (82586/82593/82596) devices"
+ default y
+ depends on NET_VENDOR_INTEL
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question does not directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_I825XX
+
+config ARM_ETHER1
+ tristate "Acorn Ether1 support"
+ depends on ARM && ARCH_ACORN
+ help
+ If you have an Acorn system with one of these (AKA25) network cards,
+ you should say Y to this option if you wish to use it with Linux.
+
+config BVME6000_NET
+ tristate "BVME6000 Ethernet support"
+ depends on BVME6000
+ help
+ This is the driver for the Ethernet interface on BVME4000 and
+ BVME6000 VME boards. Say Y here to include the driver for this chip
+ in your kernel.
+ To compile this driver as a module, choose M here.
+
+config LASI_82596
+ tristate "Lasi ethernet"
+ depends on GSC
+ help
+ Say Y here to support the builtin Intel 82596 ethernet controller
+ found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
+
+config MVME16x_NET
+ tristate "MVME16x Ethernet support"
+ depends on MVME16x
+ help
+ This is the driver for the Ethernet interface on the Motorola
+ MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config SNI_82596
+ tristate "SNI RM ethernet"
+ depends on SNI_RM
+ help
+ Say Y here to support the on-board Intel 82596 ethernet controller
+ built into SNI RM machines.
+
+config SUN3_82586
+ bool "Sun3 on-board Intel 82586 support"
+ depends on SUN3
+ help
+ This driver enables support for the on-board Intel 82586 based
+ Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
+ that this driver does not support 82586-based adapters on additional
+ VME boards.
+
+endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
new file mode 100644
index 000000000..422a19a5d
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Intel 82586/82593/82596 chipset device drivers.
+#
+
+obj-$(CONFIG_ARM_ETHER1) += ether1.o
+obj-$(CONFIG_SUN3_82586) += sun3_82586.o
+obj-$(CONFIG_LASI_82596) += lasi_82596.o
+obj-$(CONFIG_SNI_82596) += sni_82596.o
+obj-$(CONFIG_MVME16x_NET) += 82596.o
+obj-$(CONFIG_BVME6000_NET) += 82596.o
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
new file mode 100644
index 000000000..3e7d7c4ba
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/acorn/net/ether1.c
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * Acorn ether1 driver (82586 chip) for Acorn machines
+ *
+ * We basically keep two queues in the card's memory - one for transmit
+ * and one for receive. Each has a head and a tail. The head is where
+ * we/the chip adds packets to be transmitted/received, and the tail
+ * is where the transmitter has got to/where the receiver will stop.
+ * Both of these queues are circular, and since the chip is running
+ * all the time, we have to be careful when we modify the pointers etc
+ * so that the buffer memory contents are valid all the time.
+ *
+ * Change log:
+ * 1.00 RMK Released
+ * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now.
+ * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
+ * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
+ * Should prevent lockup.
+ * 1.04 RMK 17/09/1997 Added more info when initialisation of chip goes wrong.
+ * TDR now only reports failure when chip reports non-zero
+ * TDR time-distance.
+ * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
+ * 1.06 RMK 10/02/2000 Updated for 2.3.43
+ * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8
+ */
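+
+/*
+ * Illustrative sketch (editorial addition): the transmit queue lives in the
+ * TX_AREA_START..TX_AREA_END window of card RAM and wraps, roughly as
+ * ether1_txalloc() does further down:
+ *
+ *	if (tx_head + size > TX_AREA_END) {	// would run off the end
+ *		if (tx_tail > tx_head)
+ *			return -1;		// ring is effectively full
+ *		if (TX_AREA_START + size > tx_tail)
+ *			return -1;		// no room after wrapping either
+ *		tx_head = TX_AREA_START + size;	// wrap back to the start
+ *	}
+ */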
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define __ETHER1_C
+#include "ether1.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+#define BUFFER_SIZE 0x10000
+#define TX_AREA_START 0x00100
+#define TX_AREA_END 0x05000
+#define RX_AREA_START 0x05000
+#define RX_AREA_END 0x0fc00
+
+static int ether1_open(struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t ether1_interrupt(int irq, void *dev_id);
+static int ether1_close(struct net_device *dev);
+static void ether1_setmulticastlist(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev, unsigned int txqueue);
+
+/* ------------------------------------------------------------------------- */
+
+static char version[] = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+
+#define BUS_16 16
+#define BUS_8 8
+
+/* ------------------------------------------------------------------------- */
+
+#define DISABLEIRQS 1
+#define NORMALIRQS 0
+
+#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
+#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)
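+
+/*
+ * Editorial note: the (int)(&((type *)0)->offset) expression above is the
+ * classic null-pointer offsetof idiom, so for example
+ *
+ *	ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)
+ *
+ * reads the 16-bit word at card address SCB_ADDR + offsetof(scb_t, scb_status).
+ */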
+
+static inline unsigned short
+ether1_inw_p (struct net_device *dev, int addr, int svflgs)
+{
+ unsigned long flags;
+ unsigned short ret;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+ return ret;
+}
+
+static inline void
+ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
+{
+ unsigned long flags;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ writew(val, ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+}
+
+/*
+ * Some inline assembler to allow fast transfers on to/off of the card.
+ * Since this driver depends on ARM-specific architecture features, and
+ * it cannot be configured without selecting ARM, this is not a problem.
+ *
+ * This routine is essentially an optimised memcpy from kernel memory to
+ * the card's onboard RAM.
+ */
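+
+/*
+ * Rough plain-C rendering of the write path below (editorial sketch only,
+ * ignoring the odd trailing byte and the load scheduling the assembler does):
+ * each 16-bit halfword of the source is replicated into both halves of a
+ * 32-bit word and stored into the card window, where consecutive 16-bit card
+ * locations are 4 host bytes apart (compare ether1_outw_p() above):
+ *
+ *	const u16 *src = data;
+ *	u32 __iomem *dst = addr;
+ *
+ *	for (; thislen >= 2; thislen -= 2) {
+ *		u32 w = *src++;
+ *		writel(w | (w << 16), dst++);
+ *	}
+ */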
+static void
+ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%1]\n\
+ streqb %0, [%2]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
+
+static void
+ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%2]\n\
+ streqb %0, [%1]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
+
+static int
+ether1_ramtest(struct net_device *dev, unsigned char byte)
+{
+ unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
+ int i, ret = BUFFER_SIZE;
+ int max_errors = 15;
+ int bad = -1;
+ int bad_start = 0;
+
+ if (!buffer)
+ return 1;
+
+ memset (buffer, byte, BUFFER_SIZE);
+ ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE);
+ memset (buffer, byte ^ 0xff, BUFFER_SIZE);
+ ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE);
+
+ for (i = 0; i < BUFFER_SIZE; i++) {
+ if (buffer[i] != byte) {
+ if (max_errors >= 0 && bad != buffer[i]) {
+ if (bad != -1)
+ printk ("\n");
+ printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X",
+ dev->name, buffer[i], byte, i);
+ ret = -ENODEV;
+ max_errors --;
+ bad = buffer[i];
+ bad_start = i;
+ }
+ } else {
+ if (bad != -1) {
+ if (bad_start == i - 1)
+ printk ("\n");
+ else
+ printk (" - 0x%04X\n", i - 1);
+ bad = -1;
+ }
+ }
+ }
+
+ if (bad != -1)
+ printk (" - 0x%04X\n", BUFFER_SIZE);
+ kfree (buffer);
+
+ return ret;
+}
+
+static int
+ether1_reset (struct net_device *dev)
+{
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+ return BUS_16;
+}
+
+static int
+ether1_init_2(struct net_device *dev)
+{
+ int i;
+ dev->mem_start = 0;
+
+ i = ether1_ramtest (dev, 0x5a);
+
+ if (i > 0)
+ i = ether1_ramtest (dev, 0x1e);
+
+ if (i <= 0)
+ return -ENODEV;
+
+ dev->mem_end = i;
+ return 0;
+}
+
+/*
+ * These are the structures that are loaded into the ether RAM card to
+ * initialise the 82586
+ */
+
+/* at 0x0100 */
+#define NOP_ADDR (TX_AREA_START)
+#define NOP_SIZE (0x06)
+static nop_t init_nop = {
+ 0,
+ CMD_NOP,
+ NOP_ADDR
+};
+
+/* at 0x003a */
+#define TDR_ADDR (0x003a)
+#define TDR_SIZE (0x08)
+static tdr_t init_tdr = {
+ 0,
+ CMD_TDR | CMD_INTR,
+ NOP_ADDR,
+ 0
+};
+
+/* at 0x002e */
+#define MC_ADDR (0x002e)
+#define MC_SIZE (0x0c)
+static mc_t init_mc = {
+ 0,
+ CMD_SETMULTICAST,
+ TDR_ADDR,
+ 0,
+ { { 0, } }
+};
+
+/* at 0x0022 */
+#define SA_ADDR (0x0022)
+#define SA_SIZE (0x0c)
+static sa_t init_sa = {
+ 0,
+ CMD_SETADDRESS,
+ MC_ADDR,
+ { 0, }
+};
+
+/* at 0x0010 */
+#define CFG_ADDR (0x0010)
+#define CFG_SIZE (0x12)
+static cfg_t init_cfg = {
+ 0,
+ CMD_CONFIG,
+ SA_ADDR,
+ 8,
+ 8,
+ CFG8_SRDY,
+ CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6),
+ 0,
+ 0x60,
+ 0,
+ CFG13_RETRY(15) | CFG13_SLOTH(2),
+ 0,
+};
+
+/* at 0x0000 */
+#define SCB_ADDR (0x0000)
+#define SCB_SIZE (0x10)
+static scb_t init_scb = {
+ 0,
+ SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX,
+ CFG_ADDR,
+ RX_AREA_START,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* at 0xffee */
+#define ISCP_ADDR (0xffee)
+#define ISCP_SIZE (0x08)
+static iscp_t init_iscp = {
+ 1,
+ SCB_ADDR,
+ 0x0000,
+ 0x0000
+};
+
+/* at 0xfff6 */
+#define SCP_ADDR (0xfff6)
+#define SCP_SIZE (0x0a)
+static scp_t init_scp = {
+ SCP_SY_16BBUS,
+ { 0, 0 },
+ ISCP_ADDR,
+ 0
+};
+
+#define RFD_SIZE (0x16)
+static rfd_t init_rfd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, },
+ { 0, },
+ 0
+};
+
+#define RBD_SIZE (0x0a)
+static rbd_t init_rbd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ ETH_FRAME_LEN + 8
+};
+
+#define TX_SIZE (0x08)
+#define TBD_SIZE (0x08)
+
+static int
+ether1_init_for_open (struct net_device *dev)
+{
+ int i, status, addr, next, next2;
+ int failures = 0;
+ unsigned long timeout;
+
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+
+ for (i = 0; i < 6; i++)
+ init_sa.sa_addr[i] = dev->dev_addr[i];
+
+ /* load data structures into ether1 RAM */
+ ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE);
+ ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
+ ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE);
+ ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE);
+ ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE);
+ ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE);
+ ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE);
+ ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE);
+
+ if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
+ printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
+ dev->name);
+ return 1;
+ }
+
+ /*
+ * setup circularly linked list of { rfd, rbd, buffer }, with
+ * all rfds circularly linked, rbds circularly linked.
+ * First rfd is linked to scp, first rbd is linked to first
+ * rfd. Last rbd has a suspend command.
+ */
+ addr = RX_AREA_START;
+ do {
+ next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+ next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+
+ if (next2 >= RX_AREA_END) {
+ next = RX_AREA_START;
+ init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
+ priv(dev)->rx_tail = addr;
+ } else
+ init_rfd.rfd_command = 0;
+ if (addr == RX_AREA_START)
+ init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
+ else
+ init_rfd.rfd_rbdoffset = 0;
+ init_rfd.rfd_link = next;
+ init_rbd.rbd_link = next + RFD_SIZE;
+ init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;
+
+ ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
+ ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
+ addr = next;
+ } while (next2 < RX_AREA_END);
+
+ priv(dev)->tx_link = NOP_ADDR;
+ priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
+ priv(dev)->tx_tail = TDR_ADDR;
+ priv(dev)->rx_head = RX_AREA_START;
+
+ /* release reset & give 586 a prod */
+ priv(dev)->resetting = 1;
+ priv(dev)->initialising = 1;
+ writeb(CTRL_RST, REG_CONTROL);
+ writeb(0, REG_CONTROL);
+ writeb(CTRL_CA, REG_CONTROL);
+
+ /* 586 should now unset iscp.busy */
+ timeout = jiffies + HZ/2;
+ while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
+ if (time_after(jiffies, timeout)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name);
+ return 1;
+ }
+ }
+
+ /* check status of commands that we issued */
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ;
+ while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ } else {
+ status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
+ if (status & TDR_XCVRPROB)
+ printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name);
+ else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
+#ifdef FANCY
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10,
+ (status & TDR_TIME) % 10);
+#else
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME));
+#endif
+ }
+ }
+
+ if (failures)
+ ether1_reset (dev);
+ return failures ? 1 : 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
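+/*
+ * ether1_txalloc: carve 'size' bytes (rounded up to an even length) out
+ * of the circular transmit area in board RAM.  Returns the offset of
+ * the allocation, or -1 if it would run into tx_tail, i.e. the ring is
+ * currently full.
+ */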
+static int
+ether1_txalloc (struct net_device *dev, int size)
+{
+ int start, tail;
+
+ size = (size + 1) & ~1;
+ tail = priv(dev)->tx_tail;
+
+ if (priv(dev)->tx_head + size > TX_AREA_END) {
+ if (tail > priv(dev)->tx_head)
+ return -1;
+ start = TX_AREA_START;
+ if (start + size > tail)
+ return -1;
+ priv(dev)->tx_head = start + size;
+ } else {
+ if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail)
+ return -1;
+ start = priv(dev)->tx_head;
+ priv(dev)->tx_head += size;
+ }
+
+ return start;
+}
+
+static int
+ether1_open (struct net_device *dev)
+{
+ if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
+ return -EAGAIN;
+
+ if (ether1_init_for_open (dev)) {
+ free_irq (dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void
+ether1_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
+ dev->name);
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset (dev);
+
+ if (ether1_init_for_open (dev))
+ printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
+
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t
+ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
+{
+ int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
+ unsigned long flags;
+ tx_t tx;
+ tbd_t tbd;
+ nop_t nop;
+
+ if (priv(dev)->restart) {
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset(dev);
+
+ if (ether1_init_for_open(dev))
+ printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
+ else
+ priv(dev)->restart = 0;
+ }
+
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+ }
+
+ /*
+ * insert packet followed by a nop
+ */
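+	/*
+	 * The new TX command links to a freshly written self-linked NOP,
+	 * and the previous NOP (remembered in priv->tx_link) is then
+	 * re-pointed at the TX command, so the 82586 walks from its
+	 * parked NOP onto the new packet and parks again on the new NOP.
+	 */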
+ txaddr = ether1_txalloc (dev, TX_SIZE);
+ tbdaddr = ether1_txalloc (dev, TBD_SIZE);
+ dataddr = ether1_txalloc (dev, skb->len);
+ nopaddr = ether1_txalloc (dev, NOP_SIZE);
+
+ tx.tx_status = 0;
+ tx.tx_command = CMD_TX | CMD_INTR;
+ tx.tx_link = nopaddr;
+ tx.tx_tbdoffset = tbdaddr;
+ tbd.tbd_opts = TBD_EOL | skb->len;
+ tbd.tbd_link = I82586_NULL;
+ tbd.tbd_bufl = dataddr;
+ tbd.tbd_bufh = 0;
+ nop.nop_status = 0;
+ nop.nop_command = CMD_NOP;
+ nop.nop_link = nopaddr;
+
+ local_irq_save(flags);
+ ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
+ ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
+ ether1_writebuffer (dev, skb->data, dataddr, skb->len);
+ ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
+ tmp = priv(dev)->tx_link;
+ priv(dev)->tx_link = nopaddr;
+
+ /* now reset the previous nop pointer */
+ ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);
+
+ local_irq_restore(flags);
+
+ /* handle transmit */
+
+ /* check to see if we have room for a full sized ether frame */
+ tmp = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = tmp;
+ dev_kfree_skb (skb);
+
+ if (tst == -1)
+ netif_stop_queue(dev);
+
+ out:
+ return NETDEV_TX_OK;
+}
+
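+/*
+ * ether1_xmit_done: walk the command chain from tx_tail, updating the
+ * statistics for each completed TX command (each should be followed by
+ * a NOP), advance tx_tail past the processed entries, and wake the
+ * transmit queue once a full-sized frame fits in the ring again.
+ */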
+static void
+ether1_xmit_done (struct net_device *dev)
+{
+ nop_t nop;
+ int caddr, tst;
+
+ caddr = priv(dev)->tx_tail;
+
+again:
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+
+ switch (nop.nop_command & CMD_MASK) {
+ case CMD_TDR:
+ /* special case */
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
+ scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ priv(dev)->tx_tail = NOP_ADDR;
+ return;
+
+ case CMD_NOP:
+ if (nop.nop_link == caddr) {
+ if (priv(dev)->initialising == 0)
+ printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
+ else
+ priv(dev)->initialising = 0;
+ return;
+ }
+ if (caddr == nop.nop_link)
+ return;
+ caddr = nop.nop_link;
+ goto again;
+
+ case CMD_TX:
+ if (nop.nop_status & STAT_COMPLETE)
+ break;
+ printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
+ priv(dev)->restart = 1;
+ return;
+
+ default:
+ printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)", dev->name,
+ nop.nop_command & CMD_MASK, caddr);
+ priv(dev)->restart = 1;
+ return;
+ }
+
+ while (nop.nop_status & STAT_COMPLETE) {
+ if (nop.nop_status & STAT_OK) {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
+ } else {
+ dev->stats.tx_errors++;
+
+ if (nop.nop_status & STAT_COLLAFTERTX)
+ dev->stats.collisions++;
+ if (nop.nop_status & STAT_NOCARRIER)
+ dev->stats.tx_carrier_errors++;
+ if (nop.nop_status & STAT_TXLOSTCTS)
+ printk (KERN_WARNING "%s: cts lost\n", dev->name);
+ if (nop.nop_status & STAT_TXSLOWDMA)
+ dev->stats.tx_fifo_errors++;
+ if (nop.nop_status & STAT_COLLEXCESSIVE)
+ dev->stats.collisions += 16;
+ }
+
+ if (nop.nop_link == caddr) {
+ printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
+ break;
+ }
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
+ break;
+ }
+
+ if (caddr == nop.nop_link)
+ break;
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_TX) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
+ break;
+ }
+ }
+ priv(dev)->tx_tail = caddr;
+
+ caddr = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = caddr;
+ if (tst != -1)
+ netif_wake_queue(dev);
+}
+
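+/*
+ * ether1_recv_done: for each completed rfd, copy the received frame out
+ * of board RAM into a freshly allocated skb and hand it to the stack.
+ * The just-consumed descriptor then becomes the new ring tail (gaining
+ * the end-of-list/suspend bits) while the old tail is cleared for reuse.
+ */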
+static void
+ether1_recv_done (struct net_device *dev)
+{
+ int status;
+ int nexttail, rbdaddr;
+ rbd_t rbd;
+
+ do {
+ status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
+ if ((status & RFD_COMPLETE) == 0)
+ break;
+
+ rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
+ ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);
+
+ if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
+ int length = rbd.rbd_status & RBD_ACNT;
+ struct sk_buff *skb;
+
+ length = (length + 1) & ~1;
+ skb = netdev_alloc_skb(dev, length + 2);
+
+ if (skb) {
+ skb_reserve (skb, 2);
+
+ ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
+
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->stats.rx_packets++;
+ } else
+ dev->stats.rx_dropped++;
+ } else {
+ printk(KERN_WARNING "%s: %s\n", dev->name,
+ (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
+ dev->stats.rx_dropped++;
+ }
+
+ nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
+ /* nexttail should be rx_head */
+ if (nexttail != priv(dev)->rx_head)
+ printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
+ dev->name, nexttail, priv(dev)->rx_head);
+ ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);
+
+ priv(dev)->rx_tail = nexttail;
+ priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
+ } while (1);
+}
+
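+/*
+ * ether1_interrupt: acknowledge whichever SCB status bits are set, then
+ * dispatch: CX -> transmit completions, FR -> received frames, CNA is
+ * used to sequence the command unit restart after a reset, and RNR
+ * resumes a receive unit that suspended for lack of buffer space.
+ */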
+static irqreturn_t
+ether1_interrupt (int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ int status;
+
+ status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS);
+
+ if (status) {
+ ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX),
+ SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA | CTRL_ACK, REG_CONTROL);
+ if (status & SCB_STCX) {
+ ether1_xmit_done (dev);
+ }
+ if (status & SCB_STCNA) {
+ if (priv(dev)->resetting == 0)
+ printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name);
+ else
+ priv(dev)->resetting += 1;
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ if (priv(dev)->resetting == 2)
+ priv(dev)->resetting = 0;
+ }
+ if (status & SCB_STFR) {
+ ether1_recv_done (dev);
+ }
+ if (status & SCB_STRNR) {
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) {
+ printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
+ ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
+ } else
+ printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
+ printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset,
+ NORMALIRQS));
+ }
+ } else
+ writeb(CTRL_ACK, REG_CONTROL);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ether1_close (struct net_device *dev)
+{
+ ether1_reset (dev);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets.
+ * num_addrs == 0 Normal mode, clear multicast list.
+ * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ * best-effort filtering.
+ */
+static void
+ether1_setmulticastlist (struct net_device *dev)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void ether1_banner(void)
+{
+ static unsigned int version_printed = 0;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static const struct net_device_ops ether1_netdev_ops = {
+ .ndo_open = ether1_open,
+ .ndo_stop = ether1_close,
+ .ndo_start_xmit = ether1_sendpacket,
+ .ndo_set_rx_mode = ether1_setmulticastlist,
+ .ndo_tx_timeout = ether1_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int
+ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct net_device *dev;
+ u8 addr[ETH_ALEN];
+ int i, ret = 0;
+
+ ether1_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = alloc_etherdev(sizeof(struct ether1_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ dev->irq = ec->irq;
+ priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!priv(dev)->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ for (i = 0; i < 6; i++)
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
+
+ if (ether1_init_2(dev)) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ dev->netdev_ops = &ether1_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ / 100;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
+ dev->name, ec->slot_no, dev->dev_addr);
+
+ ecard_set_drvdata(ec, dev);
+ return 0;
+
+ free:
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void ether1_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id ether1_ids[] = {
+ { MANU_ACORN, PROD_ACORN_ETHER1 },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver ether1_driver = {
+ .probe = ether1_probe,
+ .remove = ether1_remove,
+ .id_table = ether1_ids,
+ .drv = {
+ .name = "ether1",
+ },
+};
+
+static int __init ether1_init(void)
+{
+ return ecard_register_driver(&ether1_driver);
+}
+
+static void __exit ether1_exit(void)
+{
+ ecard_remove_driver(&ether1_driver);
+}
+
+module_init(ether1_init);
+module_exit(ether1_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ether1.h b/drivers/net/ethernet/i825xx/ether1.h
new file mode 100644
index 000000000..3926e042f
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ether1.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/acorn/net/ether1.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * Network driver for Acorn Ether1 cards.
+ */
+
+#ifndef _LINUX_ether1_H
+#define _LINUX_ether1_H
+
+#ifdef __ETHER1_C
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define priv(dev) ((struct ether1_priv *)netdev_priv(dev))
+
+/* Page register */
+#define REG_PAGE (priv(dev)->base + 0x0000)
+
+/* Control register */
+#define REG_CONTROL (priv(dev)->base + 0x0004)
+#define CTRL_RST 0x01
+#define CTRL_LOOPBACK 0x02
+#define CTRL_CA 0x04
+#define CTRL_ACK 0x08
+
+#define ETHER1_RAM (priv(dev)->base + 0x2000)
+
+/* HW address */
+#define IDPROM_ADDRESS (priv(dev)->base + 0x0024)
+
+struct ether1_priv {
+ void __iomem *base;
+ unsigned int tx_link;
+ unsigned int tx_head;
+ volatile unsigned int tx_tail;
+ volatile unsigned int rx_head;
+ volatile unsigned int rx_tail;
+ unsigned char bus_type;
+ unsigned char resetting;
+ unsigned char initialising : 1;
+ unsigned char restart : 1;
+};
+
+#define I82586_NULL (-1)
+
+typedef struct { /* tdr */
+ unsigned short tdr_status;
+ unsigned short tdr_command;
+ unsigned short tdr_link;
+ unsigned short tdr_result;
+#define TDR_TIME (0x7ff)
+#define TDR_SHORT (1 << 12)
+#define TDR_OPEN (1 << 13)
+#define TDR_XCVRPROB (1 << 14)
+#define TDR_LNKOK (1 << 15)
+} tdr_t;
+
+typedef struct { /* transmit */
+ unsigned short tx_status;
+ unsigned short tx_command;
+ unsigned short tx_link;
+ unsigned short tx_tbdoffset;
+} tx_t;
+
+typedef struct { /* tbd */
+ unsigned short tbd_opts;
+#define TBD_CNT (0x3fff)
+#define TBD_EOL (1 << 15)
+ unsigned short tbd_link;
+ unsigned short tbd_bufl;
+ unsigned short tbd_bufh;
+} tbd_t;
+
+typedef struct { /* rfd */
+ unsigned short rfd_status;
+#define RFD_NOEOF (1 << 6)
+#define RFD_FRAMESHORT (1 << 7)
+#define RFD_DMAOVRN (1 << 8)
+#define RFD_NORESOURCES (1 << 9)
+#define RFD_ALIGNERROR (1 << 10)
+#define RFD_CRCERROR (1 << 11)
+#define RFD_OK (1 << 13)
+#define RFD_FDCONSUMED (1 << 14)
+#define RFD_COMPLETE (1 << 15)
+ unsigned short rfd_command;
+#define RFD_CMDSUSPEND (1 << 14)
+#define RFD_CMDEL (1 << 15)
+ unsigned short rfd_link;
+ unsigned short rfd_rbdoffset;
+ unsigned char rfd_dest[6];
+ unsigned char rfd_src[6];
+ unsigned short rfd_len;
+} rfd_t;
+
+typedef struct { /* rbd */
+ unsigned short rbd_status;
+#define RBD_ACNT (0x3fff)
+#define RBD_ACNTVALID (1 << 14)
+#define RBD_EOF (1 << 15)
+ unsigned short rbd_link;
+ unsigned short rbd_bufl;
+ unsigned short rbd_bufh;
+ unsigned short rbd_len;
+} rbd_t;
+
+typedef struct { /* nop */
+ unsigned short nop_status;
+ unsigned short nop_command;
+ unsigned short nop_link;
+} nop_t;
+
+typedef struct { /* set multicast */
+ unsigned short mc_status;
+ unsigned short mc_command;
+ unsigned short mc_link;
+ unsigned short mc_cnt;
+ unsigned char mc_addrs[1][6];
+} mc_t;
+
+typedef struct { /* set address */
+ unsigned short sa_status;
+ unsigned short sa_command;
+ unsigned short sa_link;
+ unsigned char sa_addr[6];
+} sa_t;
+
+typedef struct { /* config command */
+ unsigned short cfg_status;
+ unsigned short cfg_command;
+ unsigned short cfg_link;
+	unsigned char  cfg_bytecnt;			/* size of following data: 4 - 12 */
+ unsigned char cfg_fifolim; /* FIFO threshold */
+ unsigned char cfg_byte8;
+#define CFG8_SRDY (1 << 6)
+#define CFG8_SAVEBADF (1 << 7)
+ unsigned char cfg_byte9;
+#define CFG9_ADDRLEN(x) (x)
+#define CFG9_ADDRLENBUF (1 << 3)
+#define CFG9_PREAMB2 (0 << 4)
+#define CFG9_PREAMB4 (1 << 4)
+#define CFG9_PREAMB8 (2 << 4)
+#define CFG9_PREAMB16 (3 << 4)
+#define CFG9_ILOOPBACK (1 << 6)
+#define CFG9_ELOOPBACK (1 << 7)
+ unsigned char cfg_byte10;
+#define CFG10_LINPRI(x) (x)
+#define CFG10_ACR(x) (x << 4)
+#define CFG10_BOFMET (1 << 7)
+ unsigned char cfg_ifs;
+ unsigned char cfg_slotl;
+ unsigned char cfg_byte13;
+#define CFG13_SLOTH(x) (x)
+#define CFG13_RETRY(x) (x << 4)
+ unsigned char cfg_byte14;
+#define CFG14_PROMISC (1 << 0)
+#define CFG14_DISBRD (1 << 1)
+#define CFG14_MANCH (1 << 2)
+#define CFG14_TNCRS (1 << 3)
+#define CFG14_NOCRC (1 << 4)
+#define CFG14_CRC16 (1 << 5)
+#define CFG14_BTSTF (1 << 6)
+#define CFG14_FLGPAD (1 << 7)
+ unsigned char cfg_byte15;
+#define CFG15_CSTF(x) (x)
+#define CFG15_ICSS (1 << 3)
+#define CFG15_CDTF(x) (x << 4)
+#define CFG15_ICDS (1 << 7)
+ unsigned short cfg_minfrmlen;
+} cfg_t;
+
+typedef struct { /* scb */
+ unsigned short scb_status; /* status of 82586 */
+#define SCB_STRXMASK (7 << 4) /* Receive unit status */
+#define SCB_STRXIDLE (0 << 4) /* Idle */
+#define SCB_STRXSUSP (1 << 4) /* Suspended */
+#define SCB_STRXNRES (2 << 4) /* No resources */
+#define SCB_STRXRDY (4 << 4) /* Ready */
+#define SCB_STCUMASK (7 << 8) /* Command unit status */
+#define SCB_STCUIDLE (0 << 8) /* Idle */
+#define SCB_STCUSUSP (1 << 8) /* Suspended */
+#define SCB_STCUACTV (2 << 8) /* Active */
+#define SCB_STRNR (1 << 12) /* Receive unit not ready */
+#define SCB_STCNA (1 << 13) /* Command unit not ready */
+#define SCB_STFR (1 << 14) /* Frame received */
+#define SCB_STCX (1 << 15) /* Command completed */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */
+#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */
+#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */
+#define SCB_CMDRXABORT (4 << 4) /* Abort reception */
+#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */
+#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */
+#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */
+#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */
+#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */
+#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */
+#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */
+#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ unsigned short scb_rfa_offset; /* Offset of first receive frame area */
+ unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/
+ unsigned short scb_aln_errors; /* Misaligned frames */
+ unsigned short scb_rsc_errors; /* Frames lost due to no space */
+ unsigned short scb_ovn_errors; /* Frames lost due to slow bus */
+} scb_t;
+
+typedef struct { /* iscp */
+ unsigned short iscp_busy; /* set by CPU before CA */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh;
+} iscp_t;
+
+ /* this address must be 0xfff6 */
+typedef struct { /* scp */
+ unsigned short scp_sysbus; /* bus size */
+#define SCP_SY_16BBUS 0x00
+#define SCP_SY_8BBUS 0x01
+ unsigned short scp_junk[2]; /* junk */
+ unsigned short scp_iscpl; /* lower 16 bits of iscp */
+ unsigned short scp_iscph; /* upper 16 bits of iscp */
+} scp_t;
+
+/* commands */
+#define CMD_NOP 0
+#define CMD_SETADDRESS 1
+#define CMD_CONFIG 2
+#define CMD_SETMULTICAST 3
+#define CMD_TX 4
+#define CMD_TDR 5
+#define CMD_DUMP 6
+#define CMD_DIAGNOSE 7
+
+#define CMD_MASK 7
+
+#define CMD_INTR (1 << 13)
+#define CMD_SUSP (1 << 14)
+#define CMD_EOL (1 << 15)
+
+#define STAT_COLLISIONS (15)
+#define STAT_COLLEXCESSIVE (1 << 5)
+#define STAT_COLLAFTERTX (1 << 6)
+#define STAT_TXDEFERRED (1 << 7)
+#define STAT_TXSLOWDMA (1 << 8)
+#define STAT_TXLOSTCTS (1 << 9)
+#define STAT_NOCARRIER (1 << 10)
+#define STAT_FAIL (1 << 11)
+#define STAT_ABORTED (1 << 12)
+#define STAT_OK (1 << 13)
+#define STAT_BUSY (1 << 14)
+#define STAT_COMPLETE (1 << 15)
+#endif
+#endif
+
+/*
+ * Ether1 card definitions:
+ *
+ * FAST accesses:
+ * +0 Page register
+ * 16 pages
+ * +4 Control
+ * '1' = reset
+ * '2' = loopback
+ * '4' = CA
+ * '8' = int ack
+ *
+ * RAM at address + 0x2000
+ * Pod. Prod id = 3
+ * Words after ID block [base + 8 words]
+ * +0 pcb issue (0x0c and 0xf3 invalid)
+ * +1 - +6 eth hw address
+ */
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
new file mode 100644
index 000000000..0af70094a
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -0,0 +1,240 @@
+/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
+   munged into HPPA boxen.
+
+ This driver is based upon 82596.c, original credits are below...
+ but there were too many hoops which HP wants jumped through to
+ keep this code in there in a sane manner.
+
+ 3 primary sources of the mess --
+ 1) hppa needs *lots* of cacheline flushing to keep this kind of
+ MMIO running.
+
+ 2) The 82596 needs to see all of its pointers as their physical
+ address. Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+ 3) The implementation HP is using seems to be significantly pickier
+       about when and how the command and RX units are started.  Some
+       command ordering was changed.
+
+ Examination of the mach driver leads one to believe that there
+ might be a saner way to pull this off... anyone who feels like a
+ full rewrite can be my guest.
+
+ Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+ 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
+ 03/02/2000 changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly in to sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain, when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+
+#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
+
+#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
+#define PA_CPU_PORT_L_ACCESS 4
+#define PA_CHANNEL_ATTENTION 8
+
+#define OPT_SWAP_PORT	0x0001	/* Need to word-swap on the MPU port */
+
+#define SYSBUS 0x0000006c
+
+/* big endian CPU, 82596 "big" endian mode */
+#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define SWAP16(x) (x)
+
+#define NONCOHERENT_DMA 1
+
+#include "lib82596.c"
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
+
+static inline void ca(struct net_device *dev)
+{
+ gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
+}
+
+
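+/*
+ * mpu_port: issue a 32-bit PORT command to the 82596 as two 16-bit
+ * writes to the CPU port register; OPT_SWAP_PORT selects which half is
+ * written first.
+ */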
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ u32 v = (u32) (c) | (u32) (x);
+ u16 a, b;
+
+ if (lp->options & OPT_SWAP_PORT) {
+ a = v >> 16;
+ b = v & 0xffff;
+ } else {
+ a = v & 0xffff;
+ b = v >> 16;
+ }
+
+ gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+ if (!running_on_qemu)
+ udelay(1);
+ gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+}
+
+#define LAN_PROM_ADDR 0xF0810000
+
+static int __init
+lan_init_chip(struct parisc_device *dev)
+{
+ struct net_device *netdevice;
+ struct i596_private *lp;
+ int retval = -ENOMEM;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ if (!dev->irq) {
+ printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+ __FILE__, (unsigned long)dev->hpa.start);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n",
+ (unsigned long)dev->hpa.start, dev->irq);
+
+ netdevice = alloc_etherdev(sizeof(struct i596_private));
+ if (!netdevice)
+ return -ENOMEM;
+ SET_NETDEV_DEV(netdevice, &dev->dev);
+ parisc_set_drvdata (dev, netdevice);
+
+ netdevice->base_addr = dev->hpa.start;
+ netdevice->irq = dev->irq;
+
+ if (pdc_lan_station_id(addr, netdevice->base_addr)) {
+ for (i = 0; i < 6; i++) {
+ addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ }
+ printk(KERN_INFO
+ "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+ }
+ eth_hw_addr_set(netdevice, addr);
+
+ lp = netdev_priv(netdevice);
+ lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
+ lp->dma = dma_alloc_noncoherent(&dev->dev,
+ sizeof(struct i596_dma), &lp->dma_addr,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!lp->dma)
+ goto out_free_netdev;
+
+ retval = i82596_probe(netdevice);
+ if (retval)
+ goto out_free_dma;
+ return 0;
+
+out_free_dma:
+ dma_free_noncoherent(&dev->dev, sizeof(struct i596_dma),
+ lp->dma, lp->dma_addr, DMA_BIDIRECTIONAL);
+out_free_netdev:
+ free_netdev(netdevice);
+ return retval;
+}
+
+static void __exit lan_remove_chip(struct parisc_device *pdev)
+{
+ struct net_device *dev = parisc_get_drvdata(pdev);
+ struct i596_private *lp = netdev_priv(dev);
+
+ unregister_netdev (dev);
+	dma_free_noncoherent(&pdev->dev, sizeof(struct i596_dma), lp->dma,
+			     lp->dma_addr, DMA_BIDIRECTIONAL);
+ free_netdev (dev);
+}
+
+static const struct parisc_device_id lan_tbl[] __initconst = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, lan_tbl);
+
+static struct parisc_driver lan_driver __refdata = {
+ .name = "lasi_82596",
+ .id_table = lan_tbl,
+ .probe = lan_init_chip,
+ .remove = __exit_p(lan_remove_chip),
+};
+
+static int lasi_82596_init(void)
+{
+ printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
+ return register_parisc_driver(&lan_driver);
+}
+
+module_init(lasi_82596_init);
+
+static void __exit lasi_82596_exit(void)
+{
+ unregister_parisc_driver(&lan_driver);
+}
+
+module_exit(lasi_82596_exit);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
new file mode 100644
index 000000000..ca2fb303f
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -0,0 +1,1424 @@
+/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
+   munged into HPPA boxen.
+
+ This driver is based upon 82596.c, original credits are below...
+ but there were too many hoops which HP wants jumped through to
+ keep this code in there in a sane manner.
+
+ 3 primary sources of the mess --
+ 1) hppa needs *lots* of cacheline flushing to keep this kind of
+ MMIO running.
+
+ 2) The 82596 needs to see all of its pointers as their physical
+ address. Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+ 3) The implementation HP is using seems to be significantly pickier
+       about when and how the command and RX units are started.  Some
+       command ordering was changed.
+
+ Examination of the mach driver leads one to believe that there
+ might be a saner way to pull this off... anyone who feels like a
+ full rewrite can be my guest.
+
+ Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+ 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
+ 03/02/2000 changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly in to sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain, when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gfp.h>
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x, y) if (i596_debug & (x)) { y; }
+
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define ISCP_BUSY 0x0001
+
+#define I596_NULL ((u32)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT (HZ/20)
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ u32 ca;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ u32 next;
+ u32 data;
+ u32 cache_pad[5]; /* Total 32 bytes... */
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ u32 b_next; /* Address from i596 viewpoint */
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ u32 tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+ dma_addr_t dma_addr;
+#ifdef __LP64__
+ u32 cache_pad[6]; /* Total 64 bytes... */
+#else
+ u32 cache_pad[1]; /* Total 32 bytes... */
+#endif
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ u32 b_next; /* Address from i596 viewpoint */
+ u32 rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+#ifndef __LP64__
+ u32 cache_pad[2]; /* Total 32 bytes... */
+#endif
+};
+
+struct i596_rbd {
+ /* hardware data */
+ unsigned short count;
+ unsigned short zero1;
+ u32 b_next;
+ u32 b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ /* driver data */
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ u32 b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+ /* Total 32 bytes... */
+#ifdef __LP64__
+ u32 cache_pad[4];
+#endif
+};
+
+/* These values are chosen so struct i596_dma fits in one page... */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ u32 cmd;
+ u32 rfd;
+ u32 crc_err;
+ u32 align_err;
+ u32 resource_err;
+ u32 over_err;
+ u32 rcvdt_err;
+ u32 short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ u32 stat;
+ u32 scb;
+};
+
+struct i596_scp {
+ u32 sysbus;
+ u32 pad;
+ u32 iscp;
+};
+
+struct i596_dma {
+ struct i596_scp scp __attribute__((aligned(32)));
+ volatile struct i596_iscp iscp __attribute__((aligned(32)));
+ volatile struct i596_scb scb __attribute__((aligned(32)));
+ struct sa_cmd sa_cmd __attribute__((aligned(32)));
+ struct cf_cmd cf_cmd __attribute__((aligned(32)));
+ struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
+ struct mc_cmd mc_cmd __attribute__((aligned(32)));
+ struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
+};
+
+struct i596_private {
+ struct i596_dma *dma;
+ u32 stat;
+ int last_restart;
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ u32 last_cmd;
+ int next_tx_cmd;
+ int options;
+ spinlock_t lock; /* serialize access to chip */
+ dma_addr_t dma_addr;
+ void __iomem *mpu_port;
+ void __iomem *ca;
+};
+
+static const char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+static inline void ca(struct net_device *dev);
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 100;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev);
+#endif
+
+static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
+{
+ return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
+}
+
+#ifdef NONCOHERENT_DMA
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_device(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+#else
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+#endif /* NONCOHERENT_DMA */
+
+static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ while (--delcnt && dma->iscp.stat) {
+ udelay(10);
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ }
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
+ dev->name, str, SWAP16(dma->iscp.stat));
+ return -1;
+ } else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
+ while (--delcnt && dma->scb.command) {
+ udelay(10);
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
+ }
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str,
+ SWAP16(dma->scb.status),
+ SWAP16(dma->scb.command));
+ return -1;
+ } else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
+ &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
+ printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
+ &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
+ printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %08x, .rfd = %08x\n",
+ &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
+ SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
+ printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
+ " over %x, rcvdt %x, short %x\n",
+ SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
+ SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
+ SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
+ cmd = lp->cmd_head;
+ while (cmd != NULL) {
+ printk(KERN_DEBUG
+ "cmd at %p, .status = %04x, .command = %04x,"
+ " .b_next = %08x\n",
+ cmd, SWAP16(cmd->status), SWAP16(cmd->command),
+ SWAP32(cmd->b_next));
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk(KERN_DEBUG "rfd_head = %p\n", rfd);
+ do {
+ printk(KERN_DEBUG
+ " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+ " count %04x\n",
+ rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
+ SWAP32(rfd->b_next), SWAP32(rfd->rbd),
+ SWAP16(rfd->count));
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk(KERN_DEBUG "rbd_head = %p\n", rbd);
+ do {
+ printk(KERN_DEBUG
+ " %p .count %04x, b_next %08x, b_data %08x,"
+ " size %04x\n",
+ rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
+ SWAP32(rbd->b_data), SWAP16(rbd->size));
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+ dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
+}
+
+static inline int init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (skb == NULL)
+ return -1;
+ dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ rbd->v_next = rbd+1;
+ rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
+ rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->data;
+ rbd->b_data = SWAP32(dma_addr);
+ rbd->size = SWAP16(PKT_BUF_SZ);
+ }
+ lp->rbd_head = dma->rbds;
+ rbd = dma->rbds + rx_ring_size - 1;
+ rbd->v_next = dma->rbds;
+ rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
+ rfd->cmd = SWAP16(CMD_FLEX);
+ }
+ lp->rfd_head = dma->rfds;
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ rfd = dma->rfds;
+ rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
+ rfd->v_prev = dma->rfds + rx_ring_size - 1;
+ rfd = dma->rfds + rx_ring_size - 1;
+ rfd->v_next = dma->rfds;
+ rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
+ rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
+ return 0;
+}
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dma_unmap_single(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb(rbd->skb);
+ }
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ dma->rfds[i].rbd = I596_NULL;
+ dma->rfds[i].cmd = SWAP16(CMD_FLEX);
+ }
+ dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
+ lp->rfd_head = dma->rfds;
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ lp->rbd_head = dma->rbds;
+ dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
+
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
+}
+
+
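+/*
+ * init_i596_mem: reset the chip through the PORT interface, hand it the
+ * SCP address (PORT_ALTSCP), wait for the ISCP busy flag to clear, grab
+ * the interrupt, then queue the Configure, SASetup and TDR commands and
+ * finally start the receive unit.
+ */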
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ unsigned long flags;
+
+ mpu_port(dev, PORT_RESET, 0);
+ udelay(100); /* Wait 100us - seems to help */
+
+ /* change the scp address */
+
+ lp->last_cmd = jiffies;
+
+ dma->scp.sysbus = SYSBUS;
+ dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
+ dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
+ dma->iscp.stat = SWAP32(ISCP_BUSY);
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = NULL;
+ dma->scb.cmd = I596_NULL;
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+ dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
+ dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
+ ca(dev);
+ if (wait_istat(dev, dma, 1000, "initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: i82596 initialization successful\n",
+ dev->name));
+
+ if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ goto failed;
+ }
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+
+ dma->scb.command = 0;
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: queuing CmdConfigure\n", dev->name));
+ memcpy(dma->cf_cmd.i596_config, init_setup, 14);
+ dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+ dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &dma->cf_cmd.cmd);
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+ memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
+ dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
+ dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
+ i596_add_cmd(dev, &dma->sa_cmd.cmd);
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+ dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
+ dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
+ i596_add_cmd(dev, &dma->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed_free_irq;
+ }
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+ dma->scb.command = SWAP16(RX_START);
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ ca(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+ if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
+ goto failed_free_irq;
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: Receive unit started OK\n", dev->name));
+ return 0;
+
+failed_free_irq:
+ free_irq(dev->irq, dev);
+failed:
+ printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
+ mpu_port(dev, PORT_RESET, 0);
+ return -1;
+}
+
+
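+/*
+ * i596_rx: service completed receive frame descriptors.  Frames longer
+ * than rx_copybreak are passed up in the ring skb (which is replaced
+ * with a fresh one); shorter frames are copied into a small skb so the
+ * large buffer can be reused.  Each processed rfd is re-marked as the
+ * new end of list and the previous end-of-list mark is removed.
+ */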
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG
+ "i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
+ while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr) {
+ rbd = lp->rbd_head;
+ dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
+ } else {
+ printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = NULL;
+ }
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG
+ " rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
+ /* a good frame */
+ int pkt_len = SWAP16(rbd->count) & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+ dma_addr_t dma_addr;
+
+ dma_unmap_single(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ /* Get fresh skbuff to replace filled one. */
+ newskb = netdev_alloc_skb_ip_align(dev,
+ PKT_BUF_SZ);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ dma_addr = dma_map_single(dev->dev.parent,
+ newskb->data,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ rbd->v_data = newskb->data;
+ rbd->b_data = SWAP32(dma_addr);
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
+ } else {
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ }
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ dev->stats.rx_dropped++;
+ } else {
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ dma_sync_single_for_cpu(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
+ dma_sync_single_for_device(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ }
+ skb->len = pkt_len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ } else {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ dev->stats.rx_errors++;
+ if (rfd->stat & SWAP16(0x0100))
+ dev->stats.collisions++;
+ if (rfd->stat & SWAP16(0x8000))
+ dev->stats.rx_length_errors++;
+ if (rfd->stat & SWAP16(0x0001))
+ dev->stats.rx_over_errors++;
+ if (rfd->stat & SWAP16(0x0002))
+ dev->stats.rx_fifo_errors++;
+ if (rfd->stat & SWAP16(0x0004))
+ dev->stats.rx_frame_errors++;
+ if (rfd->stat & SWAP16(0x0008))
+ dev->stats.rx_crc_errors++;
+ if (rfd->stat & SWAP16(0x0010))
+ dev->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+ rfd->count = 0;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->dma->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = SWAP16(CMD_FLEX);
+ dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
+ rfd = lp->rfd_head;
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
+ }
+
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
+
+ return 0;
+}
+
+
+static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch (SWAP16(ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+ dma_unmap_single(dev->dev.parent,
+ tx_cmd->dma_addr,
+ skb->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ }
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
+ }
+
+ wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
+ lp->dma->scb.cmd = I596_NULL;
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+}
+
+
+static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ /* FIXME: this command might cause an lpmc */
+ lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ ca(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev, lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+
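+/*
+ * i596_add_cmd: append a command to the chain with EOL|INTR set.  If
+ * the chain was empty, point scb.cmd at it and kick the command unit
+ * with CUC_START; if the backlog has been building for too long,
+ * assume the command unit has wedged and reset the chip.
+ */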
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
+ lp->cmd_head));
+
+ cmd->status = 0;
+ cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
+ cmd->v_next = NULL;
+ cmd->b_next = I596_NULL;
+ dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
+ dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
+ dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
+ dma->scb.command = SWAP16(CUC_START);
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
+ ca(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk(KERN_ERR
+ "%s: command unit timed out, status resetting.\n",
+ dev->name);
+#if 1
+ i596_reset(dev, lp);
+#endif
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ DEB(DEB_OPEN, printk(KERN_DEBUG
+ "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (init_rx_bufs(dev)) {
+ printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
+ return -EAGAIN;
+ }
+ if (init_i596_mem(dev)) {
+ printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
+ goto out_remove_rx_bufs;
+ }
+ netif_start_queue(dev);
+
+ return 0;
+
+out_remove_rx_bufs:
+ remove_rx_bufs(dev);
+ return -EAGAIN;
+}
+
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ dev->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
+ lp->dma->scb.command = SWAP16(CUC_START | RX_START);
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ ca (dev);
+ lp->last_restart = dev->stats.tx_packets;
+ }
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+
+ DEB(DEB_STARTTX, printk(KERN_DEBUG
+ "%s: i596_start_xmit(%x,%p) called\n",
+ dev->name, skb->len, skb->data));
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->dma->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: xmit ring full, dropping packet.\n",
+ dev->name));
+ dev->stats.tx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = SWAP16(EOF | length);
+
+ tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ tbd->data = SWAP32(tx_cmd->dma_addr);
+
+ DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
+ dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
+ dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
+ add, add + 6, add, add[12], add[13], str);
+}
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i596_poll_controller,
+#endif
+};
+
+static int i82596_probe(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ int ret;
+
+	/* This lot is to ensure things have been cache line aligned. */
+ BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
+ BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
+ BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
+ BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
+#ifndef __LP64__
+ BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
+#endif
+
+ if (!dev->base_addr || !dev->irq)
+ return -ENODEV;
+
+ dev->netdev_ops = &i596_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ memset(lp->dma, 0, sizeof(struct i596_dma));
+ lp->dma->scb.command = 0;
+ lp->dma->scb.cmd = I596_NULL;
+ lp->dma->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+
+ dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
+
+ ret = register_netdev(dev);
+ if (ret)
+ return ret;
+
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
+ DEB(DEB_INIT, printk(KERN_INFO
+ "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
+ dev->name, lp->dma, (int)sizeof(struct i596_dma),
+ &lp->dma->scb));
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ i596_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ struct i596_dma *dma;
+ unsigned short status, ack_cmd = 0;
+
+ lp = netdev_priv(dev);
+ dma = lp->dma;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+ status = SWAP16(dma->scb.status);
+
+ DEB(DEB_INTS, printk(KERN_DEBUG
+ "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, dev->irq, status));
+
+ ack_cmd = status & 0xf000;
+
+ if (!ack_cmd) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: interrupt with no events\n",
+ dev->name));
+ spin_unlock (&lp->lock);
+ return IRQ_NONE;
+ }
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ if ((status & 0x8000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt completed command.\n",
+ dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt command unit inactive %x.\n",
+ dev->name, status & 0x0700));
+
+ while (lp->cmd_head != NULL) {
+ dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
+ if (!(lp->cmd_head->status & SWAP16(STAT_C)))
+ break;
+
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS,
+ printk(KERN_DEBUG
+ "cmd_head->status = %04x, ->command = %04x\n",
+ SWAP16(lp->cmd_head->status),
+ SWAP16(lp->cmd_head->command)));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch (SWAP16(ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if (ptr->status & SWAP16(STAT_OK)) {
+ DEB(DEB_TXADDR,
+ print_eth(skb->data, "tx-done"));
+ } else {
+ dev->stats.tx_errors++;
+ if (ptr->status & SWAP16(0x0020))
+ dev->stats.collisions++;
+ if (!(ptr->status & SWAP16(0x0040)))
+ dev->stats.tx_heartbeat_errors++;
+ if (ptr->status & SWAP16(0x0400))
+ dev->stats.tx_carrier_errors++;
+ if (ptr->status & SWAP16(0x0800))
+ dev->stats.collisions++;
+ if (ptr->status & SWAP16(0x1000))
+ dev->stats.tx_aborted_errors++;
+ }
+ dma_unmap_single(dev->dev.parent,
+ tx_cmd->dma_addr,
+ skb->len, DMA_TO_DEVICE);
+ dev_consume_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
+
+ if (status & 0x8000) {
+ DEB(DEB_ANY,
+ printk(KERN_DEBUG "%s: link ok.\n",
+ dev->name));
+ } else {
+ if (status & 0x4000)
+ printk(KERN_ERR
+ "%s: Transceiver problem.\n",
+ dev->name);
+ if (status & 0x2000)
+ printk(KERN_ERR
+ "%s: Termination problem.\n",
+ dev->name);
+ if (status & 0x1000)
+ printk(KERN_ERR
+ "%s: Short circuit.\n",
+ dev->name);
+
+ DEB(DEB_TDR,
+ printk(KERN_DEBUG "%s: Time %d.\n",
+ dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+				/*
+				 * Zap command so set_multicast_list() knows
+				 * it is free
+				 */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
+ lp->last_cmd = jiffies;
+ }
+
+ /* This mess is arranging that only the last of any outstanding
+ * commands has the interrupt bit set. Should probably really
+ * only add to the cmd queue when the CU is stopped.
+ */
+ ptr = lp->cmd_head;
+ while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
+ struct i596_cmd *prev = ptr;
+
+ ptr->command &= SWAP16(0x1fff);
+ ptr = ptr->v_next;
+ dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
+ }
+
+ if (lp->cmd_head != NULL)
+ ack_cmd |= CUC_START;
+ dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt received a frame.\n",
+ dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt receive unit inactive, status 0x%x\n",
+ dev->name, status));
+ ack_cmd |= RX_START;
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+ dma->scb.command = SWAP16(ack_cmd);
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
+
+ /* DANGER: I suspect that some kind of interrupt
+ acknowledgement aside from acking the 82596 might be needed
+ here... but it's running acceptably without */
+
+ ca(dev);
+
+ wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
+ DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT,
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, SWAP16(lp->dma->scb.status)));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev, lp->dma, 100, "close1 timed out");
+ lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+ dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
+
+ ca(dev);
+
+ wait_cmd(dev, lp->dma, 100, "close2 timed out");
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT, i596_display_data(dev));
+ i596_cleanup_cmd(dev, lp);
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI,
+ printk(KERN_DEBUG
+ "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, netdev_mc_count(dev),
+ dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+ if ((dev->flags & IFF_PROMISC) &&
+ !(dma->cf_cmd.i596_config[8] & 0x01)) {
+ dma->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) &&
+ (dma->cf_cmd.i596_config[8] & 0x01)) {
+ dma->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) &&
+ (dma->cf_cmd.i596_config[11] & 0x20)) {
+ dma->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) &&
+ !(dma->cf_cmd.i596_config[11] & 0x20)) {
+ dma->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ if (dma->cf_cmd.cmd.command)
+ printk(KERN_INFO
+ "%s: config change request already queued\n",
+ dev->name);
+ else {
+ dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+ dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &dma->cf_cmd.cmd);
+ }
+ }
+
+ cnt = netdev_mc_count(dev);
+ if (cnt > MAX_MC_CNT) {
+ cnt = MAX_MC_CNT;
+		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
+		       dev->name, cnt);
+ }
+
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ cmd = &dma->mc_cmd;
+ cmd->cmd.command = SWAP16(CmdMulticastList);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
+ cp = cmd->mc_addrs;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, ha->addr, ETH_ALEN);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI,
+ printk(KERN_DEBUG
+ "%s: Adding address %pM\n",
+ dev->name, cp));
+ cp += ETH_ALEN;
+ }
+ dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
new file mode 100644
index 000000000..54bb4d9a0
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sni_82596.c -- driver for intel 82596 ethernet controller, as
+ * used in older SNI RM machines
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01"
+
+static const char sni_82596_string[] = "snirm_82596";
+
+#define SYSBUS 0x00004400
+
+/* big endian CPU, 82596 little endian */
+#define SWAP32(x) cpu_to_le32((u32)(x))
+#define SWAP16(x) cpu_to_le16((u16)(x))
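+
+/*
+ * Editor's note: illustrative sketch only, not part of the upstream driver
+ * (the helper name is made up).  On the big-endian SNI RM CPUs the 82596
+ * still expects its shared descriptors in little-endian byte order, which is
+ * exactly what the SWAP macros above provide before a value is stored.
+ */
+static inline void sni_swap_example(void)
+{
+	u32 cpu_val = 0x12345678;
+	__le32 chip_val = SWAP32(cpu_val);
+
+	/* On this (big-endian) machine chip_val is laid out as 78 56 34 12. */
+	pr_debug("chip image of %08x: %08x\n", cpu_val, (__force u32)chip_val);
+}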
+
+#define OPT_MPU_16BIT 0x01
+
+#include "lib82596.c"
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_82596");
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "82596 debug mask");
+
+static inline void ca(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ writel(0, lp->ca);
+}
+
+
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ u32 v = (u32) (c) | (u32) (x);
+
+ if (lp->options & OPT_MPU_16BIT) {
+ writew(v & 0xffff, lp->mpu_port);
+ wmb(); /* order writes to MPU port */
+ udelay(1);
+ writew(v >> 16, lp->mpu_port);
+ } else {
+ writel(v, lp->mpu_port);
+ wmb(); /* order writes to MPU port */
+ udelay(1);
+ writel(v, lp->mpu_port);
+ }
+}
+
+
+static int sni_82596_probe(struct platform_device *dev)
+{
+ struct net_device *netdevice;
+ struct i596_private *lp;
+ struct resource *res, *ca, *idprom, *options;
+ int retval = -ENOMEM;
+ void __iomem *mpu_addr;
+ void __iomem *ca_addr;
+ u8 __iomem *eth_addr;
+ u8 mac[ETH_ALEN];
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ options = platform_get_resource(dev, 0, 0);
+ idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
+ if (!res || !ca || !options || !idprom)
+ return -ENODEV;
+ mpu_addr = ioremap(res->start, 4);
+ if (!mpu_addr)
+ return -ENOMEM;
+ ca_addr = ioremap(ca->start, 4);
+ if (!ca_addr)
+ goto probe_failed_free_mpu;
+
+ printk(KERN_INFO "Found i82596 at 0x%x\n", res->start);
+
+ netdevice = alloc_etherdev(sizeof(struct i596_private));
+ if (!netdevice)
+ goto probe_failed_free_ca;
+
+ SET_NETDEV_DEV(netdevice, &dev->dev);
+ platform_set_drvdata (dev, netdevice);
+
+ netdevice->base_addr = res->start;
+ netdevice->irq = platform_get_irq(dev, 0);
+
+ eth_addr = ioremap(idprom->start, 0x10);
+ if (!eth_addr)
+ goto probe_failed;
+
+ /* someone seems to like messed up stuff */
+ mac[0] = readb(eth_addr + 0x0b);
+ mac[1] = readb(eth_addr + 0x0a);
+ mac[2] = readb(eth_addr + 0x09);
+ mac[3] = readb(eth_addr + 0x08);
+ mac[4] = readb(eth_addr + 0x07);
+ mac[5] = readb(eth_addr + 0x06);
+ eth_hw_addr_set(netdevice, mac);
+ iounmap(eth_addr);
+
+ if (netdevice->irq < 0) {
+ printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+ __FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
+ goto probe_failed;
+ }
+
+ lp = netdev_priv(netdevice);
+ lp->options = options->flags & IORESOURCE_BITS;
+ lp->ca = ca_addr;
+ lp->mpu_port = mpu_addr;
+
+ lp->dma = dma_alloc_coherent(&dev->dev, sizeof(struct i596_dma),
+ &lp->dma_addr, GFP_KERNEL);
+ if (!lp->dma)
+ goto probe_failed;
+
+ retval = i82596_probe(netdevice);
+ if (retval)
+ goto probe_failed_free_dma;
+ return 0;
+
+probe_failed_free_dma:
+ dma_free_coherent(&dev->dev, sizeof(struct i596_dma), lp->dma,
+ lp->dma_addr);
+probe_failed:
+ free_netdev(netdevice);
+probe_failed_free_ca:
+ iounmap(ca_addr);
+probe_failed_free_mpu:
+ iounmap(mpu_addr);
+ return retval;
+}
+
+static int sni_82596_driver_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct i596_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+	dma_free_coherent(&pdev->dev, sizeof(struct i596_dma), lp->dma,
+			  lp->dma_addr);
+ iounmap(lp->ca);
+ iounmap(lp->mpu_port);
+ free_netdev (dev);
+ return 0;
+}
+
+static struct platform_driver sni_82596_driver = {
+ .probe = sni_82596_probe,
+ .remove = sni_82596_driver_remove,
+ .driver = {
+ .name = sni_82596_string,
+ },
+};
+
+static int sni_82596_init(void)
+{
+ printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
+ return platform_driver_register(&sni_82596_driver);
+}
+
+
+static void __exit sni_82596_exit(void)
+{
+ platform_driver_unregister(&sni_82596_driver);
+}
+
+module_init(sni_82596_init);
+module_exit(sni_82596_exit);
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
new file mode 100644
index 000000000..3909c6a0a
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -0,0 +1,1185 @@
+/*
+ * Sun3 i82586 Ethernet driver
+ *
+ * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net)
+ *
+ * Original copyright follows:
+ * --------------------------
+ *
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
+ * --------------------------
+ *
+ * Consult ni52.c for further notes from the original driver.
+ *
+ * This incarnation currently supports the OBIO version of the i82586 chip
+ * used in certain sun3 models. It should be fairly doable to expand this
+ * to support VME if I should ever acquire such a board.
+ *
+ */
+
+static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume = 0; /* experimental .. better should be zero */
+static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/sun3mmu.h>
+#include <asm/dvma.h>
+#include <asm/byteorder.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sun3_82586.h"
+
+#define DRV_NAME "sun3_82586"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 0 /* 16 Bit */
+#define SUN3_82586_TOTAL_SIZE PAGE_SIZE
+
+#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;}
+#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;}
+#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;}
+#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;}
+#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);}
+
+#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) )
+#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base))
+#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )))
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode is it
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, you had better not change these values: */
+
+#define RECV_BUFF_SIZE 1536 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1536 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+
+/**************************************************************************/
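+
+/* Editor's note: rough worked example, added here for illustration (not from
+ * the original author), using the sizes quoted above with the default 32K
+ * configuration (NUM_RECV_BUFFS_32 = 16, NUM_XMIT_BUFFS = 1, rfdadd = 0):
+ *
+ *	init blocks (scp+iscp+scb)                   36
+ *	2 nop commands                 2 *    8  =   16
+ *	16 RFDs                       16 *   24  =  384
+ *	16 RBDs                       16 *   12  =  192
+ *	16 receive buffers            16 * 1536  = 24576
+ *	1 xmit cmd + tbd + buffer     16+8+1536  = 1560
+ *	                                total   ~= 26764 bytes, well inside 32K
+ */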
+
+/* different DELAYs */
+#define DELAY(x) mdelay(32 * x);
+#define DELAY_16(); { udelay(16); }
+#define DELAY_18(); { udelay(4); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() \
+{ int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if(swab16((addr)->cmd_status) & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
+static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
+static int sun3_82586_open(struct net_device *dev);
+static int sun3_82586_close(struct net_device *dev);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+ struct net_device *);
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue);
+#if 0
+static void sun3_82586_dump(struct net_device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev,char *where,unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev,void *ptr);
+static void sun3_82586_rcv_int(struct net_device *dev);
+static void sun3_82586_xmt_int(struct net_device *dev);
+static void sun3_82586_rnr_int(struct net_device *dev);
+
+struct priv
+{
+ unsigned long base;
+ char *memtop;
+ long int lock;
+ int reseted;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
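+
+/*
+ * Editor's note: illustrative sketch only, not used by the driver (the helper
+ * name is made up).  The make16()/make32() macros above convert between 68k
+ * virtual addresses inside the DVMA window and the byte-swapped 16-bit
+ * offsets, relative to p->memtop, that the i82586 keeps in its descriptors;
+ * make32() undoes make16() for any address that lies within that window.
+ */
+static inline int sun3_offset_roundtrip_ok(struct priv *p, void *vaddr)
+{
+	unsigned short off = make16(vaddr);	/* pointer -> swapped 16-bit offset */
+
+	return make32(off) == (char *)vaddr;	/* holds inside the shared window */
+}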
+
+/**********************************************
+ * close device
+ */
+static int sun3_82586_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+
+ sun3_reset586(); /* the hard way to stop the receiver */
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int sun3_82586_open(struct net_device *dev)
+{
+ int ret;
+
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+
+ ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+ return ret;
+ }
+
+ netif_start_queue(dev);
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = &pb;
+ char *iscp_addr;
+ int i;
+
+ p->base = (unsigned long) dvma_btov(0);
+ p->memtop = (char *)dvma_btov((unsigned long)where);
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addr = (char *)dvma_btov((unsigned long)where);
+
+ p->iscp = (struct iscp_struct *) iscp_addr;
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ sun3_reset586();
+ sun3_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by sun3_82586_probe1 and open586.
+ */
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ sun3_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start);
+ p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+ p->iscp->scb_base = make24(dvma_btov(dev->mem_start));
+
+ p->iscp->busy = 1;
+ sun3_reset586();
+ sun3_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+static int __init sun3_82586_probe(void)
+{
+ struct net_device *dev;
+ unsigned long ioaddr;
+ static int found = 0;
+ int err = -ENOMEM;
+
+ /* check that this machine has an onboard 82586 */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_160:
+ case SM_SUN3|SM_3_260:
+ /* these machines have 82586 */
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ if (found)
+ return -ENODEV;
+
+ ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
+ found = 1;
+
+ dev = alloc_etherdev(sizeof(struct priv));
+ if (!dev)
+ goto out;
+ dev->irq = IE_IRQ;
+ dev->base_addr = ioaddr;
+ err = sun3_82586_probe1(dev, ioaddr);
+ if (err)
+ goto out1;
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return 0;
+
+out2:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+out1:
+ free_netdev(dev);
+out:
+ iounmap((void __iomem *)ioaddr);
+ return err;
+}
+module_init(sun3_82586_probe);
+
+static const struct net_device_ops sun3_82586_netdev_ops = {
+ .ndo_open = sun3_82586_open,
+ .ndo_stop = sun3_82586_close,
+ .ndo_start_xmit = sun3_82586_send_packet,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = sun3_82586_timeout,
+ .ndo_get_stats = sun3_82586_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
+{
+ int size, retval;
+
+ if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ /* copy in the ethernet address from the prom */
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
+
+ printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
+
+ /*
+ * check (or search) IO-Memory, 32K
+ */
+ size = 0x8000;
+
+ dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000);
+ dev->mem_end = dev->mem_start + size;
+
+ if(size != 0x2000 && size != 0x4000 && size != 0x8000) {
+		printk("\n%s: Illegal memory size %d. Allowed sizes are 0x2000, 0x4000 or 0x8000 bytes.\n",dev->name,size);
+ retval = -ENODEV;
+ goto out;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ retval = -ENODEV;
+ goto out;
+ }
+
+ ((struct priv *)netdev_priv(dev))->memtop =
+ (char *)dvma_btov(dev->mem_start);
+ ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0);
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_8;
+ else if(size == 0x4000)
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_16;
+ else
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_32;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
+
+ dev->netdev_ops = &sun3_82586_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+
+ dev->if_port = 0;
+ return 0;
+out:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+ return retval;
+}
+
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = netdev_priv(dev);
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct netdev_hw_addr *ha;
+ int num_addrs=netdev_mc_count(dev);
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST);
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ cfg_cmd->promisc = 1;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ cfg_cmd->promisc = 1;
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * TDR, wire check .. e.g. no resistor e.t.c
+ */
+
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST);
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = swab16(tdr_cmd->status);
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST);
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = swab16(num_addrs * 6);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, ETH_ALEN);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if(ptr > (void *)dev->mem_end)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
+ p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT);
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586();
+ DELAY_16();
+
+ sun3_enaint();
+ sun3_active();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for sun3_82586_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = netdev_priv(dev);
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff;
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = swab16(RECV_BUFF_SIZE);
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq);
+ return IRQ_NONE;
+ }
+ p = netdev_priv(dev);
+
+ if(debuglevel > 1)
+ printk("I");
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat;
+ sun3_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ sun3_82586_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ sun3_82586_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ sun3_82586_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(netif_running(dev))
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ sun3_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void sun3_82586_rcv_int(struct net_device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = netdev_priv(dev);
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = netdev_alloc_skb(dev, totlen + 2);
+ if(skb != NULL)
+ {
+ skb_reserve(skb,2);
+ skb_put(skb,totlen);
+ skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ else
+ dev->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=swab16(rbd->status)) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ dev->stats.rx_errors++;
+ }
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void sun3_82586_rnr_int(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ dev->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void sun3_82586_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = netdev_priv(dev);
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status);
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ dev->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ dev->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ dev->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ dev->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ dev->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ sun3_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct priv *p = netdev_priv(dev);
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ netif_wake_queue(dev);
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
+#endif
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ netif_trans_update(dev); /* prevent tx timeout */
+		return;
+ }
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+#endif
+ sun3_82586_close(dev);
+ sun3_82586_open(dev);
+ }
+ netif_trans_update(dev); /* prevent tx timeout */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = netdev_priv(dev);
+
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+		printk("%s: Sorry, max. frame length is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+#if(NUM_XMIT_BUFFS > 1)
+ if(test_and_set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return NETDEV_TX_BUSY;
+ }
+ else
+#endif
+ {
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ memset((void *)p->xmit_cbuffs[p->xmit_count], 0,
+ ETH_ZLEN);
+ len = ETH_ZLEN;
+ }
+ skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+ printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
+ }
+#endif
+
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ sun3_attn586();
+ if(!i)
+ dev_kfree_skb(skb);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* linkpointer of xmit-command already points to next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ p->xmit_count = next_nop;
+
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+ if(p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ p->lock = 0;
+ local_irq_restore(flags);
+ }
+ dev_kfree_skb(skb);
+#endif
+ }
+ return NETDEV_TX_OK;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */
+ p->scb->crc_errs = 0;
+ aln = swab16(p->scb->aln_errs);
+ p->scb->aln_errs = 0;
+ rsc = swab16(p->scb->rsc_errs);
+ p->scb->rsc_errs = 0;
+ ovrn = swab16(p->scb->ovrn_errs);
+ p->scb->ovrn_errs = 0;
+
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_fifo_errors += ovrn;
+ dev->stats.rx_frame_errors += aln;
+ dev->stats.rx_dropped += rsc;
+
+ return &dev->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+ netif_wake_queue(dev);
+}
+
+#if 0
+/*
+ * DUMP .. we expect a not running CMD unit and enough space
+ */
+void sun3_82586_dump(struct net_device *dev,void *ptr)
+{
+ struct priv *p = netdev_priv(dev);
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
new file mode 100644
index 000000000..d82eca563
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -0,0 +1,318 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+/*
+ * Cloned from ni52.h, copyright as above.
+ *
+ * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net)
+ */
+
+
+/* defines for the obio chip (not vme) */
+#define IEOB_NORSET 0x80 /* don't reset the board */
+#define IEOB_ONAIR 0x40 /* put us on the air */
+#define IEOB_ATTEN 0x20 /* attention! */
+#define IEOB_IENAB 0x10 /* interrupt enable */
+#define IEOB_XXXXX 0x08 /* free bit */
+#define IEOB_XCVRL2 0x04 /* level 2 transceiver? */
+#define IEOB_BUSERR 0x02 /* bus error */
+#define IEOB_INT 0x01 /* interrupt */
+
+/* where the obio one lives */
+#define IE_OBIO 0xc0000
+#define IE_IRQ 3
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned char rus;
+ unsigned char cus;
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+	unsigned short aln_errs;	/* alignment error counter */
+	unsigned short rsc_errs;	/* resource error counter */
+	unsigned short ovrn_errs;	/* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
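+
+/* Editor's note (illustrative, not part of the original header): the STAT_*
+ * bits reported in scb.cus use the same bit positions as the ACK_* values,
+ * so the driver acknowledges pending events by writing them straight back
+ * into the command word and raising channel attention, roughly:
+ *
+ *	stat = p->scb->cus & STAT_MASK;	   // which events are pending
+ *	p->scb->cmd_cuc = stat;		   // doubles as ACK_CX/FR/CNA/RNR
+ *	sun3_attn586();			   // make the 82586 re-read the SCB
+ */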
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */
+ unsigned char source[ETH_ALEN]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN 0x10 /* Length error (if length-checking is enabled) */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD 0x0002 /* status: destination address doesn't match IA (82596 only) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
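+
+/* Editor's note (illustrative, not part of the original header): the receive
+ * path recovers a frame's byte count from the low 14 bits of the RBD status
+ * word, e.g.
+ *
+ *	unsigned short st = swab16(rbd->status);
+ *	if (st & RBD_LAST)		// whole frame fits in this buffer
+ *		totlen = st & RBD_MASK;	// bytes actually used
+ */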
+
+/*
+ * Status values for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
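+
+/*
+ * Illustrative sketch, not part of the original driver: the usual test a
+ * driver performs on a command block's status word.  Helper name invented.
+ */
+static inline int i82586_example_cmd_finished_ok(unsigned short cmd_status)
+{
+	/* executed to completion and without error? */
+	return (cmd_status & STAT_COMPL) && (cmd_status & STAT_OK);
+}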
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
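+
+/*
+ * Illustrative sketch, not part of the original driver: composing the
+ * cmd_cmd word of an action command from the values above.  Helper name
+ * invented here.
+ */
+static inline unsigned short i82586_example_xmit_cmd(int last_in_cbl)
+{
+	unsigned short cmd = CMD_XMIT | CMD_INT;	/* interrupt when done */
+
+	if (last_in_cbl)
+		cmd |= CMD_LAST;	/* end of the command block list */
+	return cmd;
+}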
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offset pointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
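+
+/*
+ * Illustrative sketch, not part of the original driver: filling an IA setup
+ * command block.  Helper name invented; 'link' is assumed to be the offset
+ * of the next command block in the shared memory area.
+ */
+static inline void i82586_example_fill_iasetup(struct iasetup_cmd_struct *ia,
+					       const unsigned char *hwaddr,
+					       unsigned short link)
+{
+	int i;
+
+	ia->cmd_status = 0;
+	ia->cmd_cmd = CMD_IASETUP | CMD_LAST | CMD_INT;
+	ia->cmd_link = link;
+	for (i = 0; i < 6; i++)
+		ia->iaddr[i] = hwaddr[i];
+}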
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
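+
+/*
+ * Illustrative sketch, not part of the original driver: toggling the
+ * promiscuous-mode bit, which according to the field comment above is bit 0
+ * of the 'promisc' configuration byte.  Helper name invented here.
+ */
+static inline void i82586_example_set_promisc(struct configure_cmd_struct *cfg,
+					      int on)
+{
+	if (on)
+		cfg->promisc |= 0x01;
+	else
+		cfg->promisc &= ~0x01;
+}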
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[][6]; /* pointer to 6 bytes entries */
+};
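+
+/*
+ * Illustrative sketch, not part of the original driver: mc_cnt counts bytes
+ * rather than entries, so each multicast address contributes 6 bytes.
+ * Helper name invented here.
+ */
+static inline void i82586_example_set_mc_cnt(struct mcsetup_cmd_struct *mc,
+					     unsigned short naddrs)
+{
+	mc->mc_cnt = naddrs * 6;
+}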
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short dump_offset; /* pointer offset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointer offset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
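+
+/*
+ * Illustrative sketch, not part of the original driver: reading the retry
+ * count out of a finished transmit command's status.  The low nibble holds
+ * the collision count; TCMD_MAXCOLL is treated here as "retry limit hit" and
+ * reported as 16 collisions, which is an assumption of this example.
+ */
+static inline int i82586_example_tx_collisions(unsigned short tx_status)
+{
+	if (tx_status & TCMD_MAXCOLL)
+		return 16;
+	return tx_status & TCMD_MAXCOLLMASK;
+}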
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
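+
+/*
+ * Illustrative sketch, not part of the original driver: mapping a TDR result
+ * to a human-readable verdict; the low 11 bits (TDR_TIMEMASK) would
+ * additionally give the propagation time to the fault.  Helper name invented
+ * here.
+ */
+static inline const char *i82586_example_tdr_verdict(unsigned short tdr_status)
+{
+	if (tdr_status & TDR_LNK_OK)
+		return "link ok";
+	if (tdr_status & TDR_XCVR_PRB)
+		return "transceiver problem";
+	if (tdr_status & TDR_ET_OPN)
+		return "cable open";
+	if (tdr_status & TDR_ET_SRT)
+		return "cable short";
+	return "unknown";
+}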
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointer offset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
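+
+/*
+ * Illustrative sketch, not part of the original driver: filling a TBD for a
+ * frame that fits in a single buffer.  Helper name invented; the 'next'
+ * field is left to the caller, since the real driver links TBDs via offsets
+ * into its shared memory area.
+ */
+static inline void i82586_example_fill_tbd(struct tbd_struct *tbd, char *buf,
+					   unsigned short len)
+{
+	tbd->size = len | TBD_LAST;	/* last (and only) buffer -> EOF flag */
+	tbd->buffer = buf;
+}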
+
+
+
+